def class_index(self, img):
    '''
    Return the index of the highest-scoring ImageNet class for `img`.
    '''
    preds = self.get_predictions(img)
    # decode_predictions(preds) could be used here to map the index to a label
    index = np.argmax(preds[0])
    return index
Example #2
def classify_batch(model,
                   batchROIs,
                   batchLocs,
                   labels,
                   minProb=0.5,
                   top=10,
                   dims=(224, 224)):
    # pass our batch ROIs through our network and decode the
    # predictions
    preds = model.predict(batchROIs)
    P = imagenet_utils.decode_predictions(preds, top=top)

    # loop over the decoded predictions
    for i in range(0, len(P)):
        for (_, label, prob) in P[i]:
            # filter out weak detections by ensuring the
            # predicted probability is greater than the minimum
            # probability
            if prob > minProb:
                # grab the coordinates of the sliding window for
                # the prediction and construct the bounding box
                (pX, pY) = batchLocs[i]
                box = (pX, pY, pX + dims[0], pY + dims[1])

                # grab the list of predictions for the label and
                # add the bounding box + probability to the list
                L = labels.get(label, [])
                L.append((box, prob))
                labels[label] = L

    # return the labels dictionary
    return labels
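The batchROIs and batchLocs arguments are assumed to come from a sliding window over a larger image. A minimal sketch of how classify_batch might be driven; the window size, stride, and ResNet50 choice below are illustrative assumptions, not part of the original snippet:

import numpy as np
from tensorflow.keras.applications import ResNet50, imagenet_utils  # classify_batch above expects imagenet_utils at module level
from tensorflow.keras.applications.resnet50 import preprocess_input

model = ResNet50(weights="imagenet")
frame = np.random.rand(448, 448, 3).astype("float32") * 255  # stand-in for a real image
labels = {}
batchROIs, batchLocs = None, []

# slide a 224x224 window with a 112-pixel step and collect each ROI
for y in range(0, frame.shape[0] - 224 + 1, 112):
    for x in range(0, frame.shape[1] - 224 + 1, 112):
        roi = frame[y:y + 224, x:x + 224].copy()
        roi = preprocess_input(np.expand_dims(roi, axis=0))
        batchROIs = roi if batchROIs is None else np.vstack([batchROIs, roi])
        batchLocs.append((x, y))

labels = classify_batch(model, batchROIs, batchLocs, labels, minProb=0.5)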
Example #3
def index():

    if request.method == 'GET':

        global model

        if not model:
            model = tf.keras.models.load_model('/model')

        return render_template('index.html', prediction=None, img=None)

    if request.method == 'POST':

        # Handle request
        img = request.files['file'].read()

        # A tiny bit of preprocessing
        img = process_request_image(img)

        # Convert image to data URI so it can be displayed without being saved
        uri = create_data_uri(img)

        # Convert to VGG16 input
        img = cv2.resize(img, (224, 224))
        img = np.reshape(img, [1, 224, 224, 3])

        # Classify image
        predictions = model.predict(img)
        labels = decode_predictions(predictions, top=1)

        return render_template('index.html',
                               prediction=labels[0][0][1],
                               img=uri)
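This route leans on two helpers that are not shown. A plausible, hedged sketch of process_request_image and create_data_uri, assuming OpenCV is used throughout:

import base64
import cv2
import numpy as np

def process_request_image(raw_bytes):
    # decode the uploaded bytes into a BGR OpenCV image
    arr = np.frombuffer(raw_bytes, dtype=np.uint8)
    return cv2.imdecode(arr, cv2.IMREAD_COLOR)

def create_data_uri(img):
    # encode the image as PNG and wrap it in a data URI for inline display
    ok, buf = cv2.imencode(".png", img)
    encoded = base64.b64encode(buf.tobytes()).decode("ascii")
    return "data:image/png;base64," + encoded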
Example #4
def memoryPerImage(i):

    modelChosen = models[i][0]
    modelName = models[i][1]
    preprocess = models[i][2]

    fileName = '.keras/models/' + modelName + '.h5'
    model = modelChosen(weights=fileName)

    if modelName not in ['xception', 'inceptionv3']:
        inputShape = (224, 224)
    else:
        inputShape = (299, 299)

    imageLoc = 'images/cat.jpeg'
    image = load_img(imageLoc, target_size=inputShape)
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    image = preprocess(image)

    print("Classifying image with {}".format(modelName))
    preds = model.predict(image)
    P = imagenet_utils.decode_predictions(preds)
    for (rank, (imagenetID, label, prob)) in enumerate(P[0]):
        print("{}. {}: {:.2f}%".format(rank + 1, label, prob * 100))

    K.clear_session()
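The global models list indexed by i is not shown; a hedged guess at its layout, with each entry as a (constructor, name, preprocess function) triple:

from tensorflow.keras.applications import (VGG16, ResNet50, InceptionV3, Xception,
                                           vgg16, resnet50, inception_v3, xception)

# illustrative entries only; the original script's list is unknown
models = [
    (VGG16, "vgg16", vgg16.preprocess_input),
    (ResNet50, "resnet50", resnet50.preprocess_input),
    (InceptionV3, "inceptionv3", inception_v3.preprocess_input),
    (Xception, "xception", xception.preprocess_input),
]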
Example #5
def predict(image, model):
    # We keep the 2 classes with the highest confidence score
    results = decode_predictions(model.predict(image), 2)[0]
    response = [
        {"class": result[1], "score": float(round(result[2], 3))} for result in results
    ]
    return response
Example #6
def detect_objects(image_path: str,
                   min_prob_filter: float = 0.1) -> Dict[str, float]:

    # Set the shape the image needs to be for VGG19 model
    input_shape = (224, 224)

    # Convert the image into the way we want it
    image = load_img(image_path, target_size=input_shape)
    image = img_to_array(image)

    # The image is loaded with shape (inputShape[0], inputShape[1], 3); expand it to
    # (1, inputShape[0], inputShape[1], 3) so it forms a one-image batch, which is
    # the layout the network was trained on
    image = np.expand_dims(image, axis=0)

    # Preprocess the image by mean subtraction
    image = imagenet_utils.preprocess_input(image)

    # Get the neural network and download the relevant weights (include top specifies to include the final classification layer)
    nn_model = VGG19(weights='imagenet', include_top=True)

    # Make a prediction and decode
    predictions_matrix = nn_model.predict(image)
    predictions = imagenet_utils.decode_predictions(predictions_matrix)

    # Iterate over results and keep relevant ones (predictions[0] as we want the first batch result, since we made one batch for predicting)
    object_probabilities = {}
    for code, object_name, prob in predictions[0]:
        if prob >= min_prob_filter:
            object_probabilities[object_name] = prob

    return object_probabilities


# image_path = 'img/training/Colin/20160618-_IMG7252.jpg'
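A short usage sketch, reusing the example path from the comment above; the 0.1 threshold keeps only reasonably confident ImageNet classes:

if __name__ == "__main__":
    detections = detect_objects('img/training/Colin/20160618-_IMG7252.jpg',
                                min_prob_filter=0.1)
    for object_name, prob in detections.items():
        print("{}: {:.2%}".format(object_name, prob))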
Example #7
    def post(self):
        # items.append(json.loads(self.request.body))
        # self.write({'message': 'new item added'})
        # initialize the data dictionary that will be returned from the
        # view
        data = {"success": False}

        # ensure an image was properly uploaded to our endpoint
        if self.request.method == "POST":
            if self.request.files.get("image"):
                # read the image in PIL format
                image = self.request.files["image"][0].body # .body already holds the raw bytes (no .read() needed); [0] takes the first uploaded file
                image = Image.open(io.BytesIO(image))

                # preprocess the image and prepare it for classification
                image = prepare_image(image, target=(224, 224))

                # classify the input image and then initialize the list
                # of predictions to return to the client
                preds = model.predict(image)
                results = imagenet_utils.decode_predictions(preds)
                data["predictions"] = []
                # loop over the results and add them to the list of
                # returned predictions
                for (imagenetID, label, prob) in results[0]:
                    r = {"label": label, "probability": float(prob)}
                    data["predictions"].append(r)

                # indicate that the request was a success
                data["success"] = True
            self.write(data)
def predict(request):
    # initialize the data dictionary that will be returned from the
    # view
    data = {"success": False}

    # ensure an image was properly uploaded to our endpoint
    if request.method == 'POST':
        if request.POST.get("image", None) is not None:
            # read the image in PIL format
            image = request.POST["image"].file.read()
            image = Image.open(io.BytesIO(image))

            # preprocess the image and prepare it for classification
            image = prepare_image(image, target=(224, 224))

            # classify the input image and then initialize the list
            # of predictions to return to the client
            preds = model.predict(image)
            results = imagenet_utils.decode_predictions(preds)
            data["predictions"] = []
            # loop over the results and add them to the list of
            # returned predictions
            for (imagenetID, label, prob) in results[0]:
                r = {"label": label, "probability": float(prob)}
                data["predictions"].append(r)

            # indicate that the request was a success
            data["success"] = True

    return data
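Both handlers above call a prepare_image helper that is not included in the snippets; a hedged sketch following the usual Keras preprocessing recipe:

import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications import imagenet_utils

def prepare_image(image, target):
    # convert to RGB, resize to the network's expected input, add a batch axis,
    # and apply the ImageNet mean subtraction
    if image.mode != "RGB":
        image = image.convert("RGB")
    image = image.resize(target)
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    return imagenet_utils.preprocess_input(image)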
Example #9
	def analyseImage(self, ogimage):
		image = ogimage.copy()
		image = cv2.resize(image,(224,224))

		# possibly division by 255.0 is needed here???
		image = image[...,::-1].astype(np.float32)
		# our image is now a NumPy array of shape (224, 224, 3), assuming
		# TensorFlow "channels last" ordering; expand the dimensions to
		# (1, 224, 224, 3) so it can be passed through the network, and
		# preprocess it by subtracting the mean RGB pixel intensity of the
		# ImageNet dataset
		image = np.expand_dims(image, axis=0)
		image = preprocess_input(image)

		#classify image
		preds = self.model.predict(image)
		p = decode_predictions(preds)

		(imagenetId, label, prob) = p[0][0]


		if prob < 0.5:
			return None

		if label.lower() == 'african_elephant':
			label = 'elephant'
		
		if label.lower() == 'water_buffalo':
			label = 'buffalo'

		data = { label : 1 }

		return super().nextAnalyser(ogimage, data)
Example #10
def make_frame_predictions(video_arr):
    #Use mobilenet to generate top three predictions for each frame
    #Will take a few minutes to run
    mobilenet_model = mobilenet.MobileNet()
    video_predictions_1 = []
    video_predictions_2 = []
    video_predictions_3 = []
    for i in range(0, len(video_arr)):
        img_array = np.expand_dims(video_arr[i], axis=0)
        pImg = mobilenet.preprocess_input(img_array)
        prediction = mobilenet_model.predict(pImg)
        results = imagenet_utils.decode_predictions(prediction)
        if i == 0:
            print(results)
        video_predictions_1.append(results[0][0][1])  #, results[0][0][2]))
        video_predictions_2.append(results[0][1][1])  #, results[0][1][2]))
        video_predictions_3.append(results[0][2][1])  #, results[0][2][2]))

    #Combine into single array of tuples
    video_predictions = [None] * len(video_predictions_1)
    for i in range(0, len(video_predictions_1)):
        video_predictions[i] = [(video_predictions_1[i]),
                                (video_predictions_2[i]),
                                (video_predictions_3[i])]

    return video_predictions
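The video_arr argument is assumed to be a list of 224x224 RGB frames. A hedged sketch of how it might be built with OpenCV; the path and sampling stride are illustrative:

import cv2

def load_video_frames(video_path, every_n=30, size=(224, 224)):
    # sample every_n-th frame, convert BGR -> RGB, and resize for MobileNet
    cap = cv2.VideoCapture(video_path)
    frames, idx = [], 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if idx % every_n == 0:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frames.append(cv2.resize(frame, size))
        idx += 1
    cap.release()
    return frames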
def image_prediction(im):
    
    #img = image.load_img(im, target_size=(224, 224))
    #img = im.resize((224, 224), Image.ANTIALIAS)
    x = image.img_to_array(im)
    x = np.expand_dims(x, axis=0)
    x = keras.applications.resnet50.preprocess_input(x)
    print('Input image shape:', x.shape)
    
    preds = model.predict(x)
    
    n = 3
    img_pred = imagenet_utils.decode_predictions(preds, top=n)
    print('Predicted:', img_pred)

    return img_pred
Example #12
def predict_object(model, img_url):
    image = url_to_image(img_url)
    img = resize_img(image)
    #images.append(image[...,::-1])
    #print(type(model))
    result = model.predict(img)
    pred = decode_predictions(result, top=5)
    #show_image(image[...,::-1],pred[0][0][1])
    return pred[0][0][1].upper().replace("_", " ")
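The url_to_image and resize_img helpers are not shown; plausible, hedged stand-ins:

import urllib.request
import cv2
import numpy as np
from tensorflow.keras.applications.imagenet_utils import preprocess_input

def url_to_image(url):
    # download the image bytes and decode them into a BGR OpenCV array
    resp = urllib.request.urlopen(url)
    data = np.asarray(bytearray(resp.read()), dtype=np.uint8)
    return cv2.imdecode(data, cv2.IMREAD_COLOR)

def resize_img(image, size=(224, 224)):
    # resize, add a batch dimension, and preprocess; the original preprocessing
    # step is unknown, imagenet_utils.preprocess_input is a common choice
    img = cv2.resize(image, size)
    img = np.expand_dims(img.astype("float32"), axis=0)
    return preprocess_input(img)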
Example #13
def return_predictions(file):
    preprocessed_image = prepare_image(file)
    #     with CustomObjectScope({'relu6': mobilenet.relu6}):
    model = load_model("models/model.h5")
    model.summary()
    predictions = model.predict(preprocessed_image)
    results = imagenet_utils.decode_predictions(predictions)

    return results
Example #14
def predict_5(model, processed_im):
    top_n = 5
    preds = model.predict(processed_im)
    top_pred_n = decode_predictions(preds, top=top_n)[0]
    classes = np.argsort(preds[0])[-top_n:][::-1]

    idx = preds.argmax()
    return idx, preds.max(), classes, top_pred_n
def classifyImage(file):
    # Returns a probability scores matrix
    preds = getPrediction(file, model)
    # Decode the matrix to the following format (class_name, class_description, score) and pick the highest score
    # We are going to use class_description, since that describes what the model sees
    prediction = decode_predictions(preds, top=1)
    # prediction[0][0][1] is equal to the first batch, top prediction and class_description
    result = str(prediction[0][0][1])
    return result
Example #16
def classifyImage(file):
    baseFilePath = os.path.dirname(__file__)
    savedFilePath = os.path.join(baseFilePath, r'uploads', secure_filename(file.filename))
    file.save(savedFilePath)
    
    preds = getPrediction(savedFilePath, model)
    prediction = decode_predictions(preds, top=1)
    result = str(prediction[0][0][1])
    return result
def classify_process():
	# load the pre-trained Keras model
	print("* Loading model...")
	model = ResNet50(weights="imagenet")
	print("* Model loaded")

	# continually poll for new images sent over for prediction
	while True:
		# attempt to grab a batch of images from Redis, then initialize
		# the image IDs and the batch of images themselves
		queue = db.lrange(IMAGE_QUEUE, 0, BATCH_SIZE - 1)
		imageIDs = []
		batch = None

		# process whatever is waiting in the queue
		for q in queue:
			# decode the image
			q = json.loads(q.decode("utf-8"))
			image = base64_decode_image(q["image"], IMAGE_DTYPE,
										(1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANS))

			# if the batch is empty, start it with this image
			if batch is None:
				batch = image

			# otherwise, stack the image onto the batch
			else:
				batch = np.vstack([batch, image])

			# update the list of image IDs
			imageIDs.append(q["id"])

		# check whether there is anything to process; this runs after the
		# loop so the whole batch is classified in one forward pass
		if len(imageIDs) > 0:
			# classify the batch
			print("* Batch size: {}".format(batch.shape))
			preds = model.predict(batch)
			results = imagenet_utils.decode_predictions(preds)

			# loop over the image IDs and their result sets
			for (imageID, resultSet) in zip(imageIDs, results):
				# list of prediction results
				output = []

				# append each prediction to the result list
				for (imagenetID, label, prob) in resultSet:
					r = {"label": label, "probability": float(prob)}
					output.append(r)

				# store the result list in Redis, keyed by the image ID
				db.set(imageID, json.dumps(output))

			# remove the processed images from the queue
			db.ltrim(IMAGE_QUEUE, len(imageIDs), -1)

		# delay before polling again
		time.sleep(SERVER_SLEEP)
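classify_process only consumes the queue; a hedged sketch of the producer side that pushes a preprocessed image onto the same Redis list. The db, IMAGE_QUEUE, and IMAGE_DTYPE names mirror the consumer code's globals but are assumptions here:

import base64
import json
import uuid
import numpy as np

def base64_encode_image(arr):
    # serialize the array's raw bytes as base64 text
    return base64.b64encode(arr.tobytes()).decode("utf-8")

def enqueue_image(image):
    # image: preprocessed array of shape (1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANS)
    image_id = str(uuid.uuid4())
    payload = {"id": image_id, "image": base64_encode_image(image.astype(IMAGE_DTYPE))}
    db.rpush(IMAGE_QUEUE, json.dumps(payload))
    return image_id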
Example #18
 def testTFwPrediction(self):
     model = SqueezeNet()
     img = image.load_img('images/cat.jpeg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     preds = model.predict(x)
     decoded_preds = decode_predictions(preds)
     #print('Predicted:', decoded_preds)
     self.assertEqual(decoded_preds[0][0][1], 'tabby')
Example #19
def predict():
    if request.method == "POST":
        f = request.files["file"]
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(basepath, 'uploads', f.filename)
        f.save(file_path)
        preds = model_predict(file_path, model)
        pred_class = decode_predictions(preds, top=1)  # ImageNet Decode
        result = str(pred_class[0][0][1])  # Convert to string
        return render_template("predict.html", imageprediction=result)
Example #20
def predict():
    if request.method == 'POST':
        img = base64_to_pil(request.json)
        img.save("uploads/" + filename + ".png")
        preds = model_predict(img, model)
        pred_proba = "{:.3f}".format(np.amax(preds))
        pred_class = decode_predictions(preds, top=1)
        result = str(pred_class[0][0][1])
        result = result.replace('_', ' ').capitalize()
    searchterm = result
    directory = "./images/"
    url = "https://www.google.co.in/search?q=" + searchterm + "&source=lnms&tbm=isch"
    browser = webdriver.Chrome(r'C:\WebDrivers\chromedriver.exe')
    browser.get(url)
    extensions = {"jpg", "jpeg", "png", "gif"}
    if not os.path.exists(directory):
        os.mkdir(directory)
    for _ in range(500):
        browser.execute_script("window.scrollBy(0,10000)")
    html = browser.page_source.split('["')
    imges = []
    for i in html:
        if i.startswith('http') and i.split('"')[0].split(
                '.')[-1] in extensions:
            imges.append(i.split('"')[0])
    print(imges)

    def save_image(image_urls, directory):
        for img_url in image_urls:
            try:
                path = os.path.join(
                    directory,
                    searchterm + "_" + str(uuid.uuid4()) + ".jpg")
                urllib.request.urlretrieve(img_url, path)
            except Exception as e:
                print(e)

    save_image(imges, directory)
    browser.close()
    fantasy_zip = zipfile.ZipFile('C:\\Users\\User\\Desktop\\FYP\\images.zip',
                                  'w')

    for folder, subfolders, files in os.walk(
            'C:\\Users\\User\\Desktop\\FYP\\images'):

        for file in files:
            if file.endswith('.jpg'):
                fantasy_zip.write(os.path.join(folder, file),
                                  file,
                                  compress_type=zipfile.ZIP_DEFLATED)
    fantasy_zip.close()
    return jsonify(result=result, probability=pred_proba)
Example #21
def mygradCAM(input_path):
    Model = ResNet50
    # load the pre-trained CNN model imagenet
    print("[INFO] loading model...")
    model = Model(weights="imagenet")

    # load the image for predicting
    origImg = cv2.imread(input_path)

    # resize the image
    resized = cv2.resize(origImg, (224, 224))

    # preprocess the image
    image = load_img(input_path, target_size=(224, 224))
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    image = imagenet_utils.preprocess_input(image)

    # run the prediction and take the index i of the highest-scoring class
    result = model.predict(image)
    i = np.argmax(result[0])

    # image is decoded by imagenet_utils
    decode = imagenet_utils.decode_predictions(result)
    (imagenetID, label, prob) = decode[0][0]

    # print label of decoded image
    label = "{}: {:.2f}%".format(label, prob * 100)
    print("[INFO] {}".format(label))

    # use ResNet50 model, class i to gradcam
    cam = GradCAM(model, i)
    # compute heatmap for using mask
    heatmap = cam.compute_heatmap(image)

    # resize heatmap
    heatmap = cv2.resize(heatmap, (origImg.shape[1], origImg.shape[0]))
    # put heatmap on the image
    (heatmap, output) = cam.overlay_heatmap(heatmap, origImg, alpha=0.5)

    # print image predicted
    cv2.rectangle(output, (0, 0), (340, 40), (0, 0, 0), -1)
    cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                (255, 255, 255), 2)
    output = np.vstack([origImg, heatmap, output])
    output = imutils.resize(output, height=700)
    plt.imshow(output)

    # save the heatmap to images_modify/gradImg.jpg
    dirout = 'images_modify/gradImg.jpg'
    print("[SAVE] in " + dirout)
    cv2.imwrite(dirout, heatmap)

    return dirout
Example #22
def predict_result(model, file_path):
    print("+++++++++++++++++++++++++++++++++")
    print(file_path)
    img_path = file_path
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    #   print('x1->', x)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)
    #    print('x2->', x)
    print(model)
    #model.summary()
    preds = model.predict(x)
    #print(preds)
    decoded = decode_predictions(preds)
    print('Predicted:', decoded)
    label = decoded[0][0][1]
    print("predict_result: ", label)
    w = model.get_weights()
    print(w)
Example #23
def upload():
    if request.method == 'POST':
        f = request.files['file']
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(basepath, 'uploads',
                                 secure_filename(f.filename))
        f.save(file_path)
        preds = model_predict(file_path, model)
        pred_class = decode_predictions(preds, top=1)
        result = str(pred_class[0][0][1])
        return result
    return None
def predict(image):
    # model
    model = tf.keras.applications.MobileNetV2(weights="imagenet")

    # image preprocessing
    image = np.asarray(image.resize((224, 224)))[..., :3]
    image = np.expand_dims(image, 0)
    image = image / 127.5 - 1.0

    # output the prediction results
    result = decode_predictions(model.predict(image), 3)[0]
    return result
Example #25
def model_predict():
	image = request.files['image'].read()
	image = Image.open(io.BytesIO(image))
	image_ = processing(image)
	prediction = model.predict(image_)
	results = imagenet_utils.decode_predictions(prediction)
	data = dict()
	data["predictions"] = []
	for (_, label, prob) in results[0]:
		r = {"label": label, "probability": float(prob)}
		data["predictions"].append(r)
	data["success"] = 1
	return jsonify(data)
Example #26
def predict(image: np.ndarray) -> Union[str, None]:

    try:
        with graph.as_default():
            K.set_session(session)
            preds = model.predict(image)

        prediction = str(decode_predictions(preds, top=1)[0])

        return prediction
    except Exception as e:
        print(e)
        return None
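The graph, session, and model globals used above are assumed to be created at import time; a hedged sketch of the typical TensorFlow 1.x setup (the ResNet50 choice is an assumption):

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.imagenet_utils import decode_predictions

session = tf.Session()          # TF 1.x API; under TF 2.x use tf.compat.v1
graph = tf.get_default_graph()
K.set_session(session)
model = ResNet50(weights="imagenet")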
Example #27
def upload():
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']

        model = load_model('model.h5')
        test_image = image.load_img(f, target_size=(224, 224))
        test_image = image.img_to_array(test_image)
        test_image = np.expand_dims(test_image, axis=0)
        preds = model.predict(test_image)
        pred_class = decode_predictions(preds, top=1)  # ImageNet Decode
        result = str(pred_class[0][0][1])
        return render_template('second.html', result=result)
Example #28
def model_predict(img_path, model):
    result = []
    img = image.load_img(img_path, target_size=(224, 224))
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    predictions = decode_predictions(model.predict(x),
                                     top=3)[0]  # top-3 predictions
    for p in predictions:
        _, label, accuracy = p
        result.append((label, accuracy))
    result_html1 = path / 'static' / 'result1.html'
    result_html2 = path / 'static' / 'result2.html'
    result_html = str(result_html1.open().read() + str(result) +
                      result_html2.open().read())
    return HTMLResponse(result_html)
Example #29
def classify_image(image_file):
    """ Classify image using Inception_V3"""

    input_shape = (299, 299)
    img = image.load_img(image_file, target_size=input_shape)
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)

    preds = MODEL.predict(img)
    p_from_im = imagenet_utils.decode_predictions(preds)

    (_, label, prob) = p_from_im[0][0]
    return [label, prob]
Example #30
def predict_result(model, file_path):
    #   print("+++++++++++++++++++++++++++++++++")
    #  print(file_path)
    global cnt
    img_path = file_path
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    #    print(x)
    #print('Input image shape:', x.shape)
    #im = Image.open(file_path)
    #im.show()
    preds = model.predict(x)
    decoded = decode_predictions(preds)
    label = decoded[0][0][1]
    if label == 'toilet_tissue':
        print('Predicted:', decoded)
        print(file_path)
        cnt += 1
    else:
        print("+++++++++++++++++++++++++++++++++++++++")
        print('Predicted:', decoded)