Example #1
def imageprocess(request):
    form = ImageUploadForm(request.POST, request.FILES)
    if form.is_valid():
        handle_uploaded_file(request.FILES['image'])

    # ML code: classify the saved image with a pretrained model
    model = ResNet50(weights='imagenet')
    img_path = 'img.jpg'

    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    decoded = decode_predictions(preds, top=3)[0]
    print('Predicted:', decoded)

    # Build (label, probability in %) pairs for the template
    res = [(label, np.round(prob * 100, 2)) for _, label, prob in decoded]
    return render(request, 'result.html', {'res': res})
Example #2
def upload_file():
    if request.method == 'POST':
        f = request.files['file']
        #path = os.path.join(app.config['UPLOAD_FOLDER'], f.filename)
        model = ResNet50(weights='imagenet')
        #img = image.load_img(path, target_size=(224,224))
        img = image.load_img(f, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        preds_decoded = decode_predictions(preds, top=3)[0]
        print(preds_decoded)
        #f.save(path)
        return render_template('uploaded.html',
                               title='Success',
                               predC1=preds_decoded[0][1],
                               predC2=preds_decoded[1][1],
                               predC3=preds_decoded[2][1],
                               predP1=preds_decoded[0][2],
                               predP2=preds_decoded[1][2],
                               predP3=preds_decoded[2][2])
Example #3
def predict():
    message = request.get_json(force=True)
    encoded = message['image']
    decoded = base64.b64decode(encoded)
    image = Image.open(io.BytesIO(decoded))
    processed_image = preprocess_image(image, target_size=(224, 224))

    with graph.as_default():
        prediction = model.predict(processed_image)
        # decode_predictions returns [(wnid, name, probability), ...] per sample
        wnid, name, accuracy = decode_predictions(prediction, top=1)[0][0]

        page = requests.get(
            "http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=" +
            wnid)
        soup = BeautifulSoup(page.content, 'html.parser')
        str_soup = str(soup)
        split_urls = str_soup.split('\r\n')
        split_urls = split_urls[0:20]
        images_list = []
        for url in split_urls:
            if url.startswith('http://farm'):
                images_list.append(url)

    response = {
        'name': name,
        'accuracy': str(accuracy),
        'prediction': images_list
    }

    return jsonify(response)
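Example #3 leans on a module-level model, graph, and a preprocess_image helper that the snippet does not show. A minimal sketch of what those globals might look like, assuming a TF1-era Keras/Flask setup (the helper body is a guess):

# Hypothetical module-level setup assumed by Example #3; not part of the original snippet.
import numpy as np
import tensorflow as tf
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.preprocessing import image as keras_image

model = ResNet50(weights='imagenet')
graph = tf.get_default_graph()  # TF1-style handle so request threads reuse the same graph

def preprocess_image(img, target_size):
    # Force RGB, resize, convert to a (1, H, W, 3) batch, and apply ResNet50 preprocessing
    if img.mode != 'RGB':
        img = img.convert('RGB')
    img = img.resize(target_size)
    x = keras_image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    return preprocess_input(x)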
Example #4
def talker():
    pub = rospy.Publisher('chatter', String, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10)  # 10hz

    model = ResNet50(weights='imagenet')
    img_path = '/home/rajas/workspace/monorepo/src/image_classification/scripts/elephant.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)
    print('Predicted:', decode_predictions(preds, top=3)[0])
    # Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]

    while not rospy.is_shutdown():
        #import ipdb; ipdb.set_trace()
        hello_str = str(decode_predictions(
            preds, top=3)[0]) + " %s" % rospy.get_time()
        rospy.loginfo(hello_str)
        pub.publish(hello_str)
        rate.sleep()
Example #5
def classify_image(path):
    with urllib.request.urlopen(path) as url:
        with open('temp/temp.jpg', 'wb') as f:
            f.write(url.read())
    K.clear_session()
    classifier=ResNet50()
    #print(classifier.summary())
    new_image = image.load_img('temp/temp.jpg', target_size=(224, 224))
    transformed_image= image.img_to_array(new_image)
    #print(transformed_image.shape)
    transformed_image=np.expand_dims(transformed_image,axis=0)
    #print(transformed_image.shape)
    transformed_image=preprocess_input(transformed_image)
    #print(transformed_image)
    y_pred= classifier.predict(transformed_image)
    #print(y_pred)
    #print(y_pred.shape)

    label = decode_predictions(y_pred, top=5)
    # retrieve the most likely result, i.e. highest probability
    decoded_label = label[0][0]

    print("######===============########")
    # print the classification
    print('%s (%.2f%%)' % (decoded_label[1], decoded_label[2]*100 ))
    print("######===============########")

    # Destroy references
    del classifier, new_image, transformed_image, y_pred, label
    K.clear_session()
    return ({"Prediction": decoded_label[1], "confidence": decoded_label[2] * 100, "url": path})
Example #6
def recognize(img_url):
    print("Getting image from web or local path.")

    img_path = "/var/tmp/image_to_rec.jpg"
    if img_url.startswith("file:"):
        img_path = img_url.replace("file://", "")
    else:
        response = urllib2.urlopen(img_url)
        image_from_web = response.read()

        # write the downloaded bytes in binary mode
        with open(img_path, "wb") as tmp_img:
            tmp_img.write(image_from_web)

    print("Processing image...")
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    print("Classifying image")
    preds = model.predict(x)
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)
    print('Predicted:', decode_predictions(preds, top=3)[0])
    return decode_predictions(preds, top=3)
Example #7
def predict(filename):
    logger.info("predicting")
    img = image.load_img(filename, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    logger.info(x.shape)
    K.clear_session()
    model = ResNet50(weights='imagenet')
    preds = model.predict(x)
    K.clear_session()
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)
    decoded = decode_predictions(preds, top=5)[0]
    logger.info('Predicted:' + str(decoded[:3]))
    return decoded
Example #8
def predict_images(bq_rows, yaml_config):
    pkey = bq_rows[0]
    image_cols = bq_rows[1]

    # Get images
    logging.info("Getting images")
    imgs_dir = "imgs/{}".format(pkey)
    util.run_command("mkdir -p {}".format(imgs_dir), throw_error=True)
    util.gcs_download_dir(yaml_config['gcs_img_path'].format(pkey), imgs_dir)

    # Load model
    start = time.time()
    model = ResNet50(weights='imagenet')
    end = time.time()
    logging.info("Model loading for breed {} took: {:.2f} sec".format(
        pkey, end - start))

    # Predicting
    logging.info("Predicting on breed: {}".format(pkey))
    start = time.time()
    img_predictions = []
    for image_col in image_cols:
        image_key = image_col['url'].split('/')[-1]
        img_path = abspath(imgs_dir + "/" + image_key)

        loaded_img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(loaded_img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        preds = model.predict(x)

        _, pred_breed, score = decode_predictions(preds, top=1)[0][0]
        logging.info('For image: {}, predicted: {} with score: {}'.format(
            image_col['url'], pred_breed, score))
        img_predictions.append({
            'url': image_col['url'],
            'breed': image_col['breed'],
            'prediction': pred_breed,
            'score': str(score)
        })
    end = time.time()
    logging.info("Prediction took: {:.2f} sec".format(end - start))

    # Clean up images
    util.run_command("rm -rf {}".format(imgs_dir), throw_error=True)
    return img_predictions
Example #9
    def predicition(self, filename):
        model = resnet50.ResNet50()

        img = image.load_img(filename, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = resnet50.preprocess_input(x)

        predictions = model.predict(x)
        predicted_classes = resnet50.decode_predictions(predictions, top=9)

        top_value = []
        for imagenet_id, name, likelihood in predicted_classes[0]:
            top_value.append(name)

        if len(top_value) > 0:
            return top_value[0]
        else:
            return "Unknown"
Example #10
def predict(url, top_n=3):
    from PIL import Image
    from keras.applications.resnet50 import ResNet50
    from keras.preprocessing import image
    from keras.applications.resnet50 import preprocess_input, decode_predictions
    import numpy as np
    import requests
    from io import BytesIO

    response = requests.get(url)
    try:
        img = Image.open(BytesIO(response.content))

        target_size = (224, 224)

        model = ResNet50(weights='imagenet')
        if img.size != target_size:
            img = img.resize(target_size)

        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)

        return decode_predictions(preds, top=top_n)[0][0][1]
    except OSError:
        return ''
Example #11
def analyze(img, img_target, model_name, top=3):
    """

    :param img_target: tuple
    :param model_name: str
    :param top: int
    :param img: PIL.Image
    """
    if top is None:
        top = 3
    top = int(top)

    # prepare img
    prepared_img = prepare_image(img, img_target)

    # predict
    predictions = get_model(model_name).predict(prepared_img)
    decoded_predictions = decode_predictions(predictions, top=top)[0]

    # transform data
    listed_predictions = list()
    for class_name, class_description, score in decoded_predictions:
        listed_predictions.append(dict(class_name=class_name, class_description=class_description, score=str(score)))

    return {
        "used_model": model_name,
        "predictions": listed_predictions,
        "target_dimensions": dimensions(img_target)
    }
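Example #11 calls prepare_image, get_model, and dimensions without defining them; prepare_image would look much like the preprocess_image sketch under Example #3. A rough sketch of the other two helpers, with the model registry contents assumed:

# Hypothetical helpers for Example #11; the names come from the example, the bodies are assumptions.
from keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16

_MODEL_CACHE = {}

def get_model(model_name):
    # Build each Keras application once and cache it so later requests skip the weight loading
    if model_name not in _MODEL_CACHE:
        builders = {'resnet50': ResNet50, 'vgg16': VGG16}
        _MODEL_CACHE[model_name] = builders[model_name](weights='imagenet')
    return _MODEL_CACHE[model_name]

def dimensions(img_target):
    # Report the (width, height) target tuple as a JSON-friendly dict
    return {'width': img_target[0], 'height': img_target[1]}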
Example #12
def predict(model, image_url, target_size, top_n=3):
    # img_to_array expects a PIL image, so load the file from the given path first
    # (load_img does not fetch remote URLs) and resize it to target_size
    img = image.load_img(image_url, target_size=target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    return decode_predictions(preds, top=top_n)[0]
Example #13
def resnet_classifier(image_path):
    img = image.load_img(image_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = rn50.predict(x)
    return set([x[1] for x in decode_predictions(preds, top=2)[0]])
Example #14
def make_prediction(filename, top=5):
    status = 'SUCCESS'

    # ResNet50 prediction
    img = image.load_img(filename, target_size=(224, 224))
    X = np.expand_dims(image.img_to_array(img), axis=0)
    X = preprocess_input(X)

    model = ResNet50(weights='imagenet')
    preds = model.predict(X)
    y_pred = decode_predictions(preds, top=top)[0]
    y_pred = {x[1]: "{:.5f}".format(float(x[2])) for x in y_pred}

    # Own model prediction
    with open('VGG16_224x224x3_D512_D128.model', 'rb') as infile:
        model = pickle.load(infile)

    with open('class_indices', 'rb') as infile:
        class_indices = pickle.load(infile)

    img = image.load_img(filename, target_size=(224, 224))
    tmp = np.expand_dims(image.img_to_array(img), axis=0)
    tmp2 = tmp / 255

    own_pred = model.predict_proba(tmp2)
    y_own_pred = {}
    for breed, pred in zip(list(class_indices.keys()), own_pred[0]):
        y_own_pred[breed] = "{:.5f}".format(pred)

    return status, y_pred, y_own_pred
Example #15
def do_predict(mdl, pred_data):
    # obtain predictions
    #if len(pred_data)>1: print("getting prediction for {} images".format(len(pred_data)))
    st = time.time()
    keys = pred_data.keys()
    pth_imgs = [pred_data[k] for k in keys]

    arr_imgs = []
    for pth_img in pth_imgs:
        img = image.load_img(pth_img) # Load the image file, expecting to be 224x224 pixels (required by this model)
        x = image.img_to_array(img) # Convert the image to a numpy array
        x = resnet50.preprocess_input(x) # Scale the input image to the range used in the trained network
        arr_imgs.append(x)

    x = np.stack( arr_imgs, axis=0 )
    predictions = mdl.predict(x) # Run the image through the deep neural network to make a prediction
    #if len(pred_data)>1: print("I made a set of predictions in {:.2f}s".format(time.time()-st))

    decoded_predictions = resnet50.decode_predictions(predictions, top=len(predictions[0]) ) # Look up the names of the predicted classes. Index zero is the results for the first image.
    response = {}
    for key, prediction in zip(keys,decoded_predictions):
        d = {}
        for imagenet_id, name, likelihood in prediction:
            d[name.lower()] = float(round(likelihood,ROUND_FLOATS_TO))
        response[key] = d

    return response
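A hypothetical call to do_predict from Example #15, assuming a module-level ROUND_FLOATS_TO constant and images that are already 224x224 as the loader comment requires:

# Sketch of how do_predict might be invoked; the constant and file names are placeholders.
from keras.applications import resnet50

ROUND_FLOATS_TO = 4
mdl = resnet50.ResNet50(weights='imagenet')
result = do_predict(mdl, {'img1': 'cat_224x224.jpg', 'img2': 'dog_224x224.jpg'})
# result maps each key to {class_name: probability, ...}, e.g. {'img1': {'tabby': 0.61, ...}, ...}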
Example #16
def upload():
    # Get the image data from the POST
    data = request.form['img']
    # This decodes it into an image
    img = Image.open(BytesIO(base64.b64decode(data)))
    # Resize to 224x224 and make sure it's RGB
    img = img.resize((224,224))
    img = img.convert("RGB")
    # Turn it into a matrix (224x224x3)
    x = image.img_to_array(img)
    # Add a dimension to make it (1x224x224x3)
    x = np.expand_dims(x, axis=0)
    # This remaps the pixel values to a negative<->positive range
    x = preprocess_input(x)
    # Get a prediction
    preds = model.predict(x)
    # decode the results into a list of tuples (class, description, probability)
    preds = decode_predictions(preds, top=3)[0]
    data = []
    generated_text = lstmText(preds)
    term = " ".join(preds[0][1].split("_")).capitalize()

    for pred in preds:
        data.append({'id': pred[0], 'term': pred[1], 'score': float(pred[2])})

    return jsonify(status='got image',prediction=data, text=generated_text, term=term)
Example #17
def predictFromVGG16(img):
    # Load the pretrained VGG16 model
    # (the architecture and the trained ImageNet weights are loaded together)
    model = VGG16(weights='imagenet')
    # model.summary()

    # Load the image file given as the argument
    # (it gets resized to 224x224, the VGG16 default)
    # img = image.load_img(filename, target_size=(224, 224))

    # Convert the loaded PIL image to an array
    x = image.img_to_array(img)

    # Convert the 3-D tensor (rows, cols, channels) into a
    # 4-D tensor (samples, rows, cols, channels);
    # there is only one input image, so samples=1
    x = np.expand_dims(x, axis=0)

    # Predict the top class; decode_predictions() converts
    # the 1000 VGG16 class indices into readable labels
    preds = model.predict(preprocess_input(x))
    results = decode_predictions(preds, top=1)[0]
    obj = []
    for result in results:
        # Only keep results predicted with more than 20% confidence.
        # The threshold is kept low so that something is still shown even
        # when no object is clearly detected; raising it means nothing is
        # displayed unless detection is confident.
        if result[2] > 0.2:
            print(result)
            obj = (result[1], result[2])
        else:
            obj = (-1, -1)
        break
    return obj
Example #18
def upload():
    # Get the image data from the POST
    data = request.form['img']
    # This decodes it into an image
    img = Image.open(BytesIO(base64.b64decode(data)))
    # Resize to 224x224 and make sure it's RGB
    img = img.resize((224,224))
    img = img.convert("RGB")

    # Turn it into a matrix (224x224x3)
    x = image.img_to_array(img)
    # Add a dimension to make it (1x224x224x3)
    x = np.expand_dims(x, axis=0)
    # This remaps the pixel values to a negative<->positive range
    x = preprocess_input(x)

    # Get a prediction
    preds = model.predict(x)
    # decode the results into a list of tuples (class, description, probability)
    preds = decode_predictions(preds, top=3)[0]

    # This is a little goofy but we need to convert it to something that works
    # with jsonify. Python sets do not and also numpy float32's do not
    # So made a list of little dictionaries
    data = []
    for pred in preds:
        data.append({'id': pred[0], 'term': pred[1], 'score': float(pred[2])})
    # Send to p5
    return jsonify(status='got image',prediction=data)
Example #19
def callback(image_msg):
    print("Received Image")
    #First convert the image to OpenCV image
    cv_image = bridge.imgmsg_to_cv2(image_msg, desired_encoding="bgr8")

    (rows, cols, channels) = cv_image.shape
    if cols > 60 and rows > 60:
        cv2.circle(cv_image, (50, 50), 10, 255)

    cv2.imshow("Classify", cv_image)
    cv_image = cv2.resize(cv_image, target_size)  # resize image
    np_image = np.asarray(cv_image)  # read as np array
    np_image = np.expand_dims(np_image,
                              axis=0)  # Add another dimension for tensorflow
    np_image = np_image.astype(
        float)  # preprocess needs float64 and img is uint8
    np_image = preprocess_input(np_image)  # Regularize the data

    global graph  # This is a workaround for asynchronous execution
    with graph.as_default():
        preds = model.predict(np_image)  # Classify the image
        # decode returns a list  of tuples [(class,description,probability),(class, descrip ...
        pred_string = decode_predictions(preds,
                                         top=1)[0]  # Decode top 1 predictions
        msg_string.data = pred_string[0][1]
        msg_float.data = float(pred_string[0][2])
        pub.publish(msg_string)
        pub1.publish(msg_float)
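The callback in Example #19 uses several module-level objects (bridge, target_size, model, graph, msg_string, msg_float, pub, pub1) that are not shown. A sketch of what that node setup might look like; the node and topic names are assumptions:

# Hypothetical ROS node setup for Example #19; topic names are guesses, the callback above is reused.
import rospy
import tensorflow as tf
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from std_msgs.msg import String, Float32
from keras.applications.resnet50 import ResNet50

target_size = (224, 224)
bridge = CvBridge()
model = ResNet50(weights='imagenet')
graph = tf.get_default_graph()  # TF1-style workaround for the subscriber thread
msg_string = String()
msg_float = Float32()

rospy.init_node('image_classifier', anonymous=True)
pub = rospy.Publisher('object_class', String, queue_size=1)
pub1 = rospy.Publisher('object_score', Float32, queue_size=1)
rospy.Subscriber('camera/image_raw', Image, callback, queue_size=1)
rospy.spin()

The callback itself additionally assumes cv2, numpy, preprocess_input, and decode_predictions are imported in the same module.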
Example #20
def plot_top_prediction_classes(ax, preds):
    max_preds = preds.max(axis=0).reshape((1, 1000))
    order = numpy.argsort(max_preds)
    decoded = decode_predictions(max_preds)
    for i in range(5):
        ax.plot(preds[:, order[0][-1 - i]].transpose(), label=decoded[0][i][1])
    ax.legend()
Example #21
def predict(filename, featuresize):
    img = image.load_img(filename, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    preds = model.predict(preprocess_input(x))
    results = decode_predictions(preds, top=featuresize)[0]
    return results
Example #22
	def post(self):
		#try: 				
		formData = request.files.get('file','')
		img = image.load_img(formData, target_size=(224, 224))
		x = image.img_to_array(img)
		x = np.expand_dims(x, axis=0)
		x = preprocess_input(x)

		print('predicting...')
		with graph.as_default():
			preds = clf.predict(x)
		print('done')
		print('predicted', preds)
		# decode the results into a list of tuples (class, description, probability)
		# (one such list for each sample in the batch)
		pred_out = str(decode_predictions(preds, top=3)[0])
		print('Predicted:', pred_out)

		response = jsonify({
			"statusCode": 200,
			"status": "Prediction made",
			"result": "Prediction: " + pred_out  # str(data)
			})
		response.headers.add('Access-Control-Allow-Origin', '*')
		return response
Example #23
def adversarial_noise(model,
                      image,
                      target_class,
                      noise_limit,
                      sess,
                      confidence=0.99,
                      eps=1.0,
                      max_iter=200):
    original = np.expand_dims(image, axis=0)
    target = np.array([target_class])
    encoded_target = to_categorical(target, num_classes=1000)

    wrap = KerasModelWrapper(model)
    fgsm = FastGradientMethod(wrap, sess=sess)
    fgsm_params = {
        'eps': eps,
        'clip_min': 0.0,
        'clip_max': 255.0,
        'y_target': encoded_target
    }

    noisy = original
    for i in range(0, max_iter):
        noisy = fgsm.generate_np(noisy, **fgsm_params)
        current_preds = model.predict(prep_image(noisy))
        current_confidence = current_preds[0][target_class]
        print(decode_predictions(current_preds, top=3)[0])
        print(current_confidence)
        if current_confidence > confidence:
            break

    return np.reshape(noisy, noisy.shape[1:])
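adversarial_noise calls a prep_image helper that is not shown. Since FGSM here operates on raw 0-255 pixels (see clip_min/clip_max), prep_image presumably just applies ImageNet preprocessing to a copy before classification; a sketch under that assumption:

# Hypothetical prep_image for Example #23; assumed to preprocess a copy of the raw-pixel batch.
import numpy as np
from keras.applications.resnet50 import preprocess_input

def prep_image(batch):
    # Copy first so the adversarial batch stays in raw pixel space for the next FGSM step
    return preprocess_input(np.copy(batch))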
Example #24
def predict(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds, top=10)[0])
Example #25
def predict():
    # initialize the data dictionary that will be returned from the
    # view
    data = {"success": False}
    # ensure an image was properly uploaded to our endpoint
    if flask.request.method == "POST":
        if flask.request.files.get("image"):
            # read the image in PIL format
            image = flask.request.files["image"].read()
            image = Image.open(io.BytesIO(image))

            # preprocess the image and prepare it for classification
            image = prepare_image(image, target=(224, 224))

            # classify the input image and then initialize the list
            # of predictions to return to the client
            preds = model.predict(image)
            results = decode_predictions(preds)
            data["predictions"] = []

            # loop over the results and add them to the list of
            # returned predictions
            for (imagenetID, label, prob) in results[0]:
                r = {"label": label, "probability": float(prob)}
                data["predictions"].append(r)

            # indicate that the request was a success
            data["success"] = True

    # return the data dictionary as a JSON response
    return flask.jsonify(data)
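A hypothetical client for the Flask view in Example #25, assuming the route is /predict on a local development server (the snippet does not show the route or port):

# Placeholder client call for Example #25; URL and file name are illustrative only.
import requests

resp = requests.post('http://localhost:5000/predict',
                     files={'image': open('dog.jpg', 'rb')})
print(resp.json())  # e.g. {"success": true, "predictions": [{"label": ..., "probability": ...}]}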
Example #26
def predecir_resnet50(ruta_imagen='glue_sticks.jpg'):
    from keras.applications.resnet50 import ResNet50
    from keras.preprocessing import image
    from keras.applications.resnet50 import preprocess_input, decode_predictions
    from keras import backend as K
    import numpy as np

    model = ResNet50(weights='imagenet')
    img_path = ruta_imagen
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    K.clear_session()
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)
    resultados = decode_predictions(preds, top=3)[0][0]
    descripcion = resultados[1]
    presicion = resultados[2]
    return {
        'descripcion': descripcion,
        'precision': presicion * 100,
        'imagen': str(img_path)
    }
Example #27
def respred(input_):
    print("")
    print("Now Loading resnet50 for prediction")
    print("")
    resmodel = ResNet50(weights='imagenet')
    image = load_img(input_, target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # reshape data for the model
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare the image for the ResNet50 model
    image = preprocess_input(image)
    # predict the probability across all output classes
    yhat = resmodel.predict(image)
    # convert the probabilities to class labels
    label = decode_predictions(yhat)
    # retrieve the most likely result, e.g. highest probability
    label = label[0][0]
    # print the classification
    class_, percentage = (label[1], label[2] * 100)
    print("")
    print(
        "Loaded Resnet50 predicted the input image as {} with {}% confidence".
        format(class_, round(percentage, 2)))
    print("")
Example #28
def predict(q):
    model = ResNet50(weights='imagenet')
    while True:
        client_socket = q.get()
        if client_socket is None:
            break
        msg = ''
        while msg[-7:] != '##END##':
            ss = client_socket.recvfrom(10000)
            msg += ss[0].decode('utf-8')
        print(len(msg))
        dic = json.loads(msg[:-7])
        chat_id = dic['chat_id']
        image_data = base64.b64decode(dic['image'])
        img_path = 'predict.png'
        with open(img_path, 'wb') as outfile:
            outfile.write(image_data)
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = decode_predictions(model.predict(x), top=5)[0]
        pred_list = []
        for entry in preds:
            dic = {'label': entry[1], 'proba': str(entry[2])}
            pred_list.append(dic)
        result = {'predictions': pred_list, 'chat_id': chat_id}
        print(result)
        client_socket.sendall((json.dumps(result) + '##END##').encode('utf-8'))
        client_socket.close()
Example #29
def compute():
    model = ResNet50(include_top=True, weights='imagenet')

    tag = request.args.get('image')

    myimages = []
    myimages.append(tag)
    images = []

    for ii in myimages:
        images.append(load_img(ii, target_size=(224, 224)))

    for y in range(len(images)):
        images[y] = img_to_array(images[y])
        x = np.expand_dims(images[y], axis=0)
        x = preprocess_input(x)
        yhat = model.predict(x)

        label_t = decode_predictions(yhat)

        label = label_t[0][0]

        a = label[1]
        l = []
        l.append(a)
        response = app.response_class(response=json.dumps(l),
                                      status=200,
                                      mimetype='application/json')
        return response
Example #30
def print_predictions(filename, preds, threshold=DEFAULT_THRESHOLD):
    """prints tabular output: filename, label, prob"""
    y_pred = decode_predictions(preds, top=3)
    for lev1 in y_pred:
        for _, label, prob in lev1:
            if prob > threshold:
                print(f"{filename}, {label},{prob}")
Example #31
def classifyImage(fname):
    img = image.load_img(fname, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    
    pred = decode_predictions(model.predict(x), top=1)[0][0]
    
    return (pred[1], pred[2])
Example #32
def predict(model, img, target_size, top_n=3):
    """Run model prediction on image"""
    if img.size != target_size:
        img = img.resize(target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    return decode_predictions(preds, top=top_n)[0]
Example #33
def predict(img):
  print('{}:'.format(img))
  # Load the image file, resizing it to 224x224 pixels (required by this model)
  img_df = image.load_img(img, target_size=(224, 224))
  x = image.img_to_array(img_df)  # [row[columns[r, g, b]]]
  x = np.expand_dims(x, axis=0)  # array of images

  # Scale and predict
  predictions = model.predict(resnet50.preprocess_input(x))
  predicted_classes = resnet50.decode_predictions(predictions, top=5)
  for imagenet_id, name, likelihood in predicted_classes[0]:
    print(' - {}: {:.2f} likelihood'.format(name, likelihood))
  print('\n')
Example #34
def predict(model, img, target_size, top_n=3):
  """Run model prediction on image
  Args:
    model: keras model
    img: PIL format image
    target_size: (w,h) tuple
    top_n: # of top predictions to return
  Returns:
    list of predicted labels and their probabilities
  """
  if img.size != target_size:
    img = img.resize(target_size)

  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)
  preds = model.predict(x)
  return decode_predictions(preds, top=top_n)[0]
Example #35
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np


model = ResNet50(weights='imagenet')

img_path = '/Users/mosampatel/Documents/mosam.jpeg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])
Example #36
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
from docopt import docopt

height = 224
width = 224
channels = 3
top = 5


def load_image(path):
    img = image.load_img(path, target_size=(224, 224))
    x = image.img_to_array(img)
    return x


if __name__ == '__main__':
    args = docopt(__doc__)
    paths = args['IMAGE_PATH']
    model = ResNet50(weights='imagenet')

    x = np.zeros((len(paths), height, width, channels))
    for i, path in enumerate(paths):
        x[i] = load_image(path)
    x = preprocess_input(x)

    predictions = decode_predictions(model.predict(x), top=top)
    for prediction in predictions:
        print(prediction)
Example #37
File: main1.py Project: Daiver/jff
target_size = (128, 128)
#target_size = (224, 224)

#model = ResNet50(input_shape=(target_size[0], target_size[1], 3), weights='imagenet')
model = keras.applications.mobilenet.MobileNet(input_shape=(target_size[0], target_size[1], 3), alpha=1.0, depth_multiplier=1, dropout=1e-3, include_top=True, weights='imagenet', input_tensor=None, pooling=None)

print('n params', model.count_params())

#img_path = 'elephant.jpg'
#img_path = '/home/daiver/c2VzcDQrsVI.jpg'
img_path = '14586818_Alt01.jpg'

#img = image.load_img(img_path, target_size=target_size)
#x = image.img_to_array(img)

x = cv2.imread(img_path)
x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
height, width, channels = x.shape
targetDim = min(height, width)  # crop a square from the top-left using the shorter side
x = x[:targetDim, :targetDim]
x = cv2.resize(x, target_size).astype(np.float32)

x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
for pred in decode_predictions(preds, top=10)[0]:
    print(pred)
Example #38
# create a model object
model = Model(inputs=resnet.input, outputs=activation_layer.output)

# get the feature map weights
final_dense = resnet.get_layer('fc1000')
W = final_dense.get_weights()[0]


while True:
  img = image.load_img(np.random.choice(image_files), target_size=(224, 224))
  x = preprocess_input(np.expand_dims(img, 0))
  fmaps = model.predict(x)[0] # 7 x 7 x 2048

  # get predicted class
  probs = resnet.predict(x)
  classnames = decode_predictions(probs)[0]
  print(classnames)
  classname = classnames[0][1]
  pred = np.argmax(probs[0])

  # get the 2048 weights for the relevant class
  w = W[:, pred]

  # "dot" w with fmaps
  cam = fmaps.dot(w)

  # upsample to 224 x 224
  # 7 x 32 = 224
  cam = sp.ndimage.zoom(cam, (32, 32), order=1)

  plt.subplot(1,2,1)
Example #39
    def do_POST(s):
        length = int(s.headers['Content-Length'])
        body = s.rfile.read(length).decode('utf-8')
        if s.headers['Content-type'] == 'application/json':
            post_data = json.loads(body)
        else:
            post_data = urllib.parse.parse_qs(body)

        modelid = post_data['model']
        try:
            model = model_impls[modelid]['class'](**model_impls[modelid]['params'])
        except Exception as e:
            logger.error("Unable to load model: {reason}".format(reason=e))
            s.send_response(300)
            s.send_header("Content-type", "application/json")
            s.end_headers()
            s.wfile.write(json.dumps({
                    "status": 300,
                    "message": str(e),
                 }).encode())
            return
        
        target_size = (dict([(m["id"],m['image_size']) for m in models]))[modelid]
        concepts = []
        for annotation in post_data['annotations']:
            aid = annotation['annotationid']
            begin = annotation['begin']
            end = annotation['end']
            
            batch_x = np.zeros((len(annotation['frames']),target_size,target_size,3), dtype=np.float32)
            for i,frame in enumerate(annotation['frames']):
                # Load image to PIL format
                img = Image.open(BytesIO(base64.b64decode(frame['screenshot'])))
                # cache frame - FIXME: currently there is no way to identify the video - the same timestamp will overwrite an old frame (hash?)
                img.save(os.path.join(CACHE_DIR,'{0}.png'.format(frame['timecode'])))
                if img.mode != 'RGB':
                    img = img.convert('RGB')
                hw_tuple = (target_size, target_size)
                if img.size != hw_tuple:
                    logger.warn("Scaling image to model size - this should be done in advene!")
                    img = img.resize(hw_tuple)
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                batch_x[i] = x[0,:,:,:]
            preds = model.predict_on_batch(np.asarray(batch_x))

            # decode the results into a list of tuples (class, description, probability)
            # (one such list for each sample in the batch)
            decoded = decode_predictions(preds, top=top_n_preds)
            confidences = dict()
            for t in itertools.chain.from_iterable(decoded):
                if t[1] in confidences:
                    confidences[t[1]].append(float(t[2]))
                else:
                    confidences[t[1]] = [float(t[2])]
            logger.debug(confidences)
            
            concepts.extend([
            {
                'annotationid': aid,
                'confidence': max(confidences[l]),
                #FIXME: set correct timecode - set timecode of frame with max confidence?
                'timecode': annotation['begin'], #timestamp_in_ms,
                'label': l,
                'uri': 'http://concept.org/%s' % l
            } for l in confidences]
            )

        logger.debug(concepts)
        s.send_response(200)
        s.send_header("Content-type", "application/json")
        s.end_headers()
        response=json.dumps({
            "status": 200,
            "message": "OK",
            "data": {
                'media_filename': post_data["media_filename"],
                'media_uri': post_data["media_uri"],
                'concepts': concepts
            }
        })
        s.wfile.write(response.encode())