Example #1
0
def classify_image():
    """Classify an uploaded base64 image after enhancing it.

    Reads 'image_data' (a data-URL, e.g. 'data:image/jpeg;base64,...') from
    the POSTed form, decodes it, applies sharpness/contrast/color
    enhancement, re-encodes the result to base64 and hands it to
    util.classify_image. Returns a JSON response with a permissive CORS
    header so a browser front-end on any origin can call this endpoint.
    """
    image_data = request.form['image_data']

    # Decode the data-URL payload (text after the first comma) into a cv2 image.
    encoded_data = image_data.split(',')[1]
    nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    cv2.imwrite('image.jpg', img)

    # Chain the enhancers in memory and save once. The original saved and
    # reopened 'sample.jpg' between every step, degrading the image through
    # repeated lossy JPEG re-encoding.
    im = Image.open('image.jpg')
    im = ImageEnhance.Sharpness(im).enhance(2.75)
    im = ImageEnhance.Contrast(im).enhance(1.2)
    im = ImageEnhance.Color(im).enhance(1.1)
    im.save("sample.jpg")
    # enhanced Image file

    # Re-read the enhanced file and base64-encode it for the classifier.
    with open('sample.jpg', mode='rb') as file:
        imgw = file.read()
    # Decode bytes -> str properly: the original str(b'...')[2:] slice left a
    # stray trailing quote character in the base64 payload (it only decoded
    # because b64decode silently drops non-alphabet characters).
    encoded = base64.b64encode(imgw).decode('ascii')

    # util.classify_image expects a data-URL-like string and splits on ','.
    response = jsonify(util.classify_image("b," + encoded))

    response.headers.add('Access-Control-Allow-Origin', '*')

    return response
def classify_image():
    """Classify the submitted base64 image and return a CORS-enabled JSON reply."""
    # The raw base64 payload arrives as a POSTed form field.
    submitted = request.form['image_data']

    classification = util.classify_image(submitted)
    response = jsonify(classification)

    # Allow any origin so the browser front-end can hit this endpoint.
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
Example #3
0
def classify_image():
    """Classify the submitted base64 image and return a CORS-enabled JSON reply."""
    image_data = request.form['image_data']
    back_response = util.classify_image(image_data)
    response = jsonify(back_response)
    # Fixed header casing ('Access-control-...' -> 'Access-Control-...') for
    # consistency with the other handlers, and removed the leftover debug
    # print() of the backend response.
    response.headers.add('Access-Control-Allow-Origin', '*')

    return response
Example #4
0
def classify_image():
    """Classify the submitted image and reply with fully CORS-enabled JSON."""
    payload = request.form['image_data']
    response = jsonify(util.classify_image(payload))
    # Permissive CORS: any origin, the usual auth headers, and all CRUD verbs.
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type,Authorization',
        'Access-Control-Allow-Methods': 'GET,PUT,POST,DELETE',
    }
    for header_name, header_value in cors_headers.items():
        response.headers.add(header_name, header_value)
    return response
Example #5
0
def classify_image():
    """Classify the image, then fetch up to 10 matching Unsplash photo URLs.

    The classifier's label is used as the Unsplash search query; the 'raw'
    URLs from the first page of results are returned as a JSON list with a
    permissive CORS header.
    """
    image_data = request.form['image_data']
    url = "https://api.unsplash.com/search/photos"
    # SECURITY: the client_id below is a hard-coded API key checked into
    # source; it should be moved to configuration / an environment variable.
    query = {"page": "1",
             "client_id": "vrB0FKvJ2uIq780UySc3VE8LI8uxKhja3MyxU1MN9MI"}

    q = util.classify_image(image_base64_data=image_data)
    query["query"] = q
    api_response = requests.get(url, params=query)
    # Take at most 10 results: the original indexed results[0..9] blindly and
    # raised IndexError whenever Unsplash returned fewer matches.
    data = [item["urls"]["raw"]
            for item in api_response.json()["results"][:10]]
    response = jsonify(data)
    response.headers.add('Access-Control-Allow-Origin', '*')

    return response
Example #6
0
# TFLite MobileNet benchmark: load a quantized model and time inference.
model_name = 'mobilenet_v1_1.0_224_quant.tflite'
repeat = 10  # number of timed inference runs

# NOTE(review): model_dir is read before any visible assignment — presumably
# defined earlier in the file; confirm against the surrounding script.
model_dir = download_model_zoo(model_dir, model_name)
tflite_model_file = os.path.join(model_dir, model_name)
# Close the model file deterministically instead of leaking the handle
# that open(...).read() left behind.
with open(tflite_model_file, "rb") as model_file:
    tflite_model_buf = model_file.read()
try:
    import tflite
    tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
    # Older tflite packages expose the Model class one module level deeper.
    import tflite.Model
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)

interpreter = Interpreter(tflite_model_file, num_threads=get_cpu_count())
interpreter.allocate_tensors()

# Input tensor layout is NHWC: (batch, height, width, channels).
_, height, width, _ = interpreter.get_input_details()[0]['shape']
image = load_test_image('uint8', height, width)

numpy_time = np.zeros(repeat)

for i in range(repeat):
    # perf_counter is monotonic and higher-resolution than time.time, so it
    # is the correct clock for benchmarking elapsed wall time.
    start_time = time.perf_counter()
    results = classify_image(interpreter, image)
    numpy_time[i] = (time.perf_counter() - start_time) * 1000  # ms

print("tflite %-20s %-19s (%s)" % (model_name, "%.2f ms" % np.mean(numpy_time),
                                   "%.2f ms" % np.std(numpy_time)))