Example #1
def show_crop2(filename):
    # first prediction (always classifies the saved crop step2.jpg)
    filename = os.path.join(app.config['UPLOAD_FOLDER'], 'step2.jpg')
    tensor = label_image.read_tensor_from_image_file(filename)
    input_operation = model.get_operation_by_name(input_name)
    output_operation = model.get_operation_by_name(output_name)
    with tf.Session(graph=model) as sess:
         results = sess.run(output_operation.outputs[0],
                            {input_operation.outputs[0]: tensor})
    results = list(np.squeeze(results))    
    print(results)
    index_ = results.index(max(results))
    predizione_1 = str(labels[index_])
    prob_1 = results[index_]
    
    # second prediction, using the step2_90.jpg crop
    filename = os.path.join(app.config['UPLOAD_FOLDER'], 'step2_90.jpg')
    tensor = label_image.read_tensor_from_image_file(filename)
    input_operation = model.get_operation_by_name(input_name)
    output_operation = model.get_operation_by_name(output_name)
    with tf.Session(graph=model) as sess:
         results = sess.run(output_operation.outputs[0],
                            {input_operation.outputs[0]: tensor})
    results = list(np.squeeze(results))    
    print(results)
    index_90 = results.index(max(results))
    predizione_2 = str(labels[index_90])
    prob_2 = results[index_90]
    
    # keep whichever of the two predictions is more confident
    predizione = predizione_1
    prob = round(prob_1, 2)
    if prob_2 > prob_1:
        predizione = predizione_2
        prob = round(prob_2, 2)
    
    ocr_out = ''
    param1 = 'e.g. 205'
    param2 = 'e.g. 55'
    param3 = 'e.g. 16'
    if app.config['OCR']:
        ocr_results = {'bridgestone': '<p>REDWEAL\nWITHOUT CONTACTO\n\nIKAK LOAD CH 012355\nAT 850Tat1 KAX PECAS\nTUBI LESS RADIAL\nSPAR\n7516Y <span class="measur">185 65 R15 </span>SSE\nPYSWZ\nE\nOUTSIDE\nPLIECETREAD POLYESTER - 2 STEL - 1 POLYFOTTE\nSIDE ALLIPOLYESTER\nBCLX U9F (0813)\n9) 0290 74 52WRE\nE\n00933413\nD EG GODT NG PET PPSELT HEAD\nDUPIETEET\nSED DUE PEOPGE\nOLO\nNEOS DOU D\nOLD TOUTES\n-\n</p>',\
                       'continental': '<p>Continental\n-e as .\nConfinoles\nTREAOTEAR 280\nTRACTION\nA\nTEMPERATURE\ntinental\n <span class="measur">205/55R16 </span>V.\nContiPremiumContact 5\nNOM\nDOT GYOF D7L5 1816\nCONTINENTAL\nL-99812S-2328112328473\ncontinental-tires.com\n82\nMAX INFLATION PRESSURE 350 KPA (1 PSD\nMAX LOAD 615 KG (1356 LB)\nPY\nSTELA\n</p>',\
                       'michelin': '<p>maiores\n175/65 R\nWARNING\n <span class="measur">175/65R14 </span>\n327\nR\nA\nA\nSDPLES\nIG HELINO TUBELES RADIAL X\nBIREWOLLPLY\nCena\nMICHELINO TUBELES3 RADIAL\nFOLESTE\n122502 52\nbesparende\n</p>',\
                       'pirelli': '<p>Per la\n7.com\nwww.pirelli\nESTERNO AUSSEN\nEXTERIEUR OUTER\nASA CANADA WA LOLEN ONLI\nLO 615 1356 051\n409001\nP <span class="measur">205/55 R16 </span>\nSTANDARD LOAD\n1316)\nRADIAL\nTUBELESS\n21.129SZ WRI\n6253353\nLIEGEZWE COQ\nOLETS\n1089978\nAG9978\n2012\n</p>'}
        diz_param = {'bridgestone': ['185', '65', '15'],\
                     'continental': ['205', '55', '16'],\
                     'michelin': ['175', '65', '14'],\
                     'pirelli': ['205', '55', '16']}
        param1 = diz_param[app.config['OCR']][0]
        param2 = diz_param[app.config['OCR']][1]
        param3 = diz_param[app.config['OCR']][2]
        ocr_out = ocr_results[app.config['OCR']]
    
    return render_template('try.html', filename='step2.jpg', init=False, crop1=False, crop2=True,
                           pred=predizione, prob=prob, downloadenabled=app.config['DOWNLOAD'],
                           OCR=app.config['OCR'], ocr_text=ocr_out, marca=predizione,
                           param1=param1, param2=param2, param3=param3)
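All of the examples on this page call read_tensor_from_image_file from TensorFlow's stock label_image.py sample; for reference, a minimal sketch of that helper (TF1-style, JPEG input only, default 299x299 / mean 0 / std 255 preprocessing) might look like the following.

import tensorflow as tf

def read_tensor_from_image_file(file_name, input_height=299, input_width=299,
                                input_mean=0, input_std=255):
    # Decode the image, add a batch dimension, resize to the network input
    # size and normalise pixels to (value - mean) / std.
    file_reader = tf.read_file(file_name, "file_reader")
    image_reader = tf.image.decode_jpeg(file_reader, channels=3, name="jpeg_reader")
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    with tf.Session() as sess:
        return sess.run(normalized)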
Example #2
    def classify(self, image):
        """
        Downloads the image from the scraped URL if it has not been cached locally (e.g. when a Lambda function instance has been shut down).

        Reads the image file and runs the classifier.

        :param image: The image to classify
        :return: None; the computed soup confidence is stored on image.soup_confidence
        """
        print(f"\tclassifying image {image.file_name()}")

        if not os.path.exists(image.file_name()):
            print(f"\tImage not saved, downloading. {image.file_name()}")
            r = requests.get(image.url, timeout=2.0)
            if r.status_code == 200:
                with open(image.file_name(), 'wb') as f:
                    f.write(r.content)

        t = read_tensor_from_image_file(image.file_name(),
                                        input_height=self.size,
                                        input_width=self.size,
                                        input_mean=0,
                                        input_std=255)

        results = self.session.run(self.output_operation.outputs[0],
                                   {self.input_operation.outputs[0]: t})

        results = np.squeeze(results)

        soup_confidence = results[self.labels.index("soup")].astype(float)
        print(
            f"\tfinished classifying image with soup confidence {soup_confidence}: {image}"
        )
        image.soup_confidence = soup_confidence
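The classify() method above reads its model state from self; a minimal, hypothetical constructor sketch (the class name SoupClassifier and the parameter defaults are assumptions, not part of the original snippet) showing what that state could look like:

import tensorflow as tf

class SoupClassifier:
    # Hypothetical constructor sketch: sets up the state classify() reads.
    def __init__(self, model_file, label_file, size=224,
                 input_layer="Placeholder", output_layer="final_result"):
        graph = tf.Graph()
        graph_def = tf.GraphDef()
        with open(model_file, "rb") as f:
            graph_def.ParseFromString(f.read())
        with graph.as_default():
            tf.import_graph_def(graph_def)

        self.size = size                                   # network input size
        with open(label_file) as f:
            self.labels = [line.rstrip() for line in f]    # one label per line
        self.input_operation = graph.get_operation_by_name("import/" + input_layer)
        self.output_operation = graph.get_operation_by_name("import/" + output_layer)
        self.session = tf.Session(graph=graph)             # reused across calls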
def main(stdscr):
    stdscr.clear()
    # Configure the camera
    camera = picamera.PiCamera()

    # configure the graph
    graph, session = load_graph(args.model)
    labels = load_labels(args.labels)

    while True:
        try:
            key = stdscr.getkey()
            stdscr.addstr('Detected key:\n')
            stdscr.addstr(str(key))
            stdscr.addstr('\n')
            if str(key) == 'q':
                break
            if str(key) == 'c':
                stdscr.clear()
                camera.start_preview()
                time.sleep(2)
                camera.capture('test.jpg')
                camera.stop_preview()
                t = read_tensor_from_image_file('test.jpg',
                                                input_height=224,
                                                input_width=224,
                                                input_mean=128,
                                                input_std=128)
                # Start benchmarking
                input_name = "import/input"
                output_name = "import/final_result"
                input_operation = graph.get_operation_by_name(input_name)
                output_operation = graph.get_operation_by_name(output_name)
                start = time.time()
                results = session.run(output_operation.outputs[0],
                                      {input_operation.outputs[0]: t})
                end = time.time()
                results = np.squeeze(results)
                top_k = results.argsort()[-5:][::-1]
                stdscr.addstr(
                    '\nEvaluation time (1-image): {:.3f}s\n'.format(end -
                                                                    start))
                for i in top_k:
                    stdscr.addstr(
                        str(labels[i]) + ': ' + str(results[i]) + '\n')
        except Exception as e:
            stdscr.addstr('Exception occurred: ')
            stdscr.addstr(str(e))
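main() above expects load_graph to return both the imported graph and a session, plus a load_labels helper; a hedged sketch of those two helpers under that assumption:

import tensorflow as tf

def load_graph(model_file):
    # Parse the frozen GraphDef, import it, and return the graph together
    # with a session so the key-press loop can reuse one session per run.
    graph = tf.Graph()
    graph_def = tf.GraphDef()
    with open(model_file, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph, tf.Session(graph=graph)

def load_labels(label_file):
    # One label per line, in the same order as the model's output vector.
    with open(label_file) as f:
        return [line.rstrip() for line in f]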
Example #4
def testModel(pathToFrames, modelFile, labelFile, inputLayer, outputLayer):

	graph = label_image.load_graph(modelFile)
	labels = label_image.load_labels(labelFile)
	correct = 0
	total = 0

	for label in os.listdir(pathToFrames):
		if label == '.DS_Store':
			continue

		cur_class = os.path.join(pathToFrames, label)
		classified = {}

		for frame in os.listdir(cur_class):
			if(label+"_0" in frame):

				cur_frame = os.path.join(cur_class, frame)

				# run model on this frame
				t = label_image.read_tensor_from_image_file(cur_frame)

				input_name = "import/" + inputLayer
				output_name = "import/" + outputLayer
				input_operation = graph.get_operation_by_name(input_name)
				output_operation = graph.get_operation_by_name(output_name)

				with tf.Session(graph=graph) as sess:
					results = sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t})
				results = np.squeeze(results)
				top_k = results.argsort()[-3:][::-1]
				# Count only the top-1 prediction as a vote for this frame.
				best_guess = labels[top_k[0]]
				classified[best_guess] = classified.get(best_guess, 0) + 1
		best = keywithmaxval(classified)
		label_clean = convertLabel(label)
		print(best)
		print(label_clean)
		if best == label_clean:
			correct += 1
		total += 1
		print(total)
		print(correct)

	return correct*1.0/total
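testModel() depends on a keywithmaxval helper (and a convertLabel normaliser not shown here); a one-line sketch of keywithmaxval, assuming classified is a plain dict of vote counts:

def keywithmaxval(d):
    # Return the key whose value (vote count) is largest.
    return max(d, key=d.get)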
def tf_label(graph, filename, label_file):
    # Relies on module-level input_height/input_width/input_mean/input_std
    # and on input_operation/output_operation fetched from the same graph.
    t = label_image.read_tensor_from_image_file(filename,
                                                input_height=input_height,
                                                input_width=input_width,
                                                input_mean=input_mean,
                                                input_std=input_std)
    with tf.Session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
    results = np.squeeze(results)

    top_k = results.argsort()[-5:][::-1]
    labels = label_image.load_labels(label_file)

    results = list(zip(labels, results))
    print(results)
    return results
Example #7
def messy():
    """
    """
    with picamera.PiCamera() as camera:
        camera.resolution = (3200, 2464)
        time.sleep(1)
        camera.capture("instant.jpg")
        print("I took a picture!")
    file_name = "instant.jpg"
    model_file = "rooms_82.pb"
    label_file = "rooms_82.txt"
    input_height = 299
    input_width = 299
    input_mean = 0
    input_std = 255
    input_layer = "Placeholder"
    output_layer = "final_result"

    graph = ml.load_graph(model_file)
    t = ml.read_tensor_from_image_file(file_name,
                                       input_height=input_height,
                                       input_width=input_width,
                                       input_mean=input_mean,
                                       input_std=input_std)

    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with ml.tf.Session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
    results = np.squeeze(results)

    top_k = results.argsort()[-5:][::-1]
    labels = ml.load_labels(label_file)

    messy_end_label = 0
    for i in top_k:
        print(labels[i], results[i])
        messy_end_label += float(labels[i]) * float(results[i])

    return messy_end_label
Example #8
def loadImage(filepath,
              model_file='output_graph.pb',
              label_file='output_labels.txt',
              input_height=299,
              input_width=299,
              input_mean=0,
              input_std=255,
              input_layer='Placeholder',
              output_layer='final_result'):

    graph = label_image.load_graph(model_file)
    t = label_image.read_tensor_from_image_file(filepath,
                                                input_height=input_height,
                                                input_width=input_width,
                                                input_mean=input_mean,
                                                input_std=input_std)

    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with tf.Session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})

    results = np.squeeze(results)

    bestGuess = results.argsort()[-1]

    labels = label_image.load_labels(label_file)
    actName = filepath.split('/')[-2]

    plt.cla()

    ax = plt.axes()
    ax.set_title('Predicted: ' + fish_lut[labels[bestGuess]] + '\nActual: ' +
                 fish_lut[actName] + '\nConfidence: ' +
                 str(results[bestGuess]))

    plt.imshow(mpimg.imread(filepath))

    plt.draw()
    def recognize(self, file_name, graph):
        t = read_tensor_from_image_file(file_name,
                                        input_height=INPUT_HEIGHT,
                                        input_width=INPUT_WIDTH,
                                        input_mean=INPUT_MEAN,
                                        input_std=INPUT_STD)

        input_name = "import/" + INPUT_LAYER
        output_name = "import/" + OUTPUT_LAYER
        input_operation = graph.get_operation_by_name(input_name)
        output_operation = graph.get_operation_by_name(output_name)

        results = self.sess.run(output_operation.outputs[0],
                                {input_operation.outputs[0]: t})
        results = np.squeeze(results)

        top_k = results.argsort()[-5:][::-1]
        labels = load_labels(LABEL_FILE)
        # for i in top_k:
        #     print(labels[i], results[i])
        k = top_k[0]
        return labels[k]
Example #10
def run_on_image(file_name):
    t = label_image.read_tensor_from_image_file(
        file_name,
        input_height=input_height,
        input_width=input_width,
        input_mean=input_mean,
        input_std=input_std,
    )

    input_name = 'import/' + input_layer
    output_name = 'import/' + output_layer

    global graph
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with tf.Session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
    results = np.squeeze(results)
    top_k = results.argsort()[-5:][::-1]
    labels = label_image.load_labels(label_file)
    return labels[top_k[0]]
Example #11
def take_and_label_picture():
    # ts = time.time()
    # timeStamp = datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M')
    image_file = 'static/lab.jpg'
    print('Taking picture and saving to ' + image_file)
    camera.take_picture(image_file, True)

    graph = label_image.load_graph(MODEL_FILE)
    t = label_image.read_tensor_from_image_file(image_file)

    input_operation = graph.get_operation_by_name('import/Placeholder')
    output_operation = graph.get_operation_by_name('import/final_result')

    with tf_session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
    results = numpy.squeeze(results)

    labels = label_image.load_labels(LABEL_FILE)
    result = {}
    for i, label in enumerate(labels):
        result[label] = float(results[i])

    print(result)
    message(result)
Example #12
def read_single_letter(file_name):
    model_file = "output_graph.pb"
    label_file = "output_labels.txt"
    input_height = 224
    input_width = 224
    input_mean = 0
    input_std = 255
    input_layer = "input"
    output_layer = "final_result"

    graph = label_image.load_graph(model_file)
    t = label_image.read_tensor_from_image_file(file_name,
                                                input_height=input_height,
                                                input_width=input_width,
                                                input_mean=input_mean,
                                                input_std=input_std)

    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with tf.Session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
    results = np.squeeze(results)

    top_k = results.argsort()[-5:][::-1]
    labels = label_image.load_labels(label_file)
    # Each label is expected to be "<name> <ascii code>"; decode the top prediction.
    best = labels[top_k[0]].split(" ")
    print(best)
    return chr(int(best[1]))
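A hedged usage example for read_single_letter, assuming a cropped single-character image saved as letter.jpg (a hypothetical path):

# Hypothetical call: classify one cropped character image and print the result.
letter = read_single_letter("letter.jpg")
print("Recognised character:", letter)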
Example #13
    graph = load_graph(model_file)

    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    labels = load_labels(label_file)

    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    for root, dirs, files in os.walk(image_dir, topdown=False):
        for name in files:
            file_name = os.path.join(root, name)
            if not file_name.endswith('.jpg'):
                continue
            print("processing", file_name)

            t = read_tensor_from_image_file(file_name,
                                            input_height=input_height,
                                            input_width=input_width,
                                            input_mean=input_mean,
                                            input_std=input_std)

            with tf.Session(graph=graph) as sess:
                results = sess.run(output_operation.outputs[0],
                                   {input_operation.outputs[0]: t})
            results = np.squeeze(results)

            with open(os.path.splitext(file_name)[0] + '.txt', 'w') as fp:
                for i in range(len(labels)):
                    fp.write("%s:%s\n" % (labels[i], results[i]))
Example #14
    def check(self, fileToUpload, submit):
        try:
            res = "<html><body><h1>Test Result</h1>"
            res_var = None
            if fileToUpload:
                file_name = "uploads/1.jpg"

                img = open(file_name, 'wb')
                img.write(io.BytesIO(fileToUpload.file.read()).read())
                img.close()

                model_file = "output_graph.pb"
                label_file = "output_labels.txt"
                input_height = 299
                input_width = 299
                input_mean = 0
                input_std = 255
                input_layer = "Mul"
                output_layer = "final_result"

                graph = li.load_graph(model_file)
                t = li.read_tensor_from_image_file(
                       file_name,
                       input_height=input_height,
                       input_width=input_width,
                       input_mean=input_mean,
                       input_std=input_std)

                input_name = "import/" + input_layer
                output_name = "import/" + output_layer
                input_operation = graph.get_operation_by_name(input_name)
                output_operation = graph.get_operation_by_name(output_name)

                with tf.Session(graph=graph) as sess:
                    results = sess.run(output_operation.outputs[0], {
                              input_operation.outputs[0]: t
                    })
                results = np.squeeze(results)

                top_k = results.argsort()[-5:][::-1]
                labels = li.load_labels(label_file)
                if results[top_k[0]] > 0.7:
                    res += "<h3>You uploaded image of " + self.cm[str(labels[top_k[0]])] + ' ' + str(results[top_k[0]] * 100) +  "%</h3>"
                    res_var = '\n Probability of ' + self.cm[str(labels[top_k[0]])] + ' = ' + str(results[top_k[0]] * 100) +  "%"
                else:
                    res += "<h3>Sorry couldn't detect<br>Try with different image</h3>"

            try:
                data = self.get_top()
                res1 = ''.join([line for line in open('result1.html', 'r')])
                res2 = ''.join([line for line in open('result2.html', 'r')])

                resm = "<img src=\"uploads/1.jpg\" width=\"400\"  >" + "<h1 class=\"mb-10\"> \n Our Result:</h1>" + "<p>" + res_var + "</p>"

                return data + res1 + resm + res2
            except Exception as e1:
                print(e1)
                return res + "<a href='/'>Try another</a></body></html>"
        except Exception as e:
            print(e)
            return "<html><body><h1>Please try again (Corrupt or invalid Image)</h1></body></html>"
Example #15
def predict():
    message = request.get_json(force=True)
    encoded = message["image"]
    decoded = base64.b64decode(encoded)
    img = Image.open(io.BytesIO(decoded))

    destination = "images"
    if not os.path.exists(destination):
        os.makedirs(destination)

    now = datetime.datetime.now()
    rand_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
    file_name = os.path.join(destination, str(now.strftime("%Y-%m-%d-%H-%M-%S-"))+rand_str+'.jpg')

    try:
        img.save(os.path.join(file_name), "JPEG", quality=80, optimize=True, progressive=True)
    except IOError:
        ImageFile.MAXBLOCK = img.size[0] * img.size[1]
        img.save(file_name, "JPEG", quality=80, optimize=True, progressive=True)

    t = label_image.read_tensor_from_image_file(file_name,
                                    input_height=input_height,
                                    input_width=input_width,
                                    input_mean=input_mean,
                                    input_std=input_std)

    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with tf.Session(graph=graph) as sess:
        start = time.time()
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
        end = time.time()

    results = np.squeeze(results)

    top_k = results.argsort()[-5:][::-1]
    labels = label_image.load_labels(label_file)

    print('\nEvaluation time (1-image): {:.3f}s\n'.format(end - start))

    # top_k[0] is the highest-scoring class; keep its label and score.
    lab = labels[top_k[0]]
    res = results[top_k[0]]
    for i in top_k:
        print(labels[i], results[i])

    response = {
        'prediction': {
            'prediction': lab,
            'value' : str(res)
        }
    }
    return jsonify(response)
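A hedged client-side sketch for exercising this kind of endpoint, assuming the Flask app is running locally on port 5000 and the route is mounted at /predict (both assumptions):

import base64
import requests

# Hypothetical client: send a base64-encoded JPEG in the {"image": ...}
# payload that predict() expects, then print the JSON prediction.
with open("test.jpg", "rb") as f:
    payload = {"image": base64.b64encode(f.read()).decode("utf-8")}
resp = requests.post("http://localhost:5000/predict", json=payload)
print(resp.json())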