Example #1
def predict(img):
    graph_file = 'output_graph.pb'
    label_file = 'output_labels.txt'

    graph = load_graph(graph_file)
    input_name = "import/input"
    output_name = "import/final_result"
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)
    labels = load_labels(label_file)

    with tf.Session(graph=graph) as sess:
        image = np.asarray(bytearray(img), dtype="uint8")
        frame = cv2.imdecode(image, cv2.IMREAD_COLOR)

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # img = get_image()

        t,r = preprocess(frame, 224, 224)
        t1 = time.time()

        results = sess.run(output_operation.outputs[0],
                        {input_operation.outputs[0]: t})
        results = np.squeeze(results)

        top_k = results.argsort()[-5:][::-1]
        Top1 = top_k[0]
        return labels[Top1], results[Top1]
    def recognize_disease(self, img):

        graph = load_graph(MODEL_FILE)

        with tf.Session(graph=graph) as self.sess:
            disease = self.recognize(img, graph)

        return disease
    def __init__(self, model_path, labels_path):

        # Load graph, labels, input and output tensors
        self.graph = label_image.load_graph(model_path)
        self.labels = label_image.load_labels(labels_path)
        self.red_idx = self.get_red_idx()
        self.input_operation = self.graph.get_operation_by_name(INPUT_LAYER)
        self.output_operation = self.graph.get_operation_by_name(OUTPUT_LAYER)

        # Create the session
        self.sess = tf.Session(graph=self.graph)
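
Most of the examples on this page rely on the helper functions from TensorFlow's label_image.py example script (load_graph, load_labels). For reference, a minimal TF 1.x sketch of those two helpers looks roughly like this; the preprocess helper used in predict() above is specific to that snippet and is not shown here:

import tensorflow as tf

def load_graph(model_file):
    # Parse a frozen GraphDef from disk and import it into a fresh graph.
    # tf.import_graph_def prefixes every op with "import/" by default, which is
    # why the snippets look up names like "import/input" and "import/final_result".
    graph = tf.Graph()
    graph_def = tf.GraphDef()
    with open(model_file, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph

def load_labels(label_file):
    # One label per line, in the same order as the model's output vector.
    return [line.rstrip() for line in tf.gfile.GFile(label_file).readlines()]
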
Example #4
def load_tf_transfer():
    model_dir = os.path.join(os.getcwd(), 'retrained',
                             'classify_image_graph_def.pb')
    graph = label_image.load_graph(model_dir)
    out = {
        'graph': graph,
        'label_path': os.path.join(os.getcwd(), 'retrained',
                                   'output_labels.txt'),
        'input_name': 'import/DecodeJpeg',
        'output_name': 'import/final_result'
    }
    return out
def testModel(pathToFrames, modelFile, labelFile, inputLayer, outputLayer):

	graph = label_image.load_graph(modelFile)
	correct = 0
	total = 0

	for label in os.listdir(pathToFrames):
		if label == '.DS_Store':
			continue

		cur_class = os.path.join(pathToFrames, label)
		classified = {}

		for frame in os.listdir(cur_class):
			if(label+"_0" in frame):

				cur_frame = os.path.join(cur_class, frame)

				# run model on this frame
				t = label_image.read_tensor_from_image_file(cur_frame)

				input_name = "import/" + inputLayer
				output_name = "import/" + outputLayer
				input_operation = graph.get_operation_by_name(input_name)
				output_operation = graph.get_operation_by_name(output_name)

				with tf.Session(graph=graph) as sess:
					results = sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t})
				results = np.squeeze(results)
				top_k = results.argsort()[-3:][::-1]
				labels = label_image.load_labels(labelFile)
				# print(top_k)
				for i in top_k:
					if labels[i] in classified:
						classified[labels[i]] += 1
					else:
						classified[labels[i]] = 1
					break
		best = keywithmaxval(classified)
		label_clean = convertLabel(label)
		print(best)
		print(label_clean)
		if(best == label_clean):
			correct += 1
		total += 1
		print(total)
		print(correct)

	return correct*1.0/total
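
testModel calls a keywithmaxval helper that is not included in the snippet; assuming it simply returns the dictionary key with the highest count (which is how it is used here), a minimal version would be:

def keywithmaxval(d):
    # Return the key with the largest value, i.e. the most frequently predicted label.
    return max(d, key=d.get)

convertLabel is likewise external; presumably it just normalizes the directory name into the label format produced by the model.
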
Example #6
    def __init__(self, graph_name="resources/output_graph_mobile.pb"):
        """
        Initializes the tensorflow session and loads the classifier graph.
        :param graph_name: the graph name can be overridden to use a different model
        """

        self.graph = load_graph(graph_name)
        self.input_operation = self.graph.get_operation_by_name(
            self.input_name)
        self.output_operation = self.graph.get_operation_by_name(
            self.output_name)
        self.session = tf.Session(graph=self.graph)
        self.labels = load_labels("resources/output_labels.txt")

        if not os.path.exists("/tmp/photos"):
            os.makedirs("/tmp/photos")
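
The constructor above reads self.input_name and self.output_name, which are not defined anywhere in the snippet; presumably they are class-level attributes of the (unnamed) classifier class. The exact operation names below are an assumption and depend on the retrained graph:

    # Hypothetical class-level attributes assumed by the __init__ above; the
    # "import/" prefix comes from tf.import_graph_def's default name scope.
    input_name = "import/input"
    output_name = "import/final_result"
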
Example #7
def messy():
    """
    """
    with picamera.PiCamera() as camera:
        camera.resolution = (3200, 2464)
        time.sleep(1)
        camera.capture("instant.jpg")
        print("I took a picture!")
    file_name = "instant.jpg"
    model_file = "rooms_82.pb"
    label_file = "rooms_82.txt"
    input_height = 299
    input_width = 299
    input_mean = 0
    input_std = 255
    input_layer = "Placeholder"
    output_layer = "final_result"

    graph = ml.load_graph(model_file)
    t = ml.read_tensor_from_image_file(file_name,
                                       input_height=input_height,
                                       input_width=input_width,
                                       input_mean=input_mean,
                                       input_std=input_std)

    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with ml.tf.Session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
    results = np.squeeze(results)

    top_k = results.argsort()[-5:][::-1]
    labels = ml.load_labels(label_file)

    messy_end_label = 0
    for i in top_k:
        print(labels[i], results[i])
        messy_end_label += float(labels[i]) * float(results[i])

    return messy_end_label
Example #8
def loadImage(filepath,
              model_file='output_graph.pb',
              label_file='output_labels.txt',
              input_height=299,
              input_width=299,
              input_mean=0,
              input_std=255,
              input_layer='Placeholder',
              output_layer='final_result'):

    graph = label_image.load_graph(model_file)
    t = label_image.read_tensor_from_image_file(filepath,
                                                input_height=input_height,
                                                input_width=input_width,
                                                input_mean=input_mean,
                                                input_std=input_std)

    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with tf.Session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})

    results = np.squeeze(results)

    bestGuess = results.argsort()[-1]

    labels = label_image.load_labels(label_file)
    actName = filepath.split('/')[-2]

    plt.cla()

    ax = plt.axes()
    ax.set_title('Predicted: ' + fish_lut[labels[bestGuess]] + '\nActual: ' +
                 fish_lut[actName] + '\nConfidence: ' +
                 str(results[bestGuess]))

    plt.imshow(mpimg.imread(filepath))

    plt.draw()
Example #9
def take_and_label_picture():
    # ts = time.time()
    # timeStamp = datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M')
    image_file = 'static/lab.jpg'
    print('Taking picture and saving to ' + image_file)
    camera.take_picture(image_file, True)

    graph = label_image.load_graph(MODEL_FILE)
    t = label_image.read_tensor_from_image_file(image_file)

    input_operation = graph.get_operation_by_name('import/Placeholder')
    output_operation = graph.get_operation_by_name('import/final_result')

    with tf_session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
    results = numpy.squeeze(results)

    labels = label_image.load_labels(LABEL_FILE)
    result = {}  # label -> confidence score
    for i, label in enumerate(labels):
        result[label] = float(results[i])

    print(result)
    message(result)
Example #10
def read_single_letter(file_name):
    model_file = "output_graph.pb"
    label_file = "output_labels.txt"
    input_height = 224
    input_width = 224
    input_mean = 0
    input_std = 255
    input_layer = "input"
    output_layer = "final_result"

    graph = label_image.load_graph(model_file)
    t = label_image.read_tensor_from_image_file(file_name,
                                                input_height=input_height,
                                                input_width=input_width,
                                                input_mean=input_mean,
                                                input_std=input_std)

    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with tf.Session(graph=graph) as sess:
        results = sess.run(output_operation.outputs[0],
                           {input_operation.outputs[0]: t})
    results = np.squeeze(results)

    top_k = results.argsort()[-5:][::-1]
    labels = label_image.load_labels(label_file)
    # Only the top prediction is used; its label is expected to look like
    # "<name> <ascii code>", so the second token is converted to a character.
    best = labels[top_k[0]].split(" ")
    print(best)
    return chr(int(best[1]))
Example #11
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 29 20:31:52 2018

@author: TOKIMASA
"""

import label_image

image_data = label_image.load_image('alopias_superciliosus_01.jpg')
labels = label_image.load_labels('retrained_labels_2.txt')
label_image.load_graph('retrained_graph_2.pb')

input_layer = 'DecodeJpeg/contents:0'
output_layer = 'final_result:0'
num_top_predictions = 5

label_dict = label_image.run_graph(image_data, labels, input_layer, output_layer,
              num_top_predictions)

input_dict = {'image_name':'xxx','longitude':'-0.703107, -120.9375','ocean_name':''}
input_dict_ = {'image_name':'xxx','longitude':'','ocean_name':'Pacific Ocean'}

ocean_name = get_location(input_dict_)
species_name = label_dict['species']

label_dict['ban_boolean'] = get_banboolean(species_name,ocean_name)
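
run_graph here must be a project-specific variant, since label_dict['species'] is read from its return value; the stock run_graph from the old image-retraining tutorial only prints the top predictions and looks roughly like the sketch below. Note that in this older API, load_graph imports the model into the default graph, which is why its return value is discarded above.

def run_graph(image_data, labels, input_layer_name, output_layer_name,
              num_top_predictions):
    # Feed the raw JPEG bytes to the model already loaded into the default
    # graph and print the best guesses in order of confidence.
    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
        predictions, = sess.run(softmax_tensor, {input_layer_name: image_data})
        top_k = predictions.argsort()[-num_top_predictions:][::-1]
        for node_id in top_k:
            print('%s (score = %.5f)' % (labels[node_id], predictions[node_id]))
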
Example #12
        # if the window does not meet our desired window size, ignore it
        if window.shape[0] != winH or window.shape[1] != winW:
            continue

        clone = image.copy()
        cv2.rectangle(clone, (x, y), (x + winW, y + winH), (0, 255, 0), 2)
        print(x, y)
        croped = clone[y:y + winH, x:x + winW]
        #cv2.imshow("croped image", croped)
        # img2= cv2.resize(croped,dsize=(64,16,3), interpolation = cv2.INTER_CUBIC)
        # np_image_data = np.asarray(img2)
        # np_final = np.expand_dims(np_image_data,axis=0)

        #print(np_final.shape)

        graph = li.load_graph(model_file)
        # t = li.read_tensor_from_image_file(
        # 	file_name,
        # 	input_height=input_height,
        # 	input_width=input_width,
        # 	input_mean=input_mean,
        # 	input_std=input_std
        # 	)
        print(croped.shape)
        cv2.imshow("cropped image", croped)
        t = croped
        #t = np.pad(croped,((0,0),(38,38),(0,0)),'constant')
        # t = t.reshape(1,224,224,3)
        t = t.reshape(1, 96, 96, 3)
        print(t.shape)
Example #13
    if args.labels:
        label_file = args.labels
    if args.input_height:
        input_height = args.input_height
    if args.input_width:
        input_width = args.input_width
    if args.input_mean:
        input_mean = args.input_mean
    if args.input_std:
        input_std = args.input_std
    if args.input_layer:
        input_layer = args.input_layer
    if args.output_layer:
        output_layer = args.output_layer

    graph = label_image.load_graph(model_file)

    total_ok = 0
    total_images = 0

    for label in os.listdir(dataset_path):
        images_dir = os.path.join(dataset_path, label)
        image_paths = []
        for image_name in os.listdir(images_dir):
            image_path = os.path.join(images_dir, image_name)
            image_paths.append(image_path)
        pool = ProcessPool(4)
        results = pool.map(run_on_image, image_paths)
        for i in range(len(image_paths)):
            if results[i] == label:
                total_ok += 1
#!/usr/bin/env python
# usage: bash tf_classify_server.sh [PORT_NUMBER]
from flask import Flask, request
import tensorflow as tf
import label_image as tf_classify
import json
app = Flask(__name__)
FLAGS, unparsed = tf_classify.parser.parse_known_args()
labels = tf_classify.load_labels(FLAGS.labels)
tf_classify.load_graph(FLAGS.graph)
sess = tf.Session()
@app.route('/', methods=['POST'])
def classify():
    try:
        data = request.files.get('data').read()
        result = tf_classify.run_graph(data, labels, FLAGS.input_layer, FLAGS.output_layer, FLAGS.num_top_predictions, sess)
        return json.dumps(result), 200
    except Exception as e:
        return repr(e), 500
app.run(host='0.0.0.0',port=12480)
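
To exercise the classification server above, a client along these lines should work; the image path is just a placeholder, while the port and the 'data' form field match the snippet:

import requests

# Post an image to the Flask classifier assumed to be running on localhost:12480.
with open('test.jpg', 'rb') as f:  # placeholder image path
    resp = requests.post('http://localhost:12480/', files={'data': f})

print(resp.status_code)
print(resp.json())  # predictions serialized by run_graph on the server side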

Example #15
    def stop(self):
        self.runable = False


config = {
    "apiKey": "AIzaSyBFPAXHOjsn-ODsrSHa9TddDWp5UGhIefw",
    "authDomain": "pickle-f6850.firebaseapp.com",
    "databaseURL": "https://pickle-f6850.firebaseio.com/",
    "storageBucket": "pickle-f6850.appspot.com"
}

firebase = pyrebase.initialize_app(config)
db = firebase.database()

graph = label_image.load_graph("retrained_graph.pb")


def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
    input_name = "file_reader"
    output_name = "normalized"
    file_reader = tf.read_file(file_name, input_name)
    if file_name.endswith(".png"):
        image_reader = tf.image.decode_png(file_reader,
                                           channels=3,
                                           name='png_reader')
    elif file_name.endswith(".gif"):
        image_reader = tf.squeeze(tf.image.decode_gif(file_reader, name='gif_reader'))
    else:
        # Default to JPEG decoding, as in the stock label_image.py example.
        image_reader = tf.image.decode_jpeg(file_reader, channels=3, name='jpeg_reader')
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    with tf.Session() as sess:
        return sess.run(normalized)

# ==============================================================================

import cv2
from networktables import NetworkTables
from time import sleep
import sys
sys.path.insert(0, './label_image')

import label_image as li

if __name__ == '__main__':
    ip = sys.argv[1]
    NetworkTables.initialize(server=ip)
    sd = NetworkTables.getTable("SmartDashboard")
    cam = cv2.VideoCapture(0)
    graph = li.load_graph("quantized_graph.pb")
    while True:
        i, img = cam.read()
        scaledImg = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)
        cv2.imwrite("fromCamera.png", scaledImg)
        result = str(
            li.classify("fromCamera.png", graph, "output_labels.txt", 224, 224,
                        128, 128, "input", "final_result", True))
        objects = result.split()
        if ("cube" in objects[0]):
            sd.putNumber("isCube", "true")
            print("Yooo, that's a PowerCube(TM)!")
        else:
            sd.putNumber("isCube", "false")
            print("Nope, not a PowerCube.")
    def check(self, fileToUpload, submit):
        try:
            res = "<html><body><h1>Test Result</h1>"
            res_var = None
            if fileToUpload:
                file_name = "uploads/1.jpg"

                with open(file_name, 'wb') as img:
                    img.write(fileToUpload.file.read())

                model_file = "output_graph.pb"
                label_file = "output_labels.txt"
                input_height = 299
                input_width = 299
                input_mean = 0
                input_std = 255
                input_layer = "Mul"
                output_layer = "final_result"

                graph = li.load_graph(model_file)
                t = li.read_tensor_from_image_file(
                       file_name,
                       input_height=input_height,
                       input_width=input_width,
                       input_mean=input_mean,
                       input_std=input_std)

                input_name = "import/" + input_layer
                output_name = "import/" + output_layer
                input_operation = graph.get_operation_by_name(input_name)
                output_operation = graph.get_operation_by_name(output_name)

                with tf.Session(graph=graph) as sess:
                    results = sess.run(output_operation.outputs[0], {
                              input_operation.outputs[0]: t
                    })
                results = np.squeeze(results)

                top_k = results.argsort()[-5:][::-1]
                labels = li.load_labels(label_file)
                if results[top_k[0]] > 0.7:
                    res += "<h3>You uploaded image of " + self.cm[str(labels[top_k[0]])] + ' ' + str(results[top_k[0]] * 100) +  "%</h3>"
                    res_var = '\n Probability of ' + self.cm[str(labels[top_k[0]])] + ' = ' + str(results[top_k[0]] * 100) +  "%"
                else:
                    res += "<h3>Sorry couldn't detect<br>Try with different image</h3>"

            try:
                data = self.get_top()
                res1 = ''.join([line for line in open('result1.html', 'r')])
                res2 = ''.join([line for line in open('result2.html', 'r')])

                resm = "<img src=\"uploads/1.jpg\" width=\"400\"  >" + "<h1 class=\"mb-10\"> \n Our Result:</h1>" + "<p>" + res_var + "</p>"

                return data + res1 + resm + res2
            except Exception as e1:
                print(e1)
                return res + "<a href='/'>Try another</a></body></html>"
        except Exception as e:
            print(e)
            return "<html><body><h1>Please try again (Corrupt or invalid Image)</h1></body></html>"
Example #18
    file_path = 'sample/'
    file_name = 'sample1.vtt'

    model_file = "/tmp/output_graph.pb"
    label_file = "/tmp/output_labels.txt"

    subtitle = parse_subtitle_file(os.path.join(file_path, file_name))
    cap = open_video('sample/sample1.mp4')
    fps = cap.get(cv2.CAP_PROP_FPS)

    success, first_image = cap.read()

    working_time = datetime.now()
    print(working_time)

    graph = label_image.load_graph(model_file)

    create_graph()

    with tf.Session() as sess:
        for i in range(0, len(subtitle)):
            line = subtitle[i]
            print('Processing: %s' % line['time'])
            frames = [int(x * fps) for x in list(range(line['start'], line['end']))]
            frames = list(set(frames))
            count = 0
            for frame in frames:
                video_on_text = subtitle[i]['video_on_text']
                if video_on_text:
                    break
                cap.set(cv2.CAP_PROP_POS_FRAMES, frame)
Example #19
import os

from flask import Flask, render_template

from ScrapingFunctions import Parser
import label_image
import tensorflow as tf
from time import sleep

app = Flask(__name__)

UPLOAD_FOLDER = os.path.basename('uploads')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

const0 = 600
input_layer = "Mul"
output_layer = "final_result"
input_name = "import/" + input_layer
output_name = "import/" + output_layer
model = label_image.load_graph('retrained_graph.pb')
labels = ['Bridgestone', 'Continental', 'Michelin', 'Pirelli']

app.config['DOWNLOAD']=False

app.config['OCR']=False

#================================================================
# HOMEPAGE , ABOUT , RESULTS , TRY
#================================================================

@app.route('/')
def homepage():
    return render_template('index.html')   
    
@app.route('/method')
Example #20
import csv
import label_image
import imageio
import random
import time
import utils
from glob import glob            # needed for the test-image listing below
import matplotlib.pyplot as plt  # needed for the result figures below


CAR_WORDS = ['car']



files = glob('../rob599_dataset_deploy/test/*/*_image.jpg')
files.sort()

graph = label_image.load_graph('./retrained_graph.pb')
labels = label_image.load_labels('./retrained_labels.txt')

fig1 = plt.figure(1, figsize=(16, 9))
fig3 = plt.figure(3, figsize=(10,10))

with open('./outfile.txt','w') as f:
    f.write('guid/image,N\n')

plt.ion()

for i in range(len(files)):
    if i%1 == 0:
        print("Trial ", i, 'out of ', len(files))
        
    imgpath = files[i]