Example #1
def classify_image(image_arg):
	print("[INFO] loading and preprocessing image...")
	image = image_utils.load_img(image_arg, target_size=(224, 224))
	image = image_utils.img_to_array(image)

	# our image is now represented by a NumPy array of shape (224, 224, 3)
	# (channels-last; older channels-first setups use (3, 224, 224)), but we
	# need to expand the dimensions to add a leading batch axis so we can
	# pass it through the network -- we'll also preprocess the image by
	# subtracting the mean RGB pixel intensity of the ImageNet dataset
	image = np.expand_dims(image, axis=0)
	image = preprocess_input(image)

	# load the VGG16 network
	# print("[INFO] loading network...")	
	# classify the image
	# print("[INFO] classifying image...")
	preds = model.predict(image, batch_size=32, verbose=1)
	preds = preds.reshape(1000)
	#(inID, label) = decode_predictions(preds)[0]
	top_pred, out_label_probs = decode_predictions(preds)

	# print "Top predictions : "
	# print "Label : {}, Probability: {}".format(top_pred['label'],top_pred['prob'])

	# print "Top labels with proobabilites : "
	# for label in out_label_probs:
	# 	print "Label : {}, Probability: {}".format(label['label'],label['prob'])
	return top_pred, out_label_probs
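For contrast with the custom decode_predictions used in Example #1 (it takes a reshaped 1-D score vector and returns a (top_pred, out_label_probs) pair), the stock keras.applications version expects the raw 2-D output of predict and returns one list of (class_id, label, probability) tuples per image. A minimal sketch of that standard API, with a hypothetical image path:

# Minimal sketch of the stock Keras decode_predictions API (not the custom
# helper used above); "example.jpg" is a hypothetical path.
import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from keras.preprocessing import image as image_utils

model = VGG16(weights="imagenet")
img = image_utils.load_img("example.jpg", target_size=(224, 224))
x = preprocess_input(np.expand_dims(image_utils.img_to_array(img), axis=0))
preds = model.predict(x)  # shape (1, 1000)
for class_id, label, prob in decode_predictions(preds, top=5)[0]:
    print("{}: {} ({:.4f})".format(class_id, label, prob))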
Example #2
    def predictor(self, sys):
        try:
            if len(sys.argv) < 2:
                print("Usage : ./saying-object.py <image path>")
                sys.exit()

            img_path = sys.argv[1]

            model = VGG16(weights="imagenet")
            img = image.load_img(img_path, target_size=(224, 224))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)

            objek = "?"
            preds = model.predict(x)
            pred_dict = decode_predictions(preds)
            objek = pred_dict[0][0][1]
            if len(objek) > 1:
                words = "Your object detected as " + objek.replace("_", " ")
                self.tts_online_gtts_speak(words)
            else:
                words = "sorry sir failed to predict your image"
                self.tts_online_gtts_speak(words)
        except:
            raise
Example #3
    def predict(self, frame):
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB).astype(np.float32)
        image = image.transpose((2, 0, 1))
        image = image.reshape((1, ) + image.shape)

        image = preprocess_input(image)
        preds = self.model.predict(image)
        return decode_predictions(preds)[0]
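The transpose above produces a channels-first (1, 3, 224, 224) batch, which only matches models built for the Theano-style "channels_first" image data format; with the TensorFlow default the batch should stay (1, 224, 224, 3). A small sketch of making the layout depend on the configured backend setting (assuming the standard Keras backend API):

# Sketch: arrange an RGB float32 frame of shape (224, 224, 3) to match the
# configured image data format before prediction.
import numpy as np
from keras import backend as K

def to_network_layout(frame):
    x = np.expand_dims(frame.astype(np.float32), axis=0)  # (1, 224, 224, 3)
    if K.image_data_format() == "channels_first":
        x = x.transpose((0, 3, 1, 2))                     # (1, 3, 224, 224)
    return x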
Example #4
 def predict(self, frame):
     image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB).astype(np.float32)
     image = np.expand_dims(image, axis=0)
     image = preprocess_input(image)
     preds = self.model.predict(image)
     label = decode_predictions(preds)[0][0][0:2]
     #self.write_arduino(label)
     return label
Example #5
def classify_image(img_path):
    #model = ResNet50(weights='imagenet')
    #img_path = '1360x.jpeg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))
Example #6
def Generate_scores(img):

    print("[INFO] loading and preprocessing image...")
    image = image_utils.load_img(img, target_size=(224, 224))
    image = image_utils.img_to_array(image)

    image = np.expand_dims(image, axis=0)
    image = preprocess_input(image)

    print("[INFO] loading network...")
    model = VGG16(weights="imagenet")

    print("[INFO] classifying image...")
    preds = model.predict(image)

    (inID, label, prob) = decode_predictions(preds)[0][0]

    result = decode_predictions(preds, top=10)[0]

    display(Image(img))

    result_frame = pd.DataFrame(result).iloc[:, 1:]
    result_frame.columns = ["item", "probability"]
    result_frame.index = result_frame.index + 1
    #display(result_frame)

    import seaborn as sns
    import matplotlib.pyplot as plt
    get_ipython().run_line_magic('matplotlib', 'inline')

    plt.figure(figsize=(10, 7))
    sns.set_style('white')
    sns.set_context('talk', font_scale=1.8)
    sns.set_color_codes("pastel")
    ax = sns.barplot(x='probability',
                     y='item',
                     data=result_frame,
                     color="b",
                     palette="Blues_r")
    plt.title('What is it?')
    ax.set(xlim=(0, 1))
    ax.set(xlabel='Probability', ylabel='Object')
Example #7
 def get_prediction(input_path):
     input_img = load_img(input_path, single_input_shape, grayscale)
     with get_evaluation_context():
         return jsonify(
             json.loads(
                 get_json(
                     decode_predictions(
                         model.predict(input_img)
                     )
                 )
             )
         )
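The get_json / json.loads round trip above exists because decode_predictions returns numpy float32 scores, which the standard json encoder rejects. A minimal sketch of doing that conversion directly, with get_json and the surrounding Flask plumbing assumed from the original project:

# Sketch: convert decode_predictions output into JSON-serializable data.
def predictions_to_dicts(decoded):
    # decoded is the nested list returned by decode_predictions(...)
    return [
        [{"id": class_id, "label": label, "prob": float(prob)}
         for (class_id, label, prob) in per_image]
        for per_image in decoded
    ]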
Example #8
def recognize(path):
  global model

  img = image.load_img(path, target_size=(299, 299))
  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)

  preds = model.predict(x)
  decoded = decode_predictions(preds, 3)
  result = [(x[1], float(x[2])) for x in decoded[0]]
  return result
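The 299x299 target size above implies an Inception-style network. Note that each keras.applications family ships its own preprocess_input (VGG/ResNet subtract the ImageNet channel means, Inception/Xception scale pixels to [-1, 1]), so the import should come from the matching module. A sketch assuming InceptionV3 and a hypothetical image path:

# Sketch: pair the model with its own preprocess_input and decode_predictions.
import numpy as np
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
from keras.preprocessing import image

model = InceptionV3(weights="imagenet")
img = image.load_img("example.jpg", target_size=(299, 299))
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print(decode_predictions(model.predict(x), top=3)[0])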
Example #9
    def predict(filename):
        assert os.path.isfile(filename), 'cannot find file'
        model = ResNet50(weights='imagenet')

        img = image.load_img(filename, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        preds = decode_predictions(model.predict(x))
        if len(preds) == 0:
            return None
        return preds[0][0][1]
Example #11
def callback(data):
    global prev  # `prev` and `img_path` are module-level globals
    if prev == data:
        return
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    # print('start')

    preds = model.predict(x)

    print('Predicted:', decode_predictions(preds))
    prev = data
Example #12
def eval(paths=['cat.jpg']):

    model = VGG19(include_top=True, weights='imagenet')

    imgs = [
        image.load_img(img_path, target_size=(224, 224)) for img_path in paths
    ]

    preds = []

    for img in imgs:
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        print('Input image shape:', x.shape)

        preds.extend(decode_predictions(model.predict(x)))

    return preds
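Each iteration above runs predict on a single-image batch; the same evaluation can be done in one call by stacking the preprocessed arrays. A sketch that reuses the image / preprocess_input / decode_predictions imports from the example and assumes every image shares the 224x224 input size:

# Sketch: classify several images with a single batched predict call.
import numpy as np

def eval_batched(model, paths):
    batch = np.vstack([
        np.expand_dims(image.img_to_array(
            image.load_img(p, target_size=(224, 224))), axis=0)
        for p in paths
    ])
    batch = preprocess_input(batch)
    return decode_predictions(model.predict(batch))  # one list per image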
Example #13
def accuracy(Y_true, Y_pred):
    val_labels_fnames = \
        '/lfs/1/ddkang/specializer/imagenet/ILSVRC2012_devkit_t12/data/val_gt_wnid.txt'
    df_labels = pd.read_csv(val_labels_fnames, names=['wnid'])
    Y_true = df_labels.values
    Y_true = [x[0] for x in Y_true]

    Y_pred_dec = imagenet_utils.decode_predictions(Y_pred)

    acc = 0.0
    t5 = 0.0
    for i in range(len(Y_true)):
        s = set(x[0] for x in Y_pred_dec[i])
        if Y_true[i] == Y_pred_dec[i][0][0]:
            acc += 1
        if Y_true[i] in s:
            t5 += 1
    acc /= len(Y_true)
    t5 /= len(Y_true)
    print('%f,%f' % (acc, t5))
Example #14
    def predictOutput(self):
        orig = cv2.imread(self.imageName)
        # classify the image
        print("[INFO] classifying image...")
        preds = self.model.predict(self.image)
        preds_class = self.model.predict_classes(self.image)
        #top4class_index = [i for i in np.argsort(preds[0])[-4:]]
        #top4class_prob = [preds[0][i] for i in np.argsort(preds[0])[-4:]]
        #print top4class_index
        #print top4class_prob

        preds_prob = preds[0][preds_class]
        (inID, label, prob) = decode_predictions(preds)[0][0]

        # display the predictions to our screen
        output_text = ('Class Label :{}'.format(preds_class) + '\n' +
                       'class Prob : {}'.format(preds_prob) + '\n' +
                       'Class Name : {}'.format(label) + '\n' +
                       'Imagenet Id : {}'.format(inID))
        self.console_output.setText(output_text)

        print("ImageNet ID: {}, Label: {}".format(inID, label))
Example #15
def predict(path, model_path, index_file_path, MainUI):
    try:
        result_string = " Detected Object : Probability \n \n"

        # Making check to load Model
        if (MainUI.resnet_model_loaded == False):
            wx.CallAfter(pub.sendMessage, "report101", message="Loading ResNet model for the first time. This may take a few minutes or less than a minute. Please wait. \nLoading.....")
            model = ResNet50(include_top=True, weights="imagenet", model_path=model_path)
            wx.CallAfter(pub.sendMessage, "report101", message="ResNet model loaded.. Picture about to be processed.. \nLoading......")
            MainUI.model_collection_resnet.append(model)  # Loading model if not loaded yet
            MainUI.resnet_model_loaded = True
        else:
            wx.CallAfter(pub.sendMessage, "report101", message="Retrieving loaded model. \nLoading........")
            model = MainUI.model_collection_resnet[0]  # Getting Model from model array if loaded before
            wx.CallAfter(pub.sendMessage, "report101", message="ResNet model loaded.. Picture about to be processed.. \nLoading......")

        # Image prediction processing
        target_image = image.load_img(path, grayscale=False, target_size=(224, 224))
        target_image = image.img_to_array(target_image, data_format="channels_last")
        target_image = np.expand_dims(target_image, axis=0)

        target_image = preprocess_input(target_image, data_format="channels_last")
        wx.CallAfter(pub.sendMessage, "report101", message="Picture is transformed for prediction. \nLoading........")
        prediction = model.predict(x=target_image, steps=1)
        wx.CallAfter(pub.sendMessage, "report101", message="Picture prediction is done. Sending in results. \nLoading......")

        # Retrieving prediction result and sending it back to the thread
        prediction_result = decode_predictions(prediction, top=10, index_file_path=index_file_path)

        for results in prediction_result:
            countdown = 0
            for result in results:
                countdown += 1
                result_string += "(" + str(countdown) + ") " + str(result[1]) + " : " + str(100 * result[2])[
                                                                                        0:4] + "% \n"

        return result_string
    except Exception as e:
        return getattr(e, "message", repr(e))
Example #16
def predict_image(image_name):
    image_path = os.path.join(os.getcwd(), 'images', image_name)
    print("[INFO] loading and preprocessing image...")
    image = image_utils.load_img(image_path, target_size=(224, 224))
    image = image_utils.img_to_array(image)

    image = np.expand_dims(image, axis=0)
    image = preprocess_input(image)
    print(image.shape)

    # load the VGG16 network
    print("[INFO] loading network...")
    model = VGG16(weights="imagenet")

    # classify the image
    print("[INFO] classifying image...")
    preds = model.predict(image)
    #report = decode_predictions(preds)
    #print(report)
    (inID, label, probability) = decode_predictions(preds)[0][0]

    # display the predictions to our screen
    print("ImageNet ID: {}, Label: {}".format(inID, label))
    return label
Example #17
from __future__ import print_function
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.preprocessing import image
from imagenet_utils import decode_predictions, preprocess_input
from VideoDebugger import imshow
from vgg16 import VGG16
import cv2
import numpy as np

(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

model = VGG16(include_top=True,
              weights='vgg16_weights_tf_dim_ordering_tf_kernels.h5')

for img in X_test:
    img = cv2.resize(img, (224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = decode_predictions(model.predict(x))
    print(preds)
    cv2.putText(img, preds[0][0][1], (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,
                (255, 255, 255), 2)
    imshow(img)
Example #18
# load the original image via OpenCV so we can annotate it later, then load
# the input image via the Keras helper utility and convert it to a NumPy array
orig = cv2.imread(args["image"])
print("[INFO] loading and preprocessing image...")
image = image_utils.load_img(args["image"], target_size=(224, 224))
image = image_utils.img_to_array(image)

# our image is now represented by a NumPy array of shape (224, 224, 3)
# (channels-last; older channels-first setups use (3, 224, 224)), but we
# need to expand the dimensions to add a leading batch axis so we can
# pass it through the network -- we'll also preprocess the image by
# subtracting the mean RGB pixel intensity of the ImageNet dataset
image = np.expand_dims(image, axis=0)
image = preprocess_input(image)

# load the VGG16 network
print("[INFO] loading network...")
model = VGG16(weights="imagenet")
print(model.summary())

# classify the image
print("[INFO] classifying image...")
preds = model.predict(image)
P = decode_predictions(preds)
(imagenetID, label, prob) = P[0][0]
print(P)

# display the predictions to our screen
print("ImageNet ID: {}, Label: {}".format(imagenetID, label))
cv2.putText(orig, "Label: {}".format(label), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
cv2.imshow("Classification", orig)
cv2.waitKey(0)
Example #19
while True:
    if predict_now:
        vid = imageio.get_reader(filename, 'ffmpeg')
        for i, im in enumerate(vid):
            print('Frame #:', i)
            rows, cols, channels = im.shape
            im = im[:, cols // 2 - rows // 2:cols // 2 +
                    rows // 2, :]  # crop image so that cols = rows)
            im = resize(
                im, (224, 224))  # resize to 224 x 224, the network input size
            x = image.img_to_array(im)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            #print('Input image shape:', x.shape)
            preds = model.predict(x)
            decoded = decode_predictions(preds)[0]
            predList = [x[0] for x in decoded]
            nameList = [x[1] for x in decoded]
            probList = [x[2] for x in decoded]
            detectedList = [
                [nameList[jj], probList[jj]] for jj in range(len(predList))
                if predList[jj] in vWordList and probList[jj] > 0.09
            ]
            #print(nameList)
            if len(detectedList) > 0:
                print('VEHICLE PRESENT')
                #print(detectedList)
        predict_now = False

    if doexit:
        print('Exiting...')
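One caveat in the frame loop above: an skimage-style resize returns floats scaled to [0, 1], while the VGG/ResNet preprocess_input expects 0-255 pixel values, so the resized frame should be rescaled first. A sketch, assuming resize here is skimage.transform.resize:

# Sketch: keep pixel values in the 0-255 range the Keras preprocessors expect.
im = resize(im, (224, 224))          # skimage resize -> floats in [0, 1]
x = image.img_to_array(im) * 255.0   # back to the 0-255 range
x = preprocess_input(np.expand_dims(x, axis=0))
preds = model.predict(x)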
Example #20
# get all available images
img_names_on_disk = os.listdir(path_images)
img_paths_on_disk = [path_images + x for x in img_names_on_disk]

# loop through all images, make a classification and plot
for img_path in img_paths_on_disk:
    img_name = img_path.split("/")[-1]
    # img = image.load_img(img_path, target_size=(224, 224))
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    preds_dec = decode_predictions(preds)
    print('Predicted:', preds_dec)

    # save predictions in data frame
    pred_labels = [x[1] for x in preds_dec[0]]
    pred_scores = [x[2] for x in preds_dec[0]]
    preds_df = pd.DataFrame({'Species' : pred_labels, 'Score': pred_scores})

    plot_pred_vs_image(img,preds_df,path + 'models/IncepV3_' + img_name.strip('.JPG'))






Example #21
  preds = model.predict(x)
  # Quantization of the model
  if nb_images == 0: # Do it only once.
    nb_wq = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,3,3,3,3] # each element of the vector quantizes 1 layer.
    nb_iq = 6
    qsf   = 0 # Quantize Starting From layer number qsf
    all_weights_bu = back_up()
    non_zero = cnz(all_weights_bu)
    print('Number of MBytes used by weights     : ', non_zero*32/float(8*1024**2)) 
    modelq = model
    all_weightsq = q_weights(nb_wq,all_weights_bu, qsf)
    xq = q_inputs(x, nb_iq)
    non_zeroq = cntz_bits(all_weightsq, nb_wq)
    print('Memory MBytes used by weights        : ', non_zeroq/float(8*1024**2))
  
  predsq = modelq.predict(x)
  print('NON Q predicted : ', decode_predictions(preds))
  print('    Q predicted : ', decode_predictions(predsq))


  result += evaluate_q(preds, predsq)
  # Number of images evaluated
  nb_images += 1
 
  print('Number of images evaluated : ', nb_images)
  print('Percentage of matches      : ', result/float(nb_images))

os.chdir("../.")
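The back_up, q_weights, q_inputs and cntz_bits helpers above are project-specific and not shown here. Purely as an illustration of the idea (not that project's actual scheme), a symmetric uniform quantizer that rounds a weight array to n-bit levels can be sketched as:

# Illustrative only: symmetric uniform quantization of a weight array.
import numpy as np

def quantize_uniform(w, n_bits):
    scale = np.max(np.abs(w)) / (2 ** (n_bits - 1) - 1)  # quantization step
    if scale == 0:
        return w
    return np.round(w / scale) * scale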


Example #22
# Load the VGG16 network
print("[INFO] loading network...")
model = VGG16(weights="imagenet")

for file in files:
    # Load the image using OpenCV
    orig = cv2.imread(file)

    # Load the image using Keras helper ultility
    print("[INFO] loading and preprocessing image...")
    image = image_utils.load_img(file, target_size=(224, 224))
    image = image_utils.img_to_array(image)

    # Add a batch dimension: (224, 224, 3) -> (1, 224, 224, 3)
    # Here "1" is the number of images passed to the network
    # We need it for passing a batch containing several images in a real project
    image = np.expand_dims(image, axis=0)
    image = preprocess_input(image)

    # Classify the image
    print("[INFO] classifying image...")
    preds = model.predict(image)
    (inID, label, prob) = decode_predictions(preds)[0][0]

    # Display the predictions
    print("ImageNet ID: {}, Label: {}".format(inID, label))
    cv2.putText(orig, "Label: {}".format(label), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    cv2.imshow("Classification", orig)
    cv2.waitKey(0)
Example #23
# load the ResNet50 network
print("[INFO] loading network...")
model = ResNet50(weights="imagenet")

# classify the image
print("[INFO] classifying image...")
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_outputs = [layer.output for layer in model.layers]
viz_model = Model(inputs=model.input, outputs=layer_outputs)
preds = model.predict(image)

features = viz_model.predict(image)
pool = features[174]

P = decode_predictions(preds, top=10)

(imagenetID, label1, prob) = P[0][0]

#writing embeddings to TSV file
print(pool.shape)
with open("globel_pool.tsv", "w") as fp:
    fp.write("#\tFeatures (Channels)\n")
    for (i, val) in enumerate(pool[0][0][0]):
        fp.write(str(i + 1))
        fp.write("\t")
        fp.write(str(val))
        fp.write("\n")
    fp.close()

# loop over the predictions and display the rank-5 predictions +
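Indexing the feature list with 174 above is fragile, since it depends on the exact layer ordering of that ResNet50 build; the pooled features can also be pulled out by layer name. A sketch assuming the stock keras.applications ResNet50, where the pooling layer is named 'avg_pool', and the preprocessed `image` batch from the snippet above:

# Sketch: extract one intermediate activation by layer name instead of index.
from keras.applications.resnet50 import ResNet50
from keras.models import Model

model = ResNet50(weights="imagenet")
pool_model = Model(inputs=model.input,
                   outputs=model.get_layer("avg_pool").output)
pool = pool_model.predict(image)   # pooled ImageNet features for the image
print(pool.shape)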
Example #24
# to retrain the new network, we configure which layers are trainable
# (we freeze every layer except the final one)
for layer in custom_resnet_model.layers[:-1]:
	layer.trainable = False

custom_resnet_model.layers[-1].trainable = True

custom_resnet_model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])

# training
t=time.time()
hist = custom_resnet_model.fit(X_train, y_train, batch_size=32, epochs=1, verbose=1, validation_data=(X_test, y_test))
print('training time: %s' % (time.time() - t))
(loss, accuracy) = custom_resnet_model.evaluate(X_test, y_test, batch_size=10, verbose=1)

print("loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))

#### test

img_path = 'kedi.jpg'
img = image.load_img(img_path, target_size=(224, 224))

test_img = image.img_to_array(img)
test_img = np.expand_dims(test_img, axis=0)
test_img = preprocess_input(test_img)

preds = model.predict(test_img)
print('Prediction:', decode_predictions(preds))
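Note that decode_predictions only applies to the 1000-way ImageNet output, which is why the test above runs the base model rather than custom_resnet_model. For the fine-tuned head, the argmax index has to be mapped to the project's own class names. A sketch with a hypothetical class_names list:

# Sketch: report the fine-tuned model's prediction with its own label list.
import numpy as np

class_names = ['cat', 'dog']  # hypothetical fine-tuning classes
probs = custom_resnet_model.predict(test_img)[0]
top = int(np.argmax(probs))
print('Prediction: %s (%.2f%%)' % (class_names[top], 100 * probs[top]))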


Example #25
# our image is now represented by a NumPy array of shape (224, 224, 3)
# (channels-last; older channels-first setups use (3, 224, 224)), but we
# need to expand the dimensions to add a leading batch axis so we can
# pass it through the network -- we'll also preprocess the image by
# subtracting the mean RGB pixel intensity of the ImageNet dataset
image = np.expand_dims(image, axis=0)
image = preprocess_input(image)

# load the VGG16 network
print("[INFO] loading network...")
model = VGG16(weights="imagenet")

# classify the image
print("[INFO] classifying image...")
preds = model.predict(image)
result = decode_predictions(preds, top=1)

(inID, label, val) = decode_predictions(preds)[0][0]
print(result[0])
print(len(result))
#result1 = ([col.strip() for col in part] for part in result)
#print(result1)
#print(decode_predictions(preds)[0])
# display the predictions to our screen
print("ImageNet ID: {}, Label: {}".format(inID, label))

cv2.putText(orig, "Label: {}".format(label), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

plt.ioff()
plt.imshow(orig)
Example #26
	success,frame = vidcap.read()
	frame_count = 0

	model,preprocess_mode = load_model(args)

	prediction_dict = {}

	while success:

		img = load_image(frame,args.model)
		x = image.img_to_array(img)
		x = np.expand_dims(x, axis=0)
		x = preprocess_input(x,mode=preprocess_mode)
		preds = model.predict(x)
	
		prediction = decode_predictions(preds, 3)

		vidcap.set(1,frame_count)
		success,frame = vidcap.read()
		frame_count += offset

		#Print frame prediction
		print ("Frame : %d -> \t %s , \t %s , \t %s" % (frame_count,prediction[0][0][1],prediction[0][1][1],prediction[0][2][1]))

		#Make a dictionary with probs
		for i in range(0, 3):
			if str(prediction[0][i][1]) in prediction_dict:
				prediction_dict[str(prediction[0][i][1])] += prediction[0][i][2]
			else:
				prediction_dict[str(prediction[0][i][1])] = prediction[0][i][2]
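Once the while loop finishes, prediction_dict holds the accumulated probability mass per label across the sampled frames. A short sketch of turning that into a ranked summary for the whole video:

# Sketch: rank the accumulated per-label scores after the frame loop.
ranked = sorted(prediction_dict.items(), key=lambda kv: kv[1], reverse=True)
for label, score in ranked[:5]:
    print("%s : %.3f" % (label, score))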
		
Example #27
def predict(path, model_path, index_file_path, MainUI):

    try:
        result_string = " Detected Object : Probability \n \n"

        if (MainUI.squeezenet_model_loaded == False):
            wx.CallAfter(
                pub.sendMessage,
                "report101",
                message=
                "Loading SqueezeNet model for the first time. This may take few minutes or less than a minute. Please wait. \nLoading....."
            )
            model = SqueezeNet(model_path=model_path)
            wx.CallAfter(
                pub.sendMessage,
                "report101",
                message=
                "SqueezeNet model loaded.. Picture about to be processed.. \nLoading......"
            )
            MainUI.model_collection_squeezenet.append(model)
            MainUI.squeezenet_model_loaded = True
        else:
            wx.CallAfter(pub.sendMessage,
                         "report101",
                         message="Retrieving loaded model. \nLoading........")
            model = MainUI.model_collection_squeezenet[0]
            wx.CallAfter(
                pub.sendMessage,
                "report101",
                message=
                "ResNet model loaded.. Picture about to be processed.. \nLoading......"
            )

        img = image.load_img(path, target_size=(227, 227))
        img = image.img_to_array(img, data_format="channels_last")
        img = np.expand_dims(img, axis=0)

        img = preprocess_input(img, data_format="channels_last")
        wx.CallAfter(
            pub.sendMessage,
            "report101",
            message="Picture is transformed for prediction. \nLoading........")

        prediction = model.predict(img, steps=1)
        wx.CallAfter(
            pub.sendMessage,
            "report101",
            message=
            "Picture prediction is done. Sending in results. \nLoading......")

        predictiondata = decode_predictions(prediction,
                                            top=10,
                                            index_file_path=index_file_path)

        for results in predictiondata:
            countdown = 0
            for result in results:
                countdown += 1
                result_string += "(" + str(countdown) + ") " + str(
                    result[1]) + " : " + str(100 * result[2])[0:4] + "% \n"

        return result_string
    except Exception as e:
        return getattr(e, "message", repr(e))
Example #28
if __name__ == '__main__':
    model = InceptionV3(include_top=True, weights='imagenet')

    cam = cv2.VideoCapture(0)

    while True:
        ret, frame = cam.read()

        cv2.imwrite("output.png", frame)
        img_path = "output.png"
        img = image.load_img(img_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)

        x = preprocess_input(x)

        preds = model.predict(x)
        recognize = decode_predictions(preds)
        print('Label:', recognize[0][0][1])

        cv2.putText(frame, "Label: {}".format(recognize[0][0][1]), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
        cv2.imshow("Show FLAME Image", frame)

        k = cv2.waitKey(1)
        if k == ord('q'):
            break

    cam.release()
    cv2.destroyAllWindows()
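Writing each frame to output.png and reading it back is unnecessary; the BGR frame from OpenCV can be converted and resized in memory instead. A sketch reusing the imports from the example above:

# Sketch: preprocess the OpenCV frame directly instead of via output.png.
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)          # OpenCV frames are BGR
rgb = cv2.resize(rgb, (299, 299))                     # InceptionV3 input size
x = preprocess_input(np.expand_dims(rgb.astype(np.float32), axis=0))
preds = model.predict(x)
print('Label:', decode_predictions(preds)[0][0][1])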
Example #29
def proc(taskFile, rootDir, atOnce=10000):
    good = list()
    bad = list()
    ugly = list()
    interesting = set()

    count = 0
    im = 0

    model = ResNet50(weights='imagenet')

    for x in [
            'car', 'pickup', 'suv', 'truck', 'crossover', 'van', 'minivan',
            'sports_car', 'cab', 'racer', 'convertible', 'jeep', 'ambulance'
    ]:

        interesting.add(x)

    data = readTasking(taskFile)

    startTime = time.time()
    for d in data:

        img_path = '{0}/{1}'.format(rootDir, d['filename'])

        flag = True

        try:
            img = image.load_img(img_path, target_size=(224, 224))

        except:
            # ugly this is not a decodable image
            ugly.append(d)
            flag = False

        if flag:

            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            preds = model.predict(x)
            predictions = decode_predictions(preds)[0][:4]

            found = False

            for prediction in predictions:
                i, t, score = prediction
                if t in interesting:
                    # image was interesting
                    good.append((d, t))
                    found = True
                    break
            if not found:
                # image was not interesting
                bad.append((d, predictions[0][1]))

        if count == atOnce:
            count = 0
            endTime = time.time() - startTime
            im = im + 1
            print('processed:', im * atOnce, 'Images', 'good', len(good),
                  'bad', len(bad), 'ugly', len(ugly), endTime)
            startTime = time.time()
        count = count + 1

    return (good, bad, ugly)
Example #30
accuracy = 0
accuracy5 = 0
recall = 0
precision = 0
for root, d, files in os.walk(v_data_dir):
    for name in files:
        print(i)
        imagePath = str(v_data_dir) + str(name)
        if not imagePath.endswith("JPEG"):
            continue
        img = image_utils.load_img(imagePath, target_size=(224, 224))
        temp = image_utils.img_to_array(img)
        temp = np.expand_dims(temp, axis=0)
        temp = preprocess_input(temp)
        preds = TopModel.predict(temp)
        P = decode_predictions(preds, par)
        (imagenetID, label, prob, index) = P[0][0]
        print("Predicted Label : " + str(label))
        print("ImageNet ID : " + str(imagenetID))
        print("Value Prob : " + str(prob))
        print("Predicted Label : " + str(index))
        if index in predMat:
            predMat[index] += 1
        else:
            predMat[index] = 1
        #print(preds)
        fName = str(name)

        allLabels = dict()
        al = 0
        for y in range(0, par):
Example #31
        else:
            if include_top:
                weights_path = get_file(
                    'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                    TF_WEIGHTS_PATH,
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                    TF_WEIGHTS_PATH_NO_TOP,
                    cache_subdir='models')
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)
    return model


if __name__ == '__main__':

    model = VGG16(include_top=True, weights='imagenet')

    img_path = 'elephant.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))
Example #32
                convert_all_kernels_in_model(model)
        else:
            if include_top:
                weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                                        TF_WEIGHTS_PATH,
                                        cache_subdir='models',
                                        md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
            else:
                weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                        TF_WEIGHTS_PATH_NO_TOP,
                                        cache_subdir='models',
                                        md5_hash='a268eb855778b3df3c7506639542a6af')
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)
    return model


if __name__ == '__main__':
    model = ResNet50(include_top=True, weights='imagenet')

    img_path = 'elephant.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))