Example #1
def test_decode_predictions():
    x = np.zeros((2, 1000))
    x[0, 372] = 1.0
    x[1, 549] = 1.0
    outs = utils.decode_predictions(x, top=1)
    scores = [out[0][2] for out in outs]
    assert scores[0] == scores[1]

    # decode_predictions must raise when the number of columns
    # does not match the 1000 ImageNet classes.
    with pytest.raises(ValueError):
        utils.decode_predictions(np.ones((2, 100)))
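A minimal sketch of the call this test exercises (the input values are illustrative; the exact label text depends on the bundled class-index file):

import numpy as np
from keras.applications import imagenet_utils

probs = np.zeros((1, 1000))
probs[0, 372] = 1.0
# decode_predictions returns one list per sample of (class_id, class_name, score) tuples
decoded = imagenet_utils.decode_predictions(probs, top=1)
print(decoded[0][0])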
Example #2
def predict():
	# initialize the data dictionary that will be returned from the
	# view
	data = {"success": False}

	# ensure an image was properly uploaded to our endpoint
	if flask.request.method == "POST":
		if flask.request.files.get("image"):
			# read the image in PIL format
			image = flask.request.files["image"].read()
			image = Image.open(io.BytesIO(image))

			# preprocess the image and prepare it for classification
			image = prepare_image(image, target=(224, 224))

			# classify the input image and then initialize the list
			# of predictions to return to the client
			with graph.as_default():
				preds = model.predict(image)
				results = imagenet_utils.decode_predictions(preds)
				data["predictions"] = []

				# loop over the results and add them to the list of
				# returned predictions
				for (imagenetID, label, prob) in results[0]:
					r = {"label": label, "probability": float(prob)}
					data["predictions"].append(r)

				# indicate that the request was a success
				data["success"] = True

	# return the data dictionary as a JSON response
	return flask.jsonify(data)
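A hypothetical client for the route above (host, port, and the /predict path are assumptions; the "image" form field matches the handler):

import requests

with open("dog.jpg", "rb") as f:
    r = requests.post("http://localhost:5000/predict", files={"image": f}).json()

if r["success"]:
    for (i, result) in enumerate(r["predictions"]):
        print("{}. {}: {:.4f}".format(i + 1, result["label"], result["probability"]))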
Example #3
def classify_process():
	# load the pre-trained Keras model (here we are using a model
	# pre-trained on ImageNet and provided by Keras, but you can
	# substitute in your own networks just as easily)
	print("* Loading model...")
	model = ResNet50(weights="imagenet")
	print("* Model loaded")

	while True:
		# attempt to grab a batch of images from the database, then
		# initialize the image IDs and batch of images themselves
		queue = db.lrange(IMAGE_QUEUE, 0, BATCH_SIZE - 1)
		imageIDs = []
		batch = None

		# loop over the queue
		for q in queue:
			# deserialize the object and obtain the input image
			q = json.loads(q.decode("utf-8"))
			image = base64_decode_image(q["image"], IMAGE_DTYPE,
				(1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANS))

			# check to see if the batch list is None
			if batch is None:
				batch = image

			# otherwise, stack the data
			else:
				batch = np.vstack([batch, image])

			# update the list of image IDs
			imageIDs.append(q["id"])
		if len(imageIDs) > 0:
			print("* Batch size: {}".format(batch.shape))
			preds = model.predict(batch)
			results = imagenet_utils.decode_predictions(preds)

			# loop over the image IDs and their corresponding set of
			# results from our model
			for (imageID, resultSet) in zip(imageIDs, results):
				# initialize the list of output predictions
				output = []

				# loop over the results and add them to the list of
				# output predictions
				for (imagenetID, label, prob) in resultSet:
					r = {"label": label, "probability": float(prob)}
					output.append(r)

				# store the output predictions in the database, using
				# the image ID as the key so we can fetch the results
				db.set(imageID, json.dumps(output))
 
			# remove the set of images from our queue
			db.ltrim(IMAGE_QUEUE, len(imageIDs), -1)

		# sleep for a small amount
		time.sleep(SERVER_SLEEP)
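A sketch of the producer side this worker assumes (not shown in the original): the web process serializes each image and pushes it onto the same Redis list that classify_process() drains. base64_encode_image is a hypothetical helper mirroring the base64_decode_image call above.

import json
import uuid

def queue_image(db, image_array):
    # image_array: float32 ndarray shaped (1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANS)
    image_id = str(uuid.uuid4())
    payload = {"id": image_id, "image": base64_encode_image(image_array)}
    db.rpush(IMAGE_QUEUE, json.dumps(payload))
    return image_id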
Example #4
def load_fine_tune_googlenet_v3(img):
    # Load the fine-tuned GoogLeNet (Inception) v3 model and run a prediction
    model = InceptionV3(include_top=True, weights='imagenet')
    model.summary()
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))
    plt.subplot(212)
    plt.plot(preds.ravel())
    plt.show()
    return model, x
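A possible invocation of the helper above (the file name is an assumption; InceptionV3 with include_top=True expects 299x299 inputs):

from keras.preprocessing import image

img = image.load_img('elephant.jpg', target_size=(299, 299))
model, x = load_fine_tune_googlenet_v3(img)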
Example #5
def imagenet_imagegraph(imagefile):
	im1=image.load_img(imagefile,target_size=(224,224))
	im1array=image.img_to_array(im1)
	im1array=np.expand_dims(im1array,axis=0)
	im1array=preprocess_input(im1array)
	model=ResNet50(weights="imagenet")
	preds=model.predict(im1array)
	decodepreds=decode_predictions(preds)
	print "Predictions:",decodepreds
	image_to_text=""
	for pred in decodepreds[0]:
		image_to_text += " "
		image_to_text += pred[1]
	imagegraph=RecursiveGlossOverlapGraph(image_to_text)
	print "ImageGraph:",imagegraph
	return imagegraph
Example #6
def upload():
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']

        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(
            basepath, 'uploads', secure_filename(f.filename))
        f.save(file_path)

        # Make prediction
        preds = model_predict(file_path, model)

        # Process your result for human
        # pred_class = preds.argmax(axis=-1)            # Simple argmax
        pred_class = decode_predictions(preds, top=1)   # ImageNet Decode
        result = str(pred_class[0][0][1])               # Convert to string
        return result
    return None
Example #7
    def decode_prob(self, class_probabilities):
        r = imagenet_utils.decode_predictions(class_probabilities,
                                              top=self.top_probs)
        results = [
            [{'code': entry[0],
              'name': entry[1],
              'prob': '{:.3f}'.format(entry[2])}
             for entry in row]
            for row in r
        ]
        classes = imagenet_utils.CLASS_INDEX
        class_keys = list(classes.keys())
        class_values = list(classes.values())

        for result in results:
            for entry in result:
                entry['index'] = int(
                    class_keys[class_values.index([entry['code'],
                                                   entry['name']])])
        return results
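For reference, the reverse lookup above exploits imagenet_utils.CLASS_INDEX, which maps string indices to [class_id, class_name] pairs and is populated lazily by the first decode_predictions call (the same trick used in the final example below):

from keras.applications import imagenet_utils
import numpy as np

_ = imagenet_utils.decode_predictions(np.zeros((1, 1000)))  # forces CLASS_INDEX to load
print(imagenet_utils.CLASS_INDEX['0'])  # ['n01440764', 'tench']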
Example #8
x_nasnet_mobile = preprocessing_image(img_path, (224, 224), nasnet)
x_densenet_121 = preprocessing_image(img_path, (224, 224), densenet)

# Get the predicted probabilities:
preds_inception_v3 = model_inception_v3.predict(x_inception_v3)
preds_vgg_16 = model_vgg_16.predict(x_vgg_16)
preds_vgg_19 = model_vgg_19.predict(x_vgg_19)
preds_resnet_50 = model_resnet_50.predict(x_resnet_50)
preds_mobilenet = model_mobilenet.predict(x_mobilenet)
preds_xception = model_xception.predict(x_xception)
preds_nasnet_mobile = model_nasnet_mobile.predict(x_nasnet_mobile)
preds_densenet_121 = model_densenet_121.predict(x_densenet_121)

# Print the results (class, description, probability):
print('Predicted InceptionV3:',
      decode_predictions(preds_inception_v3, top=5)[0])
print('Predicted VGG16:', decode_predictions(preds_vgg_16, top=5)[0])
print('Predicted VGG19:', decode_predictions(preds_vgg_19, top=5)[0])
print('Predicted ResNet50:', decode_predictions(preds_resnet_50, top=5)[0])
print('Predicted MobileNet:', decode_predictions(preds_mobilenet, top=5)[0])
print('Predicted Xception:', decode_predictions(preds_xception, top=5)[0])
print('Predicted NASNetMobile:',
      decode_predictions(preds_nasnet_mobile, top=5)[0])
print('Predicted DenseNet121:',
      decode_predictions(preds_densenet_121, top=5)[0])

# Show results:
numpy_image = np.uint8(image.img_to_array(image.load_img(img_path))).copy()
numpy_image = cv2.resize(numpy_image, (500, 500))
numpy_image_res = numpy_image.copy()
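This snippet relies on a preprocessing_image helper that is not shown. A plausible definition, assuming it loads an image at the given size and applies the passed-in module's preprocess_input:

from keras.preprocessing import image
import numpy as np

def preprocessing_image(img_path, target_size, module):
    img = image.load_img(img_path, target_size=target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    return module.preprocess_input(x)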
Example #9
 def call_model(img_array):
     img_array = np.expand_dims(img_array, axis=0)
     img_array = preprocess_input(img_array)
     preds = model.predict(img_array)
     preds = decode_predictions(preds, top=number_results)[0]
     return preds
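Hypothetical usage of call_model above, assuming model and number_results are bound in the enclosing scope as the snippet implies (the file name is illustrative):

from keras.preprocessing import image

img = image.load_img('cat.jpg', target_size=(224, 224))
for (class_id, label, prob) in call_model(image.img_to_array(img)):
    print(label, prob)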
Example #10
def classify_process():
    # load the pre-trained Keras model (here we are using a model
    # pre-trained on ImageNet and provided by Keras, but you can
    # substitute in your own networks just as easily)
    print("* Loading model...")
    model = ResNet50(weights="imagenet")
    print("* Model loaded")

    # continually poll for new images to classify
    while True:
        # attempt to grab a batch of images from the database, then
        # initialize the image IDs and batch of images themselves
        queue = db.lrange(IMAGE_QUEUE, 0, BATCH_SIZE - 1)
        imageIDs = []
        batch = None

        # loop over the queue
        for q in queue:
            # deserialize the object and obtain the input image
            q = json.loads(q.decode("utf-8"))
            image = base64_decode_image(
                q["image"], IMAGE_DTYPE,
                (1, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANS))

            # check to see if the batch list is None
            if batch is None:
                batch = image

            # otherwise, stack the data
            else:
                batch = np.vstack([batch, image])

            # update the list of image IDs
            imageIDs.append(q["id"])

        # check to see if we need to process the batch
        if len(imageIDs) > 0:
            # classify the batch
            print("* Batch size: {}".format(batch.shape))
            preds = model.predict(batch)
            results = imagenet_utils.decode_predictions(preds)

            # loop over the image IDs and their corresponding set of
            # results from our model
            for (imageID, resultSet) in zip(imageIDs, results):
                # initialize the list of output predictions
                output = []

                # loop over the results and add them to the list of
                # output predictions
                for (imagenetID, label, prob) in resultSet:
                    r = {"label": label, "probability": float(prob)}
                    output.append(r)

                # store the output predictions in the database, using
                # the image ID as the key so we can fetch the results
                db.set(imageID, json.dumps(output))

            # remove the set of images from our queue
            db.ltrim(IMAGE_QUEUE, len(imageIDs), -1)

        # sleep for a small amount
        time.sleep(SERVER_SLEEP)
Example #11
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

        if K.image_data_format() == 'channels_first' and K.backend(
        ) == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)

    return model


model = ResNet50(weights='imagenet')
img_path = 'F:/elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
Example #12
from keras.applications import InceptionV3
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
import numpy as np
from keras.layers import Input

model = InceptionV3(weights='imagenet')

img = image.load_img('test.jpg', target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
for i in range(5):
    print('Predicted:', decode_predictions(preds, top=5)[0][i])
Example #13
 def _post_process(self, preds):
     return imagenet_utils.decode_predictions(preds)[0]
Example #14
 def decode(predictions):
     pred_arr = np.expand_dims(np.array(predictions), axis=0)
     decoded = decode_predictions(pred_arr, top=topK)[0]
     # convert numpy dtypes to python native types
     return [(t[0], t[1], t[2].item()) for t in decoded]
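A sketch of how decode() above might be driven, assuming topK is in scope and the raw predictions come from a softmax over the 1000 ImageNet classes:

probabilities = model.predict(x)[0]  # length-1000 vector for one sample
for (class_id, name, score) in decode(probabilities):
    print(class_id, name, score)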
Example #15
from keras.layers import Conv2D
from keras.layers import SeparableConv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import _obtain_input_shape
from keras.applications import Xception
from keras.applications.xception import preprocess_input

TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'


if __name__ == '__main__':
    model = Xception(include_top=True, weights='imagenet')

    img_path = 'data\\dogscats\\train\\cats\\cat.10013.jpg'
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print(np.argmax(preds))
    print('Predicted:', decode_predictions(preds, 1))  # ('n02123394', 'Persian_cat', 0.91428012)
Example #16
for path in image_paths:
    
    # For InceptionV3
    seed_img = utils.load_img(path, target_size=(299, 299))
    pred = model.predict(preprocess_input(np.expand_dims(img_to_array(seed_img), axis=0)))
    
    # For VGG16
    # seed_img = utils.load_img(path, target_size=(224, 224))
    # pred_class = np.argmax(model.predict(np.array([img_to_array(seed_img)])))
    # print(utils.get_imagenet_label(pred_class))
    
    
    # seed_img = utils.load_img(path, target_size=(224, 224))
    # pred_class = np.argmax(model.predict(np.array([img_to_array(seed_img)])))
    
    print('Predicted:', decode_predictions(pred))
    print('Predicted:', decode_predictions(pred)[0][0][1])
    
    # derive the predicted class index from the prediction above (needed by visualize_cam below)
    pred_class = np.argmax(pred)

    # Here we are asking it to show attention such that prob of `pred_class` is maximized.
    # heatmap = visualize_saliency(model, layer_idx, [pred_class], seed_img, text=utils.get_imagenet_label(pred_class))
    heatmap = visualize_cam(model, layer_idx, [pred_class], seed_img, text=utils.get_imagenet_label(pred_class))
    heatmaps.append(heatmap)
    
    # Generate three different images of the same output index.
    # vis_images = [visualize_activation(model, layer_idx, filter_indices=idx, text=str(idx), max_iter=500) for idx in [294, 294, 294]]
    # vis_images.append(vis_image)

name = "Gradient-based Localization map"
cv2.imshow(name, utils.stitch_images(heatmaps))
Example #17
            for whypo in root.findall("./SHYPO/WHYPO"):
                word = whypo.get("WORD")
                #ret, frame = cam.read()
                #cv2.imshow("Show FLAME Image", frame)
                if u"何ですか" in word:

                    cv2.imwrite("output.png", frame)
                    img_path = "output.png"
                    img = image.load_img(img_path, target_size=(299, 299))
                    x = image.img_to_array(img)
                    x = np.expand_dims(x, axis=0)

                    x = preprocess_input(x)

                    preds = model.predict(x)
                    recognize = decode_predictions(preds)
                    speak = recognize[0][0][1]
                    if speak == "nematode":
                        jtalk("わかりません")
                        data = ""
                        continue
                    #subprocess.check_output(["espeak", "-k5", "-s150", speak])
                    print('Label:', speak)

                    with open('imagenet_class_index.json', 'r') as f:
                        obj = json.load(f)
                        for i in obj:
                            if i['en'] == speak:
                                jp_speak = i['ja']
                                jp_speak = jp_speak.encode('utf-8')
                                print(jp_speak)
Example #18
#
# print("Final result :")
# for name, score in zip(model.metrics_names, scores):
#     print(name, score)

predictions = model.predict(x=[sentence_test, countries_test, devices_test], batch_size=BATCHSIZE, verbose=1)

k = 5
top_accuracy = top_k_categorical_accuracy(labels_test, predictions, k=k)
top_accuracy = K.get_session().run(top_accuracy)

print()
print("Top %d Accuracy : " % k, top_accuracy)
print()

ground_labels = decode_predictions(labels_test)
label_decoder = get_class_decoder()

results = []

for pred in predictions:
    top_k_preds_indices = pred.argsort()[-k:][::-1]
    result = [(label_decoder.classes_[i], pred[i]) for i in top_k_preds_indices]
    result.sort(key=lambda x: x[-1], reverse=True)
    results.append(result)

for gt, pt in zip(ground_labels, results):
    data = 'GT : %s | ' % gt
    print(data, pt)
    print()
Example #19
 def decode(predictions):
     pred_arr = np.expand_dims(np.array(predictions), axis=0)
     decoded = decode_predictions(pred_arr, top=topK)[0]
     # convert numpy dtypes to python native types
     return [(t[0], t[1], t[2].item()) for t in decoded]
Example #20
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model


if __name__ == '__main__':
    model = ResNet50(include_top=True, weights='imagenet')

    img_path = 'elephant.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))
Example #21
import numpy as np
from keras_squeezenet import SqueezeNet
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg


model = SqueezeNet()

img = image.load_img('pexels-photo-280207.jpeg', target_size=(227, 227))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
all_results = decode_predictions(preds)
for results in all_results:
    for result in results:
        print('Probability %0.2f%% => [%s]' % (100*result[2], result[1]))
        #result_text= 'Probability %0.2f%% => [%s]' % (100*result[2], result[1])
        #break
#plt.figure(num=1,figsize=(8, 6), dpi=80)
#plt.imshow(img)
#plt.text(130,90,result_text,horizontalalignment='center', verticalalignment='center',fontsize=16,color='black')
#plt.axis('off')
#plt.show()

Example #22
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model


if __name__ == '__main__':
    model = ResNet50(include_top=True, weights='imagenet')

    img_path = 'Afghan_hound_00125.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))

Example #23
# -*- coding: utf-8 -*-
'''Inception-ResNet-v2 model for Keras.
'''
from __future__ import print_function

import numpy as np
import warnings

from keras.layers import Input
from keras import layers
from keras.preprocessing import image
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input

if __name__ == '__main__':
    model = InceptionResNetV2(include_top=True, weights='imagenet')

    img_path = 'data\\dogscats\\train\\cats\\cat.10013.jpg'
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))  # ('n02123394', 'Persian_cat', 0.94211012)
Example #24
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1')
                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model


if __name__ == '__main__':
    import glob
    model = VGG16(include_top=True, weights='imagenet')

    img_paths = sorted(glob.glob("*.jpg"))
    for img_path in img_paths:
        print("## {}".format(img_path))
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        for id_, class_name, prob in decode_predictions(preds)[0]:
            print('\t{:0.2f}%: {}'.format((prob * 100), class_name))
Example #25
# print(device_lib.list_local_devices())

# Load the image
x = cv2.imread('./imgs/tests/4.jpg')
x_image = cv2.resize(x, (224, 224))
x_img_rgb = cv2.cvtColor(x_image, cv2.COLOR_BGR2RGB)
x = preprocess_input(x_image)
x = np.expand_dims(x, 0)

# Load the ResNet pre-trained weights
# load model - This will take < 10 min since we have to download weights (about 240 Mb)
# model = ResNet152(input_shape=(224,224,3), weights='imagenet', classes=1000)
model = ResNet50(input_shape=(224, 224, 3), weights='imagenet', classes=1000)
# model = ResNet34(input_shape=(224,224,3), weights='imagenet', classes=1000)

# processing image
y = model.predict(x)

# Results
predictions_array = decode_predictions(y)[0]

# Visualization
plt.imshow(x_img_rgb)

for pred in predictions_array:
    _, class_name, pred_num = pred
    text = class_name + ': ' + str(pred_num)
    print(text)

plt.imshow(x_img_rgb)
Example #26
def img_text_model(url):
    print(">> analysis started !!!")
    t1 = time.time()
    response = requests.get(url)
    main_soup = BeautifulSoup(response.text, "html.parser")

    data = main_soup.findAll('p')
    print(">> Text Analysis")
    s = ''
    for i in data:
        s += " " + i.getText()

    result = re.sub(r'\d+', '', s)
    result_1 = re.sub(r'[^\w\s]', '', result)
    result_1 = result_1.strip()

    stop_words = set(stopwords.words('english'))
    tokens = word_tokenize(result_1.lower())
    result = [i for i in tokens if not i in stop_words]

    stemmer = PorterStemmer()
    #input_str = "There are several types of stemming algorithms."
    #input_str = word_tokenize(result)
    res_1 = []
    for word in result:
        res_1.append(stemmer.stem(word))

    res_2 = []
    lemmatizer = WordNetLemmatizer()
    for word in res_1:
        res_2.append(lemmatizer.lemmatize(word))

    allWordExceptStopDist = nltk.FreqDist(w for w in res_2)
    mostCommon = allWordExceptStopDist.most_common(150)
    print("time elapsed for *Text Analysis*: ", time.time() - t1)
    t1 = time.time()
    r = requests.get(url)
    html = r.text
    soup = BeautifulSoup(html, 'lxml')

    img_links = []
    for word in soup.find_all('body'):
        for k in word.findAll('img'):
            #print(k)
            #print(k['src'])
            #assert(False)
            try:
                k1 = k['src']
                img_links.append(k1)
            except:
                pass
    unique_img_list = list(dict.fromkeys(img_links))
    #plt.show()

    spam_words = [
        'free', 'market', 'credit', 'offer', 'rate', 'remov', 'money', 'email',
        'cash', 'order', 'earn', 'home', 'hidden', 'invest', 'time', 'debt',
        'get', 'stock', 'claim', 'spam', 'new', 'onlin', 'dollar', 'form',
        'mail', 'guarante', 'sale', 'million', 'one', 'stop', 'friend', 'busi',
        'bonu', 'access', 'price', 'call', 'check', 'click', 'deal', 'today',
        'per', 'incom', 'instant', 'give', 'away', 'increas', 'insur', 'lose',
        'weight', 'lower', 'mortgag', 'win', 'winner', 'revers', 'age',
        'asset', 'snore', 'dig', 'dirt', 'disclaim', 'statement', 'compar',
        'cabl', 'convert', 'list', 'instal', 'auto', 'collect', 'lead', 'amaz',
        'ad', 'promis', 'search', 'engin', 'preview', 'bureau', 'accept',
        'appli', 'best', 'billion', 'brand', 'card', 'consolid', 'copi', 'dvd',
        'cost', 'direct', 'dont', 'extra', 'week', 'term', 'elimin', 'e',
        'financi', 'freedom', 'phone', 'prioriti', 'quot', 'sampl', 'trial',
        'websit', 'refund', 'inform', 'traffic', 'request', 'internet', 'join',
        'lifetim', 'limit', 'lowest', 'make', 'solut', 'hundr', 'percent',
        'day', 'prize', 'refin', 'satisfact', 'isnt', 'unsecur', 'vacat',
        'work', 'multi', 'level', 'wrinkl', 'compet', 'grant', 'child',
        'support', 'stuff', 'tell', 'accord', 'law', 'seriou', 'satisfi',
        'accordingli', 'act', 'afford', 'avoid', 'bargain', 'beneficiari',
        'beverag', 'big', 'buck', 'bill', 'address', 'pager', 'buy', 'cancel',
        'combin'
    ]
    h_words = [
        'deadli', 'bale', 'fatal', 'lethal', 'murder', 'pestil', 'imperil',
        'destruct', 'damag', 'danger', 'fight', 'harm', 'deathli', 'fell',
        'mortal', 'termin', 'vital', 'hostil', 'inim', 'unfriendli', 'contagi',
        'infecti', 'infect', 'pestifer', 'pestilenti', 'poison', 'venom',
        'insidi', 'menac', 'omin', 'sinist', 'threaten', 'hazard', 'jeopard',
        'parlou', 'peril', 'riski', 'unsaf', 'unsound', 'nasti', 'noisom',
        'unhealth', 'unhealthi', 'unwholesom', 'killer', 'malign', 'ruinou',
        'advers', 'bad', 'bane', 'deleteri', 'detriment', 'evil', 'hurt',
        'ill', 'injuri', 'mischiev', 'nocuou', 'noxiou', 'pernici',
        'prejudici', 'wick', 'suicid', 'kill', 'knife', 'bomb', 'reveng',
        'gun', 'weapon', 'fire', 'ak', 'effect', 'mg', 'mm', 'target', 'rifl',
        'hk', 'lightweight', 'hit', 'xm', 'acsw', 'submachin', 'hunt',
        'deadliest', 'cau', 'terribl', 'move', 'assault', 'barrel', 'sniper',
        'grenad', 'launcher', 'defen'
    ]

    spam_count = 0
    for i in mostCommon:
        if (i[0] in spam_words):
            spam_count += 1

    if (spam_count > 20):
        print("WebPage Blocked (Spam Content Detected)")
    elif (spam_count > 15):
        print("**Webpage Warning (Spam Content Detected)**")
    elif (spam_count > 10 and spam_count < 15):
        print("**Webpage Warning (Spam Content Detected)**")
        print("\n>> Image Analysis")
        weaps = ['assault_rifle', 'rifle', 'military_uniform', 'pickelhaube']
        il_weap = 0
        #plt.figure(1)
        for img in unique_img_list:
            urllib.request.urlretrieve(str(img), "temp_img.png")
            #load_temp_img = cv2.imread("temp_img.png")
            original = load_img("temp_img.png", target_size=(224, 224))
            numpy_image = img_to_array(original)

            image_batch = np.expand_dims(numpy_image, axis=0)
            #print('image batch size', image_batch.shape)
            #plt.imshow(np.uint8(image_batch[0]))

            processed_image = vgg16.preprocess_input(image_batch.copy())
            predictions = vgg_model.predict(processed_image)

            label = decode_predictions(predictions)
            if (label[0][0][1] in weaps):
                il_weap += 1
                #for i in range(1,il_weap+1):
                #    plt.subplot("21{}".format(i))
                #    plt.imshow(np.uint8(image_batch[0]))
        print("Illegal weapons count {} out of {} ".format(
            il_weap, len(unique_img_list)))
        spam_count = 0
        for i in mostCommon:
            if (i[0] in h_words):
                spam_count += 1
        if (spam_count > 20):
            print("** WebPage Blocked (Illegal Content) **")
        elif (spam_count > 10):
            print("** Webpage Warning ((Illegal Content)) **")
        else:
            if (il_weap > 3):
                print("** webpage has more illegal images **")
            else:
                print("** You are good to go!!! **")
        print("time elapsed for *Image Analysis*: ", time.time() - t1)
    else:
        #Load the VGG model
        print("** No spam in this website.. :)**")
        print("\n>> Image Analysis")
        weaps = ['assault_rifle', 'rifle', 'military_uniform', 'pickelhaube']
        il_weap = 0
        #plt.figure(1)
        for img in unique_img_list:
            urllib.request.urlretrieve(str(img), "temp_img.png")
            #load_temp_img = cv2.imread("temp_img.png")
            original = load_img("temp_img.png", target_size=(224, 224))
            numpy_image = img_to_array(original)

            image_batch = np.expand_dims(numpy_image, axis=0)
            #print('image batch size', image_batch.shape)
            #plt.imshow(np.uint8(image_batch[0]))

            processed_image = vgg16.preprocess_input(image_batch.copy())
            predictions = vgg_model.predict(processed_image)

            label = decode_predictions(predictions)
            if (label[0][0][1] in weaps):
                il_weap += 1
                #for i in range(1,il_weap+1):
                #    plt.subplot("21{}".format(i))
                #    plt.imshow(np.uint8(image_batch[0]))
        print("Illegal weapons count {} out of {} ".format(
            il_weap, len(unique_img_list)))
        spam_count = 0
        for i in mostCommon:
            if (i[0] in h_words):
                spam_count += 1
        if (spam_count > 20):
            print("** WebPage Blocked (Illegal Content) **")
        elif (spam_count > 10):
            print("** Webpage Warning ((Illegal Content)) **")
        else:
            if (il_weap > 3):
                print("** webpage has more illegal images **")
            else:
                print("** You are good to go!!! **")
        print("time elapsed for *Image Analysis*: ", time.time() - t1)
Example #27
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(name='conv_pw_%d_bn' % block_id)(x)
    return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)


def relu6(x):
    return K.relu(x, max_value=6)


def preprocess_input(x):
    # scale pixel values from [0, 255] to [-1, 1]
    x /= 255.
    x -= 0.5
    x *= 2.
    return x


if __name__ == '__main__':
    model = MobileNet(input_shape=(224, 224, 3))
    model.summary()

    img_path = 'elephant.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print(np.argmax(preds))
    print('Predicted:', decode_predictions(preds, 1))  # show only the top-1 prediction
Example #28
plt.imshow(np.uint8(image_batch[0]))



# prepare the image for the Inception-ResNet-v2 model (channel normalisation)
processed_image = inception_resnet_v2.preprocess_input(image_batch.copy())



# get the predicted probabilities for each class
predictions = inceptionResnet_v2_model.predict(processed_image)
# print predictions

# convert the probabilities to class labels
# here we request the top 10 predictions (top 5 is the default)
predicted_tags = decode_predictions(predictions, top=10)
print('Predicted:', predicted_tags)

#
# # get the predicted probabilities for each class
# predictions = nasnet_model.predict(processed_image)
# # print predictions
#
# # convert the probabilities to class labels
# # We will get top 5 predictions which is the default
# print('Predicted:', decode_predictions(predictions, top=10    ))
Example #29
plt.imshow(img)
plt.show()
print(cv_img[:, :, ::-1] - img)
# # print(img)
exit()

img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
org_x = np.copy(x)

x = preprocess_input(x)

features = model.predict(x)
print(features.shape)
print('Predicted:', decode_predictions(features, top=5)[0])

import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim.nets import vgg

input_image = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
# Build the model using the default arg scope parameters.
# arg_scope is a common slim-library mechanism that lets you
# set layer parameters such as stride, padding, and so on.
with slim.arg_scope(vgg.vgg_arg_scope()):
    logits, _ = vgg.vgg_16(
        input_image,
        # logits, _ = resnet_v1.resnet_v1_50(input_image,
        num_classes=1000,
        is_training=False)
Example #30
# process an image to be mobilenet friendly
def process_image(img_path):
  img = image.load_img(img_path, target_size=(224, 224))
  img_array = image.img_to_array(img)
  img_array = np.expand_dims(img_array, axis=0)
  pImg = mobilenet.preprocess_input(img_array)
  return pImg

# main function
if __name__ == '__main__':

  # path to image
  #img_path = "trixi.png"
  #img_path = "trixi_frog.png"
  #img_path = "trixi_sealion.png"
  img_path = sys.argv[1]

  # process the image
  pImg = process_image(img_path)

  # define the mobilenet model (bind it to a new name so the mobilenet module stays accessible)
  mobilenet_model = mobilenet.MobileNet()

  # make predictions on image using mobilenet
  prediction = mobilenet_model.predict(pImg)

  # obtain the top-5 predictions
  results = imagenet_utils.decode_predictions(prediction)
  print(results)
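Optionally, a more readable dump of the nested list returned above (an illustrative addition, not part of the original script):

for (i, (class_id, label, prob)) in enumerate(results[0]):
    print("{}. {}: {:.2f}%".format(i + 1, label, prob * 100))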

Example #31
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Dropout, Flatten, Dense
from keras import applications
from keras.models import Model
from keras.layers import Input
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
#from http://keras-cn.readthedocs.io/en/latest/other/application/

#http://keras-cn.readthedocs.io/en/latest/utils/get_file()
#When using this, the .h5 weights file must be downloaded; network errors sometimes occur, so you can download it in advance, per the official docs, to D:\DeepLearning\Anaconda\Lib\site-packages\keras\datasets
model = applications.mobilenet.MobileNet(input_shape=None,
                                         alpha=1.0,
                                         depth_multiplier=1,
                                         dropout=1e-3,
                                         include_top=True,
                                         weights='imagenet',
                                         input_tensor=None,
                                         pooling=None,
                                         classes=1000)
img = image.load_img('机柜.jpg', target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# decode_predictions(y_pred) returns the 5 highest-probability entries: (class name, description, predicted probability)
for results in decode_predictions(preds):
    for result in results:
        print('Probability %0.2f%% => [%s]' % (100 * result[2], result[1]))
Example #32
datagen = ImageDataGenerator(rescale=1./255)

# test = []

# for i in range(1,11):
# 	location = 'test/' + str(i) + '.jpg'
# 	test.append(io.imread(location))
# 	x = x.reshape((1, 3 , img_width, img_height))
image = image_utils.load_img('testtest/2.jpg', target_size=(img_width, img_height))
image = image_utils.img_to_array(image)
image = np.expand_dims(image, axis=0)
image = preprocess_input(image)
pred = model.predict(image)
print(pred)
exit()
(inID, label, prob) = decode_predictions(pred)[0][0]
exit()
print(inID, label)
exit()
test = datagen.flow_from_directory(
	'testtest',
	batch_size=1,
	target_size=(img_width, img_height),
	classes=None,
	shuffle=False)
test_data = model.predict_generator(test, 1)
# test_data_load = test.reshape((1, 150, 150, 3))
np.save(open('test_data.npy', 'wb'), test_data)
# test_data_load = np.load(open('test_data.npy', 'rb'))
# test = np.array()
print(model.predict_classes(test_data))
Example #33
                dense = model.get_layer(name='fc1')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model


if __name__ == '__main__':
    import glob
    model = VGG16(include_top=True, weights='imagenet')

    img_paths = sorted(glob.glob("*.jpg"))
    for img_path in img_paths:
        print("## {}".format(img_path))
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        for id_, class_name, prob in decode_predictions(preds)[0]:
            print('\t{:0.2f}%: {}'.format((prob * 100), class_name))
Example #34
# -*- coding: utf-8 -*-
'''ResNet50 model for Keras.
# Reference:
- [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
Adapted from code contributed by BigMoyan.
'''
from __future__ import print_function

import numpy as np
import warnings

from keras.layers import Input
from keras import layers
from keras.preprocessing import image
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import preprocess_input
from keras.applications.resnet50 import ResNet50

if __name__ == '__main__':
    model = ResNet50(include_top=True, weights='imagenet')

    img_path = 'data\\dogscats\\train\\cats\\cat.10013.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    print('Input image shape:', x.shape)

    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))  # ('n02123394', 'Persian_cat', 0.87062669)
Example #35
def preprocess(x):
    x = resize(x, (224,224), mode='constant') * 255
    x = preprocess_input(x)
    if x.ndim == 3:
        x = np.expand_dims(x, 0)
    return x

if __name__ == '__main__':

    print('loading ResNet-152 model...', end='')
    model = ResNet152()
    print('done!')

    print('Predicting image with "cat" ...')
    cat = imread('./imgs/cat.jpg')
    x = preprocess(cat)
    y = model.predict(x)
    pred_title = decode_predictions(y, top=1)[0][0][1]
    print('Model prediction: {}\n'.format(pred_title))
    assert pred_title == 'tiger_cat'

    print('Predicting image with "dog" ...')
    dog = imread('./imgs/dog.jpg')
    x = preprocess(dog)
    y = model.predict(x)
    pred_title = decode_predictions(y, top=1)[0][0][1]
    print('Model prediction: {}\n'.format(pred_title))
    assert pred_title == 'Eskimo_dog'
    print('Success!')
Example #36
filename = r'C:\P_workspace\workspace\개발과제\images\card\card0.jpg'
# Load the image in PIL format
original = load_img(filename, target_size=(224, 224))
print('PIL image size', original.size)
plt.imshow(original)
plt.show()

# Convert the PIL image to a numpy array
# numpy array shape: (height, width, channel)
numpy_image = img_to_array(original)
plt.imshow(np.uint8(numpy_image))
plt.show()
print('numpy array size', numpy_image.shape)

# Convert the image to batch format:
# add a dimension on one axis for inference,
# so the network input shape becomes (batchsize, height, width, channels)
image_batch = np.expand_dims(numpy_image, axis=0)
print('image batch size', image_batch.shape)
plt.imshow(np.uint8(image_batch[0]))

# Prepare the input for the model
processed_image = vgg16.preprocess_input(image_batch.copy())

# Predict the probability of each class
predictions = vgg_model.predict(processed_image)

# Convert the predicted probabilities to class labels; the top 5 predicted classes are shown
label = decode_predictions(predictions)
print(label)
Example #37
import lime_nh

model, preprocess = vgg16.VGG16(weights='imagenet',
                                include_top=True), vgg16.preprocess_input

# Load the data from ImageNet and store it in X. Load the images you want to explain into "to_explain"
X, y = shap.datasets.imagenet50()
to_explain = X[[39, 41]]
imagenumber = 0

# Get an overview of how the image you want to explain looks like
plt.imshow(to_explain[imagenumber].astype("int64"))

# Show the top 5 predicted classes for the image
preds = model.predict(to_explain)
for x in decode_predictions(preds)[imagenumber]:
    print(x)

# Print the indexes of the top 5 predicted classes
print(model.predict(to_explain).argsort()[0, -5:][::-1])

# Create Lime Explainer by calling lime_nh.CreateLimeImageExpl
LimeExpl = lime_nh.CreateLimeImageExpl()

# Define arguments for your explanation
# int num_samples = number of perturbed samples the explainer is trained on (suggested: 100)
# int top_labels = number of top labels you want to explain (suggested: 5)
num_samples = 100
top_labels = 5

# Create Lime explanation by calling the function lime_nh.CreateExplanation
Example #38
#name: Image Classification
#description: Image classification based on the EfficientNet model
#language: python
#input: file file
#output: map classes [Detected classes with probabilities]
#tags: demo, panel, files, efficientnet
#condition: file.isFile && file.size < 1e6 && (file.name.endsWith("jpg") || file.name.endsWith("jpeg") || file.name.endsWith("png"))
#help-url: https://github.com/qubvel/efficientnet

import numpy as np
from skimage.io import imread
from efficientnet.keras import EfficientNetB0
from keras.applications.imagenet_utils import decode_predictions
from efficientnet.keras import center_crop_and_resize, preprocess_input

image = imread(file)
model = EfficientNetB0(weights='imagenet')
image_size = model.input_shape[1]
_image = center_crop_and_resize(image, image_size=image_size)
_image = preprocess_input(_image)
_image = np.expand_dims(_image, 0)
predicted = model.predict(_image)
predicted = decode_predictions(predicted)[0]

classes = {}
for p in predicted:
    classes[p[1]] = float(p[2])
Example #39
#Load the image paths, use only first image out of 5 (lpath1)
paths = joblib.load('/Users/Dhanush/Desktop/Cnn2Word2Vec/lpath1.pkl')
print("Total number of images is ", len(paths))
correct = 0
#Lets first find all correct predictions
skip = ['cleaver']
correct_predictions = []
for i in range(len(paths)):
    if vocab[i] in skip:
        continue
    img = image.load_img(paths[i], target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    out = decode_predictions(preds)[0]
    topprediction = out[0][1].lower().strip().replace("-", " ")
    topprediction = topprediction.replace("_", " ")
    #print (topprediction)
    #print (vocab[i],topprediction)
    #print (mapping[topprediction][0],mapping[vocab[i]][0])
    if mapping[topprediction][0] == mapping[vocab[i]][0]:  #It got this correct
        correct_predictions.append(mapping[topprediction][0])
        correct += 1
        continue
print(len(correct_predictions))

predictions = {}
incorrect = 0

print(len(list(set(correct_predictions))))
Example #40
def getPreds_top5(model, imgData):
    preds = model.predict(imgData)
    preds_top5 = decode_predictions(preds, top=5)
    print('Predicted:', preds_top5)
    return preds_top5
Example #41
def transform_img_fn(path_list):
    out = []
    for img_path in path_list:
        img = image.load_img(img_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = inc_net.preprocess_input(x)
        out.append(x)
    return np.vstack(out)

images = transform_img_fn([os.path.join('/content/drive/My Drive/cat_mouse.jpg')])
# I'm dividing by 2 and adding 0.5 because of how this Inception represents images
plt.imshow(images[0] / 2 + 0.5)
preds = inet_model.predict(images)
for x in decode_predictions(preds)[0]:
    print(x)

# Commented out IPython magic to ensure Python compatibility.
# %load_ext autoreload
# %autoreload 2
import os,sys
try:
    import lime
except:
    sys.path.append(os.path.join('..', '..'))  # add the repository root to the path
    import lime
from lime import lime_image

explainer = lime_image.LimeImageExplainer()
Example #42
# define image paths to classify
image_files = glob.glob('images/*/*')

# preprocess images
image_list = [image_preprocessor(path) for path in image_files]
#------------------------------------------------

#####################
# load model and make predictions
#####################
# load vgg19 model pretrained with imagenet
model = keras.applications.VGG19(weights='imagenet')

# get model predictions
preds = [model.predict(image) for image in image_list]
preds = [list(decode_predictions(pred, top=1)[0][0]) for pred in preds]

# convert list of predictions to df and drop class name column
pred_df = pd.DataFrame(preds)
pred_df = pred_df.drop(columns=[0])

#make names match names in R output for consistency
pred_df.columns = ['class_description', 'score']
#------------------------------------------------

#####################
# add dog/cat labels, add file name, sort by score
#####################
#read in breed labels
dog_labs = read_dog_cat_labels('data/dog_classes.txt')
cat_labs = read_dog_cat_labels('data/cat_classes.txt')
Example #43
# load the input image using the Keras helper utility while ensuring
# that the image is resized to 224x224 pixels, the required input
# dimensions for the network -- then convert the PIL image to a
# NumPy array
print("[INFO] loading and preprocessing image...")
image = image_utils.load_img(args["image"], target_size=(224, 224))
image = image_utils.img_to_array(image)

# our image is now represented by a NumPy array of shape (3, 224, 224),
# but we need to expand the dimensions to be (1, 3, 224, 224) so we can
# pass it through the network -- we'll also preprocess the image by
# subtracting the mean RGB pixel intensity from the ImageNet dataset
image = np.expand_dims(image, axis=0)
image = preprocess_input(image)

# load the VGG16 network
print("[INFO] loading network...")
model = MobileNet(weights="imagenet")

# classify the image
print("[INFO] classifying image...")
preds = model.predict(image)
label = decode_predictions(preds, top=3)[0]
print('Predicted:', label)

# display the predictions to our screen
cv2.putText(orig, "Label: {}".format(label), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
cv2.imshow("Classification", orig)
cv2.waitKey(0)
Example #44
def predict(img_list, top=5):
    inputs = preprocess_images(img_list)
    classes = model.predict(inputs, verbose=0)

    return imagenet_utils.decode_predictions(classes, top)
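The preprocess_images helper used above is not shown; a plausible definition, assuming PIL-image inputs resized to 224x224 and the generic imagenet_utils preprocessing:

import numpy as np
from keras.preprocessing import image

def preprocess_images(img_list):
    arrays = []
    for img in img_list:
        img = img.resize((224, 224))
        arrays.append(image.img_to_array(img))
    return imagenet_utils.preprocess_input(np.array(arrays))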
Example #45
preprocess = imagenet_utils.preprocess_input

if args['model'] in ('xception', 'inception'):
    inputShape = (299, 299)
    preprocess = preprocess_input

print('[INFO] loading {}...'.format(args['model']))
Network = MODELS[args['model']]
model = Network(weights='imagenet')

print('[INFO] loading image and preprocessing')
image = load_img(args['image'], target_size=inputShape)
image = img_to_array(image)

image = np.expand_dims(image, axis=0)
image = preprocess(image)

print('[INFO] Classifying Image')

preds = model.predict(image)
P = imagenet_utils.decode_predictions(preds)

for (i, (imageID, label, prob)) in enumerate(P[0]):
    print('[INFO] {}. {} --- {:.2f}%'.format(i + 1, label, prob * 100))

image = cv2.imread(args['image'])
(id, label, prob) = P[0][0]
cv2.putText(image, "Label: {}".format(label), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
cv2.imshow('Preds', image)
cv2.waitKey(0)
Example #46
def decode_predictions(preds, top=5):
    return imagenet_utils.decode_predictions(preds, top=top)
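This thin wrapper only pins the default top-5; a usage sketch (model and a preprocessed input x assumed):

preds = model.predict(x)
for (class_id, label, prob) in decode_predictions(preds, top=3)[0]:
    print(label, prob)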
Example #47
from keras.applications import VGG19
from keras.applications import imagenet_utils
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
import cv2

img_path = 'images/test8.jpg'
img = load_img(img_path)

img = img.resize((224, 224))
img_array = img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
img_array = imagenet_utils.preprocess_input(img_array)
pre_trained_model = VGG19(weights='imagenet')
prediction = pre_trained_model.predict(img_array)
actual_prediction = imagenet_utils.decode_predictions(prediction)
print('Predicted object is: {} with confidence of {:.2f}%'.format(
    actual_prediction[0][0][1], actual_prediction[0][0][2] * 100))

disp_img = cv2.imread(img_path)
cv2.putText(disp_img, actual_prediction[0][0][1], (20, 20),
            cv2.FONT_HERSHEY_TRIPLEX, 0.8, (0, 0, 0))
cv2.imshow('Prediction', disp_img)
cv2.waitKey(0)
Example #48
from keras.applications.inception_v3 import preprocess_input
from keras.preprocessing.image import img_to_array
from keras.preprocessing.image import load_img
import numpy as np
import argparse
import cv2
##################################################################
## initialize the input image shape (224x224 pixels) along with the pre-processing function
# inputShape = (224, 224)
# preprocess = imagenet_utils.preprocess_input
## for Inception and Xception, use the following
inputShape = (299, 299)
preprocess = preprocess_input
##################################################################
## loading model
model = Xception(weights="imagenet")
##################################################################
## loading image; the size must match the input shape above
image = load_img("/Users/coder352/github/jImage/Dream_Afar/Acanalonia conica planthopper.jpg", target_size=inputShape)
image = img_to_array(image)  # a NumPy array of shape (inputShape[0], inputShape[1], 3)
image = np.expand_dims(image, axis=0)  # we need to expand the dimension by making the shape (1, inputShape[0], inputShape[1], 3)
image = preprocess(image)  # pre-process the image using the appropriate function based on the model that has been loaded (i.e., mean subtraction, scaling, etc.)

preds = model.predict(image)
P = imagenet_utils.decode_predictions(preds)

# loop over the predictions and display the rank-5 predictions +
# probabilities to our terminal
for (i, (imagenetID, label, prob)) in enumerate(P[0]):
	print("{}. {}: {:.2f}%".format(i + 1, label, prob * 100))
Example #49
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from keras.applications.imagenet_utils import decode_predictions
import scipy.misc, numpy as np
import sys


if __name__ == "__main__":
    print "constructing network..."
    model = VGG16(weights='imagenet', include_top=True)
    print "done"

    # Forced initialization of keras.applications.imagenet_utils.CLASS_INDEX
    # imagenet_utils kind of hides the CLASS_INDEX from us, that's why this hackery is necessary.
    _ = decode_predictions(np.zeros((1, 1000)))
    from keras.applications.imagenet_utils import CLASS_INDEX

    for filename in sys.argv[1:]:
        img = image.load_img(filename, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        # preds = model.predict(x)
        # print('Predicted:', decode_predictions(preds)[0][1])

        out = model.predict(x).flatten()
        prediction = np.argmax(out)
        top5 = sorted(zip(out, range(len(out))), reverse=True)[:5] # (probability, class_id) pairs.
        top5_probs_and_names = ["%s = %f ," % (CLASS_INDEX[str(prediction)][1], probability) for (probability, prediction) in top5]
        top5_names = [CLASS_INDEX[str(prediction)][1] for (probability, prediction) in top5]