Example no. 1
def reconocerImagen(imageFromUser):
    #resize the image to 299x299 pixels (the input size InceptionV3 expects)
    imageRedim = image.load_img(imageFromUser, target_size=(299, 299))

    #Create an array where each element is a pixel;
    #each pixel is an array of 3 numbers ranging from 0 to 255,
    #one for each RGB color channel
    x = image.img_to_array(imageRedim)

    #Rescale each value from the range 0-255 to the range -1 to 1,
    #where 0 maps to -1 and 255 maps to 1
    x /= 255
    x -= 0.5
    x *= 2

    #Add a leading batch dimension of size 1, since the network expects a 4-D input
    x = x.reshape(1, x.shape[0], x.shape[1], x.shape[2])

    #the pixel array is sent to the neural network (iv3, a loaded InceptionV3 model) to recognize the image
    y = iv3.predict(x)

    #return the predicted class along with its probability
    result = decode_predictions(y)[0][0]
    element = result[1]
    probability = result[2]
    return {"element": element, "probability": probability}


#Example code to run the function:
#prediction = reconocerImagen("img1.jpg")
#print("Image detection: " + prediction["element"] + " with a probability of " + str(int(prediction["probability"]*100)) + "%" )
Example no. 2
def image_classifier():
    model_ip = os.environ['inception_ip']

    address = 'http://%s:8501/v1/models/inception:predict' % (model_ip)

    # alternative: take the image path from a JSON body instead
    #content = request.get_json()
    #img_path = content['instances']
    img_path = request.files['file']
    # load the image from the uploaded file

    img = image.load_img(img_path, target_size=(224, 224))
    # img preprocessing
    x = image.img_to_array(img)

    x = preprocess_input(x)
    data = {"instances": [{'input_1': x.tolist()}]}

    # Making a POST request to the TensorFlow Serving address
    result = requests.post(address, json=data)

    # Decoding results from TensorFlow Serving server
    pred = json.loads(result.content.decode('utf-8'))

    # Returning JSON response to the frontend
    return jsonify(decode_predictions(np.array(pred['predictions']))[0])
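image_classifier() is a Flask view that proxies the uploaded file to a TensorFlow Serving container. A minimal wiring sketch, where the route, port, and imports are guesses based on how the names are used above, not the original app module:

# Assumed Flask wiring for image_classifier()
import json
import os

import numpy as np
import requests
from flask import Flask, jsonify, request
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import (
    preprocess_input, decode_predictions)

app = Flask(__name__)
app.add_url_rule('/classify', view_func=image_classifier, methods=['POST'])

if __name__ == '__main__':
    # requires the inception_ip environment variable to point at the
    # TensorFlow Serving host
    app.run(host='0.0.0.0', port=5000)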
Example no. 3
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import (
    InceptionV3, preprocess_input, decode_predictions)


def classify_images(image_full_path):
    """
    Take the path of an image and return the top 3 classes the network
    thinks the picture belongs to. The classes come from ImageNet
    (1,000 classes in total).
    :return: list of (class, description, probability) tuples
    """

    # Load the desired image
    img = image.load_img(image_full_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    model = InceptionV3(weights="imagenet")
    preds = model.predict(x)
    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)
    tf.keras.backend.clear_session()  # free the model/graph built for this call
    print('Predicted:', decode_predictions(preds, top=3)[0])
    return decode_predictions(preds, top=3)[0]
Example no. 4
 def _get_predictions(self, image_path: str):
     x = image.img_to_array(image.load_img(image_path, target_size=(299, 299)))
     # condition x following TensorFlow's guidelines so the model can work with it
     x /= 255
     x -= 0.5
     x *= 2
     # add a batch dimension to the array, forming the tensor the documentation asks for
     x = x.reshape([1, x.shape[0], x.shape[1], x.shape[2]])
     # load the pretrained InceptionV3 model; if it is not on the machine it will be downloaded
     ict3 = InceptionV3()
     y = ict3.predict(x)
     # return the decoded prediction
     return decode_predictions(y)
Example no. 5
def target_attack(img_path='./YellowLabradorLooking_new.jpg', label=100, target=True, steps=100, step_alpha=1e-4):
    img, model = prepare(img_path)
    label = tf.one_hot(label, 1000)

    for i in range(steps):
        signed_grad = train_step(model, img, label)
        normed_grad = step_alpha * signed_grad
        img = img + normed_grad
        # img = train_step(model, img, label)
        if np.argmax(label) == np.argmax(model(img)):
            break
    result = model.predict(img)
    print(decode_predictions(result, top=1), i)
    return img
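target_attack() leans on two helpers, prepare() and train_step(), that are not shown. A hedged sketch of what they plausibly do (an FGSM-style signed gradient on the input pixels; the details are guesses, not the original code):

# Plausible helpers for target_attack(); assumptions, not the originals.
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import (
    InceptionV3, preprocess_input, decode_predictions)

def prepare(img_path):
    # load and preprocess the image, return it with a pretrained model
    model = InceptionV3(weights='imagenet')
    img = image.load_img(img_path, target_size=(299, 299))
    x = np.expand_dims(image.img_to_array(img), axis=0)
    return preprocess_input(x), model

def train_step(model, img, label):
    # gradient of the target-class loss w.r.t. the input pixels;
    # the negated sign descends the loss toward the target label
    img = tf.convert_to_tensor(img)
    with tf.GradientTape() as tape:
        tape.watch(img)
        loss = tf.keras.losses.categorical_crossentropy(label, model(img))
    return -tf.sign(tape.gradient(loss, img))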
Example no. 6
def predict(cluster_ip):
    data = get_image_data()
    images = preprocess_input(data)

    payload = {"instances": [images[0].tolist()]}

    # sending post request to TensorFlow Serving server
    headers = {'Host': 'imagenet.default.example.com'}
    url = PREDICT_TEMPLATE.format(cluster_ip)
    print("Calling ", url)
    r = requests.post(url, json=payload, headers=headers)
    resp_json = json.loads(r.content.decode('utf-8'))
    preds = np.array(resp_json["predictions"])
    label = decode_predictions(preds, top=1)

    plt.imshow(data[0])
    plt.title(label[0])
    plt.show()
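predict() assumes a PREDICT_TEMPLATE constant and a get_image_data() loader from the enclosing script (the Host header suggests a KFServing/Knative-style ingress). A hedged sketch of those pieces; the values are guesses:

# Assumed constants/helpers for predict()
import json
import numpy as np
import requests
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import (
    preprocess_input, decode_predictions)

PREDICT_TEMPLATE = 'http://{0}/v1/models/imagenet:predict'

def get_image_data(img_path='elephant.jpg'):  # hypothetical sample file
    img = image.load_img(img_path, target_size=(299, 299))
    # uint8 lets plt.imshow(data[0]) display raw pixels correctly, and
    # forces preprocess_input to work on a float copy instead of
    # modifying data in place
    return np.expand_dims(image.img_to_array(img), axis=0).astype('uint8')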
Example no. 7
def predict():
    if request.method == 'POST':
        if 'img' not in request.files:
            return "No File Found", 400
        x = load_img(request.files['img'], target_size=(299, 299))
        x = img_to_array(x)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        pred = model.predict(x)
        decode_pred = decode_predictions(pred, top=1)
        name = np.array(decode_pred)
        output = name[0][0][1]
        print('Output: ', output)
        result = output
        total_predict = predict_views()

        return render_template("index.html",
                               output="You have predicted : " + str(output),
                               total_view=total_view,
                               total_predict=total_predict)
Example no. 8
def upload():
    if request.method == 'POST':
        f = request.files['file']
        print(f)
        # Save the file to ./uploads
        basepath = os.path.dirname(__file__)
        file_path = os.path.join(basepath, 'uploads',
                                 secure_filename(f.filename))
        f.save(file_path)

        # Make prediction
        preds = model_predict(file_path, model)

        # Process the result for humans
        # pred_class = pred.argmax(axis=-1)
        pred_class = decode_predictions(preds, top=1)
        result = str(pred_class[0][0][1])
        return render_template('./predict.html', result=result)
    else:
        return render_template('./index.html')
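upload() calls a model_predict() helper that is not shown. A minimal sketch of it, assuming the usual 299x299 InceptionV3 preprocessing used elsewhere on this page (the details are a guess):

# Assumed helper for upload()
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import preprocess_input

def model_predict(img_path, model):
    img = image.load_img(img_path, target_size=(299, 299))
    x = np.expand_dims(image.img_to_array(img), axis=0)
    return model.predict(preprocess_input(x))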
Example no. 9
def GradCam(original_image, intensity=0.5, resolution=250):
    img = image.load_img(original_image, target_size=(DIM, DIM))

    X = image.img_to_array(img)
    X = np.expand_dims(X, axis=0)
    X = preprocess_input(X)

    predictions = model.predict(X)
    print(decode_predictions(predictions)[0][0][1])

    with tf.GradientTape() as tape:
        last_conv_layer = model.get_layer('conv2d_93')
        iterate = tf.keras.models.Model([model.inputs],
                                        [model.output, last_conv_layer.output])
        model_out, last_conv_layer = iterate(X)
        class_out = model_out[:, np.argmax(model_out[0])]
        grads = tape.gradient(class_out, last_conv_layer)
        pooled_grads = keras.backend.mean(grads, axis=(0, 1, 2))

    heatmap = tf.reduce_mean(tf.multiply(pooled_grads, last_conv_layer),
                             axis=-1)
    heatmap = np.maximum(heatmap, 0)
    heatmap /= np.max(heatmap)
    heatmap = heatmap.reshape(8, 8)

    img = cv2.imread(original_image)

    heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))

    heatmap = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)

    img = heatmap * intensity + img

    cv2.imshow(
        'original image',
        cv2.resize(cv2.imread(original_image), (resolution, resolution)))
    cv2.imshow('image with heatmap', cv2.resize(img, (resolution, resolution)))
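GradCam() relies on a module-level model and DIM, plus several imports. A hedged setup-and-usage sketch; DIM=299 and InceptionV3 are assumptions, and 'conv2d_93' is only the name the last conv layer typically gets in a fresh TF2 InceptionV3 graph:

# Assumed setup and usage for GradCam(); names are guesses.
import cv2
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import (
    InceptionV3, decode_predictions, preprocess_input)

DIM = 299
model = InceptionV3(weights='imagenet')

GradCam('elephant.jpg')  # hypothetical sample image
cv2.waitKey(0)           # keep the OpenCV windows open
cv2.destroyAllWindows()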
Example no. 10
os.makedirs(model_path, exist_ok=True)

img_height = 299

model = Net(weights="imagenet", input_shape=(img_height, img_height, 3))

# Load the image for prediction.
img = image.load_img(img_path, target_size=(img_height, img_height))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print("Predicted:", decode_predictions(preds, top=3)[0])
# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]

# Save the h5 file to path specified.
model.save(model_fname)

# ## Benchmark Keras prediction speed.


import time

times = []
for i in range(20):
    start_time = time.time()
    preds = model.predict(x)
    times.append(time.time() - start_time)
print("average prediction time: %.4f s" % np.mean(times))
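This notebook-style fragment leaves Net, img_path, model_path, and model_fname to earlier cells. A hedged guess at those definitions; InceptionV3 and the paths are assumptions:

# Assumed earlier-cell definitions
import os
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import (
    InceptionV3 as Net, preprocess_input, decode_predictions)

model_path = './models'
model_fname = os.path.join(model_path, 'inception_v3.h5')
img_path = 'elephant.jpg'  # hypothetical sample image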
Example no. 11
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.inception_v3 import preprocess_input, decode_predictions
import numpy as np


MODEL_NAME = "inception_v3"
MODEL_VERSION = 1

model = InceptionV3(weights='imagenet')
target_size = (299, 299)

img_path = 'images/lion.jpg'
img = image.load_img(img_path, target_size=target_size)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])

model.save(f'models/{MODEL_NAME}/{MODEL_VERSION}')
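The models/<name>/<version> layout written by model.save() is what TensorFlow Serving expects. A hedged client sketch, assuming the server runs locally on the default REST port 8501; x and MODEL_NAME come from the snippet above:

# Assumed TF Serving client; host and port are guesses.
import json
import requests

payload = {"instances": x.tolist()}
r = requests.post(
    'http://localhost:8501/v1/models/{}:predict'.format(MODEL_NAME),
    json=payload)
serving_preds = np.array(json.loads(r.content.decode('utf-8'))['predictions'])
print('Served:', decode_predictions(serving_preds, top=3)[0])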
Example no. 12
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras.applications.inception_v3 import decode_predictions

# Build the InceptionV3 model
model = InceptionV3(weights="imagenet", include_top=True)
# Load the test image
img = load_img("koala.png", target_size=(299, 299))
x = img_to_array(img)  # convert to a NumPy array
print("x.shape: ", x.shape)
# Reshape to (1, 299, 299, 3)
img = x.reshape((1, x.shape[0], x.shape[1], x.shape[2]))
# Preprocess the data
img = preprocess_input(img)
print("img.shape: ", img.shape)
# Use the model to make a prediction
Y_pred = model.predict(img)
# Decode the prediction results
label = decode_predictions(Y_pred)
result = label[0][0]  # take the most likely result
print("%s (%.2f%%)" % (result[1], result[2] * 100))
Example no. 13
 def decode_predict(self, prediction):
     return decode_predictions(prediction)
Example no. 14
    def DeepExplain(self):
        import keras
        from keras.datasets import mnist
        from keras.models import Sequential, Model
        from keras.layers import Dense, Dropout, Flatten, Activation
        from keras.layers import Conv2D, MaxPooling2D
        from keras import backend as K
        from imageio import imread
        import tensorflow as tf
        from tensorflow.contrib.slim.nets import inception

        slim = tf.contrib.slim

        # Import DeepExplain
        from deepexplain.tensorflow import DeepExplain

        if self.exp_model == "InceptionV3" or self.dataset_name == "imagenet":
            # Assigning labels
            K.clear_session()
            self.exp_model = InceptionV3(weights='imagenet')
            image_shape = (299, 299, 3)

            self.exp_data = preprocess_input(self.exp_data)

            preds = self.exp_model.predict(self.exp_data)
            labels = decode_predictions(preds, top=1000)

            N, row, col, ch = self.exp_data.shape
            for i in range(N):
                tmp = []
                for j in range(1000):
                    tmp.append(labels[i][j][1:])
                self.explanations_labels['labels'].append(tmp)

            ## TODO
            import sys, os
            sys.path.append(os.getcwd())

            # Load the Inception V3 model from TensorFlow Slim, restore its
            # parameters from the checkpoint, and run the classifier on the input data
            num_classes = 1001

            # Select the model here. Use adv_inception_v3 to use the weights of
            # an adversarially trained Inception V3. Explanations will be more sparse.

            checkpoint = 'data/models/inception_v3.ckpt'
            # checkpoint = 'data/models/adv_inception_v3.ckpt'

            tf.reset_default_graph()
            sess = tf.Session()

            # Since we will explain it, the model has to be wrapped in a DeepExplain
            # context
            with DeepExplain(session=sess, graph=sess.graph) as de:
                X = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))

                with slim.arg_scope(inception.inception_v3_arg_scope()):
                    tmp, end_points = inception.inception_v3(
                        X, num_classes=num_classes, is_training=False)

                logits = end_points['Logits']
                yi = tf.argmax(logits, 1)

                saver = tf.train.Saver(slim.get_model_variables())
                saver.restore(sess, checkpoint)

                # filenames, xs = load_images()
                # labels = sess.run(yi, feed_dict={X: xs})
                # print (filenames, labels)

            # Compute attributions for the classified images
            # Every DeepExplain method must be called in a DeepExplain context.
            # In this case, we use two different contexts to create the model and to
            # run the explanation methods. This works as long as the same session is
            #  provided.
            print('Generating explanation....')
            start_time = timeit.default_timer()

            with DeepExplain(session=sess) as de:
                if self.exp_method == "occlusion":
                    attributions_exp = de.explain('occlusion',
                                                  tf.reduce_max(logits, 1),
                                                  X,
                                                  self.exp_data,
                                                  window_shape=(15, 15, 3))
                elif self.exp_method == "shapley_sampling":
                    attributions_exp = de.explain('shapley_sampling',
                                                  tf.reduce_max(logits, 1),
                                                  X,
                                                  self.exp_data,
                                                  samples=100)
                else:
                    attributions_exp = de.explain(self.exp_method,
                                                  tf.reduce_max(logits, 1), X,
                                                  self.exp_data)
                    # attributions = {
                    # Gradient-based
                    # NOTE: reduce_max is used to select the output unit for the
                    # class predicted by the classifier
                    # For an example of how to use the ground-truth labels instead,
                    # see mnist_cnn_keras notebook
                    # 'Saliency maps':        de.explain('saliency',
                    #   tf.reduce_max(logits, 1), X, xs),
                    # 'Gradient * Input':     de.explain('grad*input',
                    #   tf.reduce_max(logits, 1), X, xs),
                    # 'Integrated Gradients': de.explain('intgrad',
                    #   tf.reduce_max(logits, 1), X, xs),
                    # 'Epsilon-LRP':          de.explain('elrp',
                    #   tf.reduce_max(logits, 1), X, xs),
                    # 'DeepLIFT (Rescale)':   de.explain('deeplift',
                    #   tf.reduce_max(logits, 1), X, xs),
                    # Perturbation-based (comment out to evaluate, but this will take
                    # a while!)
                    # 'Occlusion [15x15]':    de.explain('occlusion',
                    #   tf.reduce_max(logits, 1), X, xs, window_shape=(15,15,3), step=4)
                    # }
                print('\n\n    Finished, took {:.2f} s'.format(
                    timeit.default_timer() - start_time))

            # Setting anchor pixels
            for i in range(N):
                pos_anchor = []
                neg_anchor = []

                mean_ = np.mean(attributions_exp[i])
                max_ = np.max(attributions_exp[i])
                tmp = mean_ + (max_ - mean_) / 10

                for r in range(row):
                    for c in range(col):
                        if ch == 1:
                            if attributions_exp[i][r][c] > 0.05:
                                pos_anchor.append([r, c])
                            if attributions_exp[i][r][c] < -0.05:
                                neg_anchor.append([r, c])
                        else:
                            count = 0
                            for k in range(ch):
                                if attributions_exp[i][r][c][k] > tmp:
                                    count += 1
                            if count == 0:
                                pos_anchor.append([r, c])

                self.explanations_labels['explanation_anchor'].append(
                    pos_anchor)
                self.explanations_labels['explanation_anti_anchor'].append(
                    neg_anchor)

        else:
            import sys, os
            sys.path.append(os.getcwd())
            from sample_nn import GetnnModel

            if self.exp_model is None:
                print('Building model for given dataset...')
                self.exp_model = GetnnModel(self.dataset_name)
                print('    Finished.')
            # DeepExplain

            print('Generating explanation....')
            start_time = timeit.default_timer()
            with DeepExplain(
                    session=K.get_session()) as de:  # <- init DeepExplain
                # Need to reconstruct the graph in DeepExplain context, using the
                # same weights.
                # With Keras this is very easy:
                # 1. Get the input tensor to the original model
                input_tensor = self.exp_model.layers[0].input

                # 2. We now target the output of the last dense layer (pre-softmax).
                # To do so, create a new model sharing the same layers until the last
                # dense layer (index -2)
                fModel = Model(inputs=input_tensor,
                               outputs=self.exp_model.layers[-2].output)
                target_tensor = fModel(input_tensor)

                xs = self.exp_data
                ys = self.exp_model.predict(xs)

                # Setting explanations_labels['labels']
                N, row, col, ch = self.exp_data.shape
                for i in range(N):
                    tmp = []
                    for j in range(len(ys[0])):
                        tmp.append((str(j), ys[i][j]))
                    self.explanations_labels['labels'].append(tmp)

                attributions_exp = de.explain(self.exp_method,
                                              target_tensor,
                                              input_tensor,
                                              xs,
                                              ys=ys)

                # Setting anchor pixels
                for i in range(N):
                    pos_anchor = []
                    neg_anchor = []
                    # when channels = 3

                    for r in range(row):
                        for c in range(col):
                            if ch == 1:
                                if attributions_exp[i][r][c] > 0.05:
                                    pos_anchor.append([r, c])
                                if attributions_exp[i][r][c] < -0.05:
                                    neg_anchor.append([r, c])
                            else:
                                if np.mean(attributions_exp[i][r][c]) > 0.04:
                                    pos_anchor.append([r, c])

                    self.explanations_labels['explanation_anchor'].append(
                        pos_anchor)
                    self.explanations_labels['explanation_anti_anchor'].append(
                        neg_anchor)

            print('\n\n    Finished, took {:.2f} s'.format(
                timeit.default_timer() - start_time))
Example no. 15
    def LimeImage(self):
        '''
        Give a LIME-method explanation on the given data
        for the given model.
        '''
        image_shape = self.exp_data.shape[1:]
        if self.predict_fn is None:

            if self.exp_model == "InceptionV3":
                K.clear_session()
                model = InceptionV3(weights='imagenet')
                image_shape = (299, 299, 3)

                self.exp_data = preprocess_input(self.exp_data)

                preds = model.predict(self.exp_data)
                labels = decode_predictions(preds, top=1000)

                N, _, _, _ = self.exp_data.shape
                for i in range(N):
                    tmp = []
                    for j in range(1000):
                        tmp.append(labels[i][j][1:])
                    self.explanations_labels['labels'].append(tmp)

                self.predict_fn = lambda x: model.predict(x)

        else:

            prob = self.predict_fn(self.exp_data)
            N, _, _, _ = self.exp_data.shape
            for i in range(N):
                tmp = []
                for j in range(len(prob[0])):
                    tmp.append((str(j), prob[i][j]))
                self.explanations_labels['labels'].append(tmp)

        if self.predict_fn is None:
            raise Exception(
                "Error: no prediction function or valid model was given.")

        from lime import lime_image
        from lime.wrappers.scikit_image import SegmentationAlgorithm

        explainer = lime_image.LimeImageExplainer(verbose=False)
        # segmenter = SegmentationAlgorithm('quickshift', kernel_size=1, max_dist=200, ratio=0.2)

        N, row, col, ch = self.exp_data.shape
        num_samples = 1000
        segmenter = None

        if row <= 128 or col <= 128:
            num_samples = 10000
            if row == 28 and col == 28:
                # MNIST
                segmenter = SegmentationAlgorithm('quickshift',
                                                  kernel_size=1,
                                                  max_dist=200,
                                                  ratio=0.2)
            elif row == 64 and col == 64:
                # olivetti_faces
                segmenter = SegmentationAlgorithm('slic',
                                                  n_segments=100,
                                                  compactness=1,
                                                  sigma=1)

        print('Generating explanation for method {}'.format(self.exp_method))
        for n in range(N):
            # self.explanations_labels['explanation_anchor'][n] = []
            anchor_points = []
            start_time = timeit.default_timer()
            explanation = explainer.explain_instance(
                self.exp_data[n],
                # np.array(pill_transf((transforms.ToPILImage()(self.exp_data[n])))), ### TESTING
                classifier_fn=self.predict_fn,
                top_labels=5,
                hide_color=0,
                num_samples=num_samples,
                segmentation_fn=segmenter)
            print('  \nData Point {} explanation took {:.2f} sec'.format(
                n,
                timeit.default_timer() - start_time))

            temp, mask = explanation.get_image_and_mask(
                explanation.top_labels[0],
                positive_only=False,
                num_features=10,
                hide_rest=True)

            for i in range(row):
                for j in range(col):
                    if np.count_nonzero(temp[i][j]) > 0:
                        anchor_points.append([i, j])

            self.explanations_labels['explanation_anchor'].append(
                anchor_points)
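The temp/mask pair from get_image_and_mask() can also be rendered directly; a common pattern from the LIME docs (an addition, not part of the original class) is skimage's mark_boundaries:

# Optional visualization sketch
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries

# temp is in the [-1, 1] inception range here, so shift back to [0, 1]
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
plt.axis('off')
plt.show()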
Example no. 16
import argparse
import json

import numpy as np
import requests
from tensorflow.keras.applications import inception_v3
from tensorflow.keras.preprocessing import image

# Argument parser for giving input image_path from command line
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path of the image")
args = vars(ap.parse_args())

image_path = args['image']
# Preprocessing our input image
img = image.img_to_array(image.load_img(image_path, target_size=(224, 224))) / 255.

# this line is added because of a bug in tf_serving(1.10.0-dev)
img = img.astype('float16')

print(img.shape)

data = {"instances": [img.tolist()]}
print(type(img.tolist()))
headers = {"content-type": "application/json"}

# sending post request to TensorFlow Serving server
r = requests.post('http://<YOUR AWS EC2 ROUTE>:8080/v1/models/INCEPTION:predict', json=data, headers=headers)
print(r)
predictions = json.loads(r.content.decode('utf-8'))["predictions"]

# Decoding the response
# decode_predictions(preds, top=5) gives the top 5 results by default
# You can pass "top=10" to get the top 10 predictions
resultados = inception_v3.decode_predictions(np.array(predictions))[0]

for i in resultados:
    print("Predicted object", i[1], "with a probability of", i[2])
Example no. 17
import argparse
import json

import numpy as np
import requests
from tensorflow.keras.applications import inception_v3
from tensorflow.keras.preprocessing import image

# Argument parser for giving input image_path from command line
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path of the image")
args = vars(ap.parse_args())

image_path = args['image']
# Preprocessing our input image
img = image.img_to_array(image.load_img(image_path,
                                        target_size=(224, 224))) / 255.

# this line is added because of a bug in tf_serving(1.10.0-dev)
img = img.astype('float16')

payload = {"instances": [{'input_image': img.tolist()}]}

# sending post request to TensorFlow Serving server
r = requests.post('http://192.168.137.8:9000/v1/models/ImageClassifier:predict',
                  json=payload)
# r = requests.post('http://localhost:9000/v1/models/ImageClassifier:predict', json=payload)
pred = json.loads(r.content.decode('utf-8'))

# Decoding the response
# decode_predictions(preds, top=5) gives the top 5 results by default
# You can pass "top=10" to get the top 10 predictions
print(
    json.dumps(
        inception_v3.decode_predictions(np.array(pred['predictions']))[0]))
Example no. 18
    def set_model(self, model_name, top_n=5):
        if model_name == 'densenet':
            self.model = densenet.DenseNet121(include_top=True,
                                              weights='imagenet',
                                              input_tensor=None,
                                              input_shape=None,
                                              pooling=None,
                                              classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: densenet.decode_predictions(x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1608.06993' target='_blank'>
                Densely Connected Convolutional Networks</a> (CVPR 2017 Best Paper Award)</li>
                </ul>
                """

        elif model_name == 'inception_resnet_v2':
            self.model = inception_resnet_v2.InceptionResNetV2(
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_resnet_v2.decode_predictions(
                x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1602.07261' target='_blank'>
                Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</a></li>
                </ul>
                """

        elif model_name == 'inception_v3':
            self.model = inception_v3.InceptionV3(include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  input_shape=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_v3.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1512.00567' target='_blank'>
                Rethinking the Inception Architecture for Computer Vision</a></li>
                </ul>
                """

        elif model_name == 'mobilenet':
            self.model = mobilenet.MobileNet(input_shape=None,
                                             alpha=1.0,
                                             depth_multiplier=1,
                                             dropout=1e-3,
                                             include_top=True,
                                             weights='imagenet',
                                             input_tensor=None,
                                             pooling=None,
                                             classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1704.04861' target='_blank'>
                MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications</a></li>
                </ul>
                """

        elif model_name == 'mobilenet_v2':
            self.model = mobilenet_v2.MobileNetV2(input_shape=None,
                                                  alpha=1.0,
                                                  include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet_v2.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1801.04381' target='_blank'>
                MobileNetV2: Inverted Residuals and Linear Bottlenecks</a></li>
                </ul>
                """

        elif model_name == 'nasnet':
            self.model = nasnet.NASNetLarge(input_shape=None,
                                            include_top=True,
                                            weights='imagenet',
                                            input_tensor=None,
                                            pooling=None,
                                            classes=1000)
            self.target_size = (331, 331)  # NASNetLarge's default input size
            self.decoder = lambda x: nasnet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1707.07012' target='_blank'>
                Learning Transferable Architectures for Scalable Image Recognition</a></li>
                </ul>
                """

        elif model_name == 'resnet50':
            self.model = resnet50.ResNet50(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: resnet50.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li>ResNet : 
                <a href='https://arxiv.org/abs/1512.03385' target='_blank'>Deep Residual Learning for Image Recognition
                </a></li>
                </ul>
                """

        elif model_name == 'vgg16':
            self.model = vgg16.VGG16(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg16.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>
            Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'vgg19':
            self.model = vgg19.VGG19(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg19.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'xception':
            self.model = xception.Xception(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: xception.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1610.02357' target='_blank'>Xception: Deep Learning with Depthwise Separable Convolutions</a></li>
            </ul>"""

        else:
            logger.error('Unknown model name: %s', model_name)
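A hedged usage sketch for set_model(); 'clf' stands for whatever instance owns it. Note the method stores no per-model preprocess_input, so the caller has to pick the one matching the selected model:

# Hypothetical usage; names other than model/target_size/decoder are guesses.
import numpy as np
from tensorflow.keras.applications import inception_v3
from tensorflow.keras.preprocessing import image

clf.set_model('inception_v3', top_n=3)
img = image.load_img('cat.jpg', target_size=clf.target_size)
x = inception_v3.preprocess_input(
    np.expand_dims(image.img_to_array(img), axis=0))
print(clf.decoder(clf.model.predict(x)))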
Example no. 19
from io import BytesIO

import requests
from PIL import Image
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_v3 import decode_predictions
from tensorflow.keras.applications.inception_v3 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array

import keract

model = InceptionV3()

url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/1/14/Gatto_europeo4.jpg/250px-Gatto_europeo4.jpg'
response = requests.get(url)
image = Image.open(BytesIO(response.content))
image = image.crop((0, 0, 299, 299))
image = img_to_array(image)
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
image = preprocess_input(image)
yhat = model.predict(image)
label = decode_predictions(yhat)
label = label[0][0]
print('{} ({})'.format(label[1], label[2] * 100))  # a tabby is a cat!

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
activations = keract.get_activations(model, image)
keract.display_activations(activations)
Example no. 20
    def AnchorImage(self):
        '''
        Give an Anchor-method explanation on the given data
        for the given model.
        '''
        image_shape = self.exp_data.shape[1:]
        if self.predict_fn is None:

            if self.exp_model == "InceptionV3":
                K.clear_session()
                model = InceptionV3(weights='imagenet')
                image_shape = (299, 299, 3)

                self.exp_data = preprocess_input(self.exp_data)

                preds = model.predict(self.exp_data)
                labels = decode_predictions(preds, top=1000)

                N, _, _, _ = self.exp_data.shape
                for i in range(N):
                    tmp = []
                    for j in range(1000):
                        tmp.append(labels[i][j][1:])
                    self.explanations_labels['labels'].append(tmp)

                self.predict_fn = lambda x: model.predict(x)

            # else:
            #   self.predict_fn = model.predict

        else:
            prob = self.predict_fn(self.exp_data)
            N, _, _, _ = self.exp_data.shape
            for i in range(N):
                tmp = []
                for j in range(len(prob[0])):
                    tmp.append((str(j), prob[i][j]))
                self.explanations_labels['labels'].append(tmp)

        if self.predict_fn is None:
            raise Exception(
                "Error: no prediction function or valid model was given.")

        ###

        beam_size = 1
        threshold = .95
        segmentation_fn = 'slic'
        kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5}
        N, row, col, ch = self.exp_data.shape

        if row <= 128 or col <= 128:
            from skimage import segmentation

            if row == 28 and col == 28:
                # MNIST
                beam_size = 2
                threshold = .98
                segmentation_fn = segmentation.felzenszwalb
                kwargs = {'scale': 50, 'min_size': 50, 'sigma': .1}
            elif row == 64 and col == 64:
                # olivetti_faces
                beam_size = 2
                threshold = .98
        ###

        explainer = AnchorImage(self.predict_fn,
                                image_shape,
                                segmentation_fn=segmentation_fn,
                                segmentation_kwargs=kwargs,
                                images_background=None)

        print('Generating explanation for method {}'.format(self.exp_method))
        for n in range(N):
            # self.explanations_labels['explanation_anchor'][n] = []
            anchor_points = []
            # self.explanations_labels['labels'] = self.predict_fn(self.exp_data[n])
            start_time = timeit.default_timer()
            explanation = explainer.explain(self.exp_data[n],
                                            threshold=threshold,
                                            p_sample=.5,
                                            tau=0.25,
                                            beam_size=beam_size)
            print('  Data Point {} explanation took {:.2f} sec'.format(
                n,
                timeit.default_timer() - start_time))

            segments_array = explanation.data['segments']
            superpixels_in_anchor = explanation.data['raw']['feature']
            for i in range(row):
                for j in range(col):
                    if segments_array[i][j] in superpixels_in_anchor:
                        anchor_points.append([i, j])
            self.explanations_labels['explanation_anchor'].append(
                anchor_points)
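The constructor and explain() signatures used above match alibi's AnchorImage explainer; the method leaves its imports to the enclosing module. A plausible import block (an assumption, not the original):

# Assumed imports for the AnchorImage() method above
import timeit
import numpy as np
from alibi.explainers import AnchorImage
from tensorflow.keras import backend as K
from tensorflow.keras.applications.inception_v3 import (
    InceptionV3, decode_predictions, preprocess_input)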