Example #1
import os
from pathlib import Path
from typing import Optional

import tensorflow as tf
from alibi.explainers import AnchorImage

def make_anchor_image(dirname: Optional[Path] = None) -> AnchorImage:
    url = "https://storage.googleapis.com/seldon-models/alibi-detect/classifier/"
    path_model = os.path.join(url, "cifar10", "resnet32", "model.h5")
    save_path = tf.keras.utils.get_file("resnet32", path_model)
    model = tf.keras.models.load_model(save_path)

    # drop the leading batch dimension: AnchorImage expects the shape of a single image
    image_shape = model.get_layer(index=0).input_shape[0][1:]

    alibi_model = AnchorImage(predictor=model.predict, image_shape=image_shape)

    if dirname is not None:
        alibi_model.save(dirname)
    return alibi_model
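A quick usage sketch for the factory above; the random array stands in for a real, suitably scaled CIFAR-10 image, and the threshold value is an assumption:

import numpy as np

explainer = make_anchor_image()
x = np.random.rand(32, 32, 3).astype(np.float32)  # stand-in for a real CIFAR-10 image
explanation = explainer.explain(x, threshold=0.95)
print(explanation.anchor.shape)  # the anchor comes back as an image-shaped array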
Example #2

import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from alibi.explainers import AnchorImage

def test_anchor_image():
    # load and prepare fashion MNIST data
    (x_train, y_train), (_, _) = keras.datasets.fashion_mnist.load_data()
    x_train = x_train.astype('float32') / 255
    x_train = np.reshape(x_train, x_train.shape + (1,))
    y_train = to_categorical(y_train)

    # define and train model
    def model():
        x_in = Input(shape=(28, 28, 1))
        x = Conv2D(filters=8, kernel_size=2, padding='same', activation='relu')(x_in)
        x = MaxPooling2D(pool_size=2)(x)
        x = Dropout(0.3)(x)
        x = Flatten()(x)
        x_out = Dense(10, activation='softmax')(x)
        cnn = Model(inputs=x_in, outputs=x_out)
        cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        return cnn
    cnn = model()
    cnn.fit(x_train, y_train, batch_size=256, epochs=1)

    # test explainer initialization
    predict_fn = lambda x: cnn.predict(x)
    segmentation_fn = 'slic'
    segmentation_kwargs = {'n_segments': 10, 'compactness': 10, 'sigma': .5}
    image_shape = (28, 28, 1)
    explainer = AnchorImage(predict_fn, image_shape, segmentation_fn=segmentation_fn,
                            segmentation_kwargs=segmentation_kwargs)
    assert explainer.predict_fn(np.zeros((1,) + image_shape)).shape == (1,)

    # test sampling and segmentation functions
    image = x_train[0]
    segments, sample_fn = explainer.get_sample_fn(image, p_sample=.5)
    raw_data, data, labels = sample_fn([], 10)
    assert raw_data.shape == data.shape
    assert data.shape[0] == labels.shape[0]
    assert data.shape[1] == len(np.unique(segments))

    # test explanation
    threshold = .95
    explanation = explainer.explain(image, threshold=threshold)
    assert explanation['anchor'].shape == image_shape
    assert explanation['precision'] >= threshold
    assert len(np.unique(explanation['segments'])) == len(np.unique(segments))
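Reusing `explanation` from the test above, the anchor can also be inspected visually; a minimal sketch, assuming matplotlib is available:

import matplotlib.pyplot as plt

plt.imshow(explanation['anchor'].squeeze(), cmap='gray')  # (28, 28, 1) -> (28, 28)
plt.title('Anchor for the first Fashion-MNIST training image')
plt.show()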
Example #3

import numpy as np
from alibi.api.defaults import DEFAULT_DATA_ANCHOR_IMG, DEFAULT_META_ANCHOR
from alibi.explainers import AnchorImage

def test_anchor_image(conv_net):

    segmentation_fn = 'slic'
    segmentation_kwargs = {'n_segments': 10, 'compactness': 10, 'sigma': .5}
    image_shape = (28, 28, 1)
    p_sample = 0.5  # probability of perturbing a superpixel
    num_samples = 10
    # img scaling settings
    scaling_offset = 260
    min_val = 0
    max_val = 255
    eps = 0.0001  # tolerance for tensor comparisons
    n_covered_ex = 3  # nb of examples where the anchor applies that are saved

    # define and train model
    clf = conv_net
    predict_fn = lambda x: clf.predict(x)

    explainer = AnchorImage(
        predict_fn,
        image_shape,
        segmentation_fn=segmentation_fn,
        segmentation_kwargs=segmentation_kwargs,
    )
    # test explainer initialization
    assert explainer.predictor(np.zeros((1,) + image_shape)).shape == (1,)
    assert not explainer.custom_segmentation

    # test sampling and segmentation functions
    # x_train is assumed to be loaded elsewhere in the test module
    # (e.g. the Fashion-MNIST training set, as in the previous example)
    image = x_train[0]
    explainer.instance_label = predict_fn(image[np.newaxis, ...])[0]
    explainer.image = image
    explainer.n_covered_ex = n_covered_ex
    explainer.p_sample = p_sample
    segments = explainer.generate_superpixels(image)
    explainer.segments = segments
    image_preproc = explainer._preprocess_img(image)
    explainer.segment_labels = list(np.unique(segments))
    superpixels_mask = explainer._choose_superpixels(num_samples=num_samples)

    # grayscale image should be replicated across channel dim before segmentation
    assert image_preproc.shape[-1] == 3
    for channel in range(image_preproc.shape[-1]):
        assert (image.squeeze() - image_preproc[..., channel] <= eps).all()
    # check superpixels mask
    assert superpixels_mask.shape[0] == num_samples
    assert superpixels_mask.shape[1] == len(list(np.unique(segments)))
    # every sample perturbs at most n_segments superpixels, and the mask is binary
    assert (superpixels_mask.sum(axis=1) <= segmentation_kwargs['n_segments']).all()
    assert superpixels_mask.max() <= 1

    cov_true, cov_false, labels, data, coverage, _ = explainer.sampler((0, ()), num_samples)
    assert data.shape[0] == labels.shape[0]
    assert data.shape[1] == len(np.unique(segments))
    assert coverage == -1

    # test explanation
    threshold = .95
    explanation = explainer.explain(image, threshold=threshold)

    if explanation.raw['feature']:
        assert explanation.raw['examples'][-1]['covered_true'].shape[0] <= explainer.n_covered_ex
        assert explanation.raw['examples'][-1]['covered_false'].shape[0] <= explainer.n_covered_ex
    else:
        assert not explanation.raw['examples']
    assert explanation.anchor.shape == image_shape
    assert explanation.precision >= threshold
    assert len(np.unique(explanation.segments)) == len(np.unique(segments))
    assert explanation.meta.keys() == DEFAULT_META_ANCHOR.keys()
    assert explanation.data.keys() == DEFAULT_DATA_ANCHOR_IMG.keys()

    # test scaling
    fake_img = np.random.random(size=image_shape) + scaling_offset
    scaled_img = explainer._scale(fake_img, scale=(min_val, max_val))
    assert (scaled_img <= max_val).all()
    assert (scaled_img >= min_val).all()
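The `custom_segmentation` flag asserted above is False because a built-in segmenter name ('slic') was passed; AnchorImage also accepts a callable. A minimal sketch of a custom segmentation function (the grid segmenter is purely illustrative):

import numpy as np

def grid_segmentation(image: np.ndarray) -> np.ndarray:
    # split the image into a fixed 4x4 grid of superpixels, labelled 0..15
    h, w = image.shape[:2]
    rows = np.minimum(np.arange(h) * 4 // h, 3)
    cols = np.minimum(np.arange(w) * 4 // w, 3)
    return rows[:, None] * 4 + cols[None, :]

explainer = AnchorImage(predict_fn, image_shape, segmentation_fn=grid_segmentation)
# explainer.custom_segmentation is now True and segmentation_kwargs is ignored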
Example #4

from tensorflow.keras.applications.mobilenet import MobileNet
from alibi.datasets import fetch_imagenet
from alibi.explainers import AnchorImage
import dill
import alibi

# Version pinning matters when serializing with dill: an explainer saved from an
# ad-hoc Jupyter kernel may not load elsewhere, so a reproducible Docker environment is safer.
print(alibi.__version__)
print(dill.__version__)

model = MobileNet(weights='imagenet')

predict_fn = lambda x: model.predict(x)

segmentation_fn = 'slic'
kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5}
image_shape = (224, 224, 3)
explainer = AnchorImage(predict_fn,
                        image_shape,
                        segmentation_fn=segmentation_fn,
                        segmentation_kwargs=kwargs,
                        images_background=None)

explainer.predict_fn = None  # clear predict_fn: it's a lambda, so it must be reset after loading
with open("explainer.dill", 'wb') as f:
    dill.dump(explainer, f)
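A matching load step, as a minimal sketch; the lambda has to be re-attached by hand after deserialization:

with open("explainer.dill", 'rb') as f:
    explainer = dill.load(f)
explainer.predict_fn = lambda x: model.predict(x)  # restore the cleared prediction function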
Example #5
import os
import numpy as np
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing import image
from alibi.explainers import AnchorImage

# helpers such as setup_folder_structure, load_img_paths, get_name_without_ext and
# ANCHOR_FOLDER are defined elsewhere in the original script

def transform_img_fn(img_path):  # function name reconstructed from the call site below
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = preprocess_input(x)
    return x


if __name__ == '__main__':
    setup_folder_structure()

    print('LOADING MODELS AND EXPLAINERS')
    model = InceptionV3(weights='imagenet')
    predict_fn = lambda x: model.predict(x)
    kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5}
    explainer = AnchorImage(predict_fn,
                            image_shape=(299, 299, 3),
                            segmentation_fn='slic',
                            segmentation_kwargs=kwargs,
                            images_background=None)

    print('GENERATING ANCHOR IMAGES')
    count = 1
    img_paths = load_img_paths()
    for img_name, img_path in img_paths.items():
        img = transform_img_fn(img_path)
        np.random.seed(0)  # fix the seed so explanations are reproducible
        explanation = explainer.explain(img,
                                        threshold=.95,
                                        p_sample=0.5,
                                        tau=0.25)
        new_img_name = get_name_without_ext(img_name)
        new_img_path = os.path.join(ANCHOR_FOLDER, f'{new_img_name}.png')
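        # The snippet is truncated here. A hedged guess at the missing save step,
        # assuming matplotlib.pyplot (as plt) is imported in the original script;
        # on older alibi versions use explanation['anchor'] instead:
        anchor = explanation.anchor
        plt.imsave(new_img_path,
                   (anchor - anchor.min()) / (anchor.max() - anchor.min() + 1e-8))
        count += 1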
Example #6
from alibi.explainers import AnchorImage

# parametrized pytest fixture: the @pytest.fixture(params=...) decorator and the
# mnist_predictor fixture it depends on are not shown in this snippet
def ai_explainer(mnist_predictor, request):
    segmentation_fn = request.param
    ai = AnchorImage(predictor=mnist_predictor,
                     image_shape=(28, 28, 1),
                     segmentation_fn=segmentation_fn)
    return ai
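A minimal sketch of a test consuming the fixture above; the assertion is illustrative, relying on AnchorImage storing `image_shape` at construction:

def test_ai_explainer_shape(ai_explainer):
    assert ai_explainer.image_shape == (28, 28, 1)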
Example #7
import dill
import tensorflow as tf
from alibi.datasets import fetch_imagenet
from alibi.explainers import AnchorImage

print('tensorflow: ', tf.__version__)

# model = tf.saved_model.load('../predictor/mobilenet_saved_model')
model = tf.keras.models.load_model('../predictor/mobilenet_saved_model')

predict_fn = lambda x: model.predict(x)
kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5}
image_shape = (224, 224, 3)

explainer = AnchorImage(
    predict_fn,
    image_shape,
    segmentation_fn='slic',
    segmentation_kwargs=kwargs,
    images_background=None,
)

categories = ['Persian cat', 'volcano', 'strawberry', 'jellyfish', 'centipede']
full_data = []
full_labels = []
for category in categories:
    data, labels = fetch_imagenet(
        category,
        nb_images=10,
        target_size=image_shape[:2],
        seed=0,
        return_X_y=True,
    )
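    # assumed completion of the truncated loop: accumulate the fetched batches
    full_data.append(data)
    full_labels.append(labels)

Once the images are collected, explaining one of them is a one-liner; a minimal sketch, assuming MobileNet's own preprocess_input is the intended scaling:

from tensorflow.keras.applications.mobilenet import preprocess_input

img = preprocess_input(full_data[0][0].astype('float32'))  # first image of the first category
explanation = explainer.explain(img, threshold=.95, p_sample=.5)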
Example #8
    def AnchorImage(self):
        '''
        Generate an Anchor explanation for the given data and model.

        Note: this method shares its name with the alibi AnchorImage class; inside
        the method body the bare name still resolves to the module-level import.
        '''
        image_shape = self.exp_data.shape[1:]
        if self.predict_fn is None:

            if self.exp_model == "InceptionV3":
                K.clear_session()
                model = InceptionV3(weights='imagenet')
                image_shape = (299, 299, 3)

                self.exp_data = preprocess_input(self.exp_data)

                preds = model.predict(self.exp_data)
                labels = decode_predictions(preds, top=1000)

                N, _, _, _ = self.exp_data.shape
                for i in range(N):
                    tmp = []
                    for j in range(1000):
                        tmp.append(labels[i][j][1:])
                    self.explanations_labels['labels'].append(tmp)

                self.predict_fn = lambda x: model.predict(x)

            # else:
            #   self.predict_fn = model.predict

        else:
            prob = self.predict_fn(self.exp_data)
            N, _, _, _ = self.exp_data.shape
            for i in range(N):
                tmp = []
                for j in range(len(prob[0])):
                    tmp.append((str(j), prob[i][j]))
                self.explanations_labels['labels'].append(tmp)

        if self.predict_fn is None:
            raise Exception(
                "Error no prediction function or valid model is given.")

        ###

        beam_size = 1
        threshold = .95
        segmentation_fn = 'slic'
        kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5}
        N, row, col, ch = self.exp_data.shape

        if row <= 128 or col <= 128:
            from skimage import segmentation

            if row == 28 and col == 28:
                # MNIST
                beam_size = 2
                threshold = .98
                segmentation_fn = segmentation.felzenszwalb
                kwargs = {'scale': 50, 'min_size': 50, 'sigma': .1}
            elif row == 64 and col == 64:
                # olivetti_faces
                beam_size = 2
                threshold = .98
        ###

        explainer = AnchorImage(self.predict_fn,
                                image_shape,
                                segmentation_fn=segmentation_fn,
                                segmentation_kwargs=kwargs,
                                images_background=None)

        print('Generating explanation for method {}'.format(self.exp_method))
        for n in range(N):
            # self.explanations_labels['explanation_anchor'][n] = []
            anchor_points = []
            # self.explanations_labels['labels'] = self.predict_fn(self.exp_data[n])
            start_time = timeit.default_timer()
            explanation = explainer.explain(self.exp_data[n],
                                            threshold=threshold,
                                            p_sample=.5,
                                            tau=0.25,
                                            beam_size=beam_size)
            print('  Data Point {} explanation took {:.2f} sec'.format(
                n,
                timeit.default_timer() - start_time))

            segments_array = explanation.data['segments']
            superpixels_in_anchor = explanation.data['raw']['feature']
            for i in range(row):
                for j in range(col):
                    if segments_array[i][j] in superpixels_in_anchor:
                        anchor_points.append([i, j])
            self.explanations_labels['explanation_anchor'].append(
                anchor_points)
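A minimal sketch of turning the collected anchor_points into a binary mask for display; matplotlib usage is an assumption, and it must run with the method's locals (row, col, anchor_points) in scope:

import numpy as np
import matplotlib.pyplot as plt

mask = np.zeros((row, col))
for i, j in anchor_points:
    mask[i, j] = 1
plt.imshow(mask, cmap='gray')
plt.title('Pixels covered by the anchor')
plt.show()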