Example #1
def layer_analysis(model, layer_num):
    """Implements Layer-wise relevance propagation.

  This is cutting off layers in the network to obtain heatmap 
  vectors for every layer and then merges them together to form
  a heatmap matrix/tensor. Then saves them using numpy.

  Args:
      model: The neural network to perform analysis upon.
      layer_num: A list, with index of layers to cuttoff for LRP.
  """
    analyzer = innvestigate.create_analyzer("lrp.z", model)
    analysis = analyzer.analyze(x_train)
    print("analysis: " + str(analysis) + "\n\n\n")

    model.summary()
    for i in layer_num:
        print("New model ", i)

        new_model = Model(model.inputs, model.layers[-i].output)
        new_model.set_weights(model.get_weights())
        new_model.summary()

        analyzer = innvestigate.create_analyzer("lrp.z", new_model)
        analysis = analyzer.analyze(x_train)
        print("analysis: " + str(analysis))
        name = "out_lrp_" + str(i)
        np.save(name, analysis)
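
A hypothetical call of the function above; cnn stands in for a Keras model, and the module-level x_train batch that layer_analysis reads must already exist:

# Hypothetical usage sketch: `cnn` is a placeholder model; the function also
# expects `x_train`, `Model`, `innvestigate` and `np` at module level.
# Intended to save out_lrp_1.npy, out_lrp_2.npy, out_lrp_3.npy next to the script.
layer_analysis(cnn, layer_num=[1, 2, 3])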
Example #2
def test_fast__create_analyzers_wrong_name():
    """
    Test 'innvestigate.create_analyzer':
    'KeyError' should be thrown when passing wrong keys.
    """
    fake_model = keras.models.Sequential([keras.layers.Dense(10, input_shape=(10,))])
    with pytest.raises(KeyError):
        create_analyzer("wrong name", fake_model)
Example #3
    def apply_analyzer_algm(self, image, algm, image_name, trained_output_temp, activation):
        print("Started Algorithm: %s" %(algm))
        # Get model
        model, preprocess = self.model, self.preprocess
        # Strip softmax layer
        # model = innvestigate.utils.model_wo_softmax(model)
        model = innvestigate.utils.model_activation_fn(model, activation[0])
        # sklearn.log_model(sk_model=model,
        #                   artifact_path="model after softmax",
        #                   registered_model_name="innvestigate-vgg16-model")
        kwargs = {}
        if algm == "lrp":
            kwargs["rule"] = "Z" # Ref https://innvestigate.readthedocs.io/en/latest/modules/analyzer.html
            analyzer = innvestigate.create_analyzer(algm, model, **kwargs)
        elif algm == "lrp.alpha_beta":
            analyzer = innvestigate.create_analyzer(algm, model, alpha=1, **kwargs)
        elif algm == "deep_taylor.bounded":
            analyzer = innvestigate.create_analyzer(algm, model, low=1, high=1, **kwargs)
        elif algm in ["pattern.net", "pattern.attribution"]:
            patterns = [x for x in model.get_weights()
                        if len(x.shape) > 1]
            analyzer = innvestigate.create_analyzer(algm, model, patterns=patterns, pattern_type="relu", **kwargs)
        else:
            analyzer = innvestigate.create_analyzer(algm, model, **kwargs)
        # Add batch axis and preprocess
        x = preprocess(image[None])

        features_b = model.predict(x)
        before_analyzer_prediction = [{"class": v[0], "description": v[1], "confidence": v[2]} for v in self.decode_predictions(features_b, top=3)[0]]


        # Apply analyzer w.r.t. maximum activated output-neuron
        a = analyzer.analyze(x)

        features_a = model.predict(a)
        after_analyzer_prediction = [{"class": v[0], "description": v[1], "confidence": v[2]} for v in self.decode_predictions(features_a, top=3)[0]]
        self.set_color_and_status(before_analyzer_prediction, after_analyzer_prediction)

        # Aggregate along color channels and normalize to [-1, 1]
        a = a.sum(axis=np.argmax(np.asarray(a.shape) == 3))
        a /= np.max(np.abs(a))

        # Plot
        plt.imshow(a[0], cmap="seismic", clim=(-1, 1))
        plt.axis('off')
        output_path= "media/output/{}_{}_analysis.png".format(image_name, algm)
        plt.savefig(output_path)

        trained_output_temp["before_analyzer_prediction"]= before_analyzer_prediction
        trained_output_temp["after_analyzer_prediction"]= after_analyzer_prediction
        trained_output_temp["op_img_path"]= output_path
        trained_output_temp["op_img_name"]= output_path.split("/")[-1]

        log_artifact("media/output/{}_{}_analysis.png".format(image_name, algm))
        print("Completed Algorithm: %s" %(algm))
        trained_output_temp["status"]= "success"
        print("Prediction completed")
Example #4
def test_fast__create_analyzers():

    fake_model = tensorflow.keras.models.Sequential(
        [tensorflow.keras.layers.Dense(10, input_shape=(10, ))])
    for name in analyzers.keys():
        try:
            create_analyzer(name, fake_model)
        except KeyError:
            # Name should be found!
            raise
        except Exception:
            # Some analyzers require parameters...
            pass
Example #5
def test_fast__create_analyzers():
    """
    Test 'innvestigate.create_analyzer':
    Instantiate analyzers by name using a placeholder Keras model.
    """

    fake_model = keras.models.Sequential([keras.layers.Dense(10, input_shape=(10,))])
    for name in analyzers:
        try:
            create_analyzer(name, fake_model)
        except KeyError:
            print("Key not found when creating analyzer from name.")
        except Exception:
            logging.error("Error when creating analyzer from name.", exc_info=True)
Example #6
    def innvestigate_pred(self,\
                omn_train_params=["Bx", "By", "Bz", "Vx", "Np"]):
        """
        Use the innvestigate lib to analyze the network!
        """
        import innvestigate
        import innvestigate.utils as iutils

        omn_end_time = self.sw_imf_df.index.max()
        omn_begin_time = (omn_end_time - datetime.timedelta(\
                    minutes=self.omn_pred_hist) ).strftime(\
                    "%Y-%m-%d %H:%M:%S")
        inp_omn_vals = self.sw_imf_df.loc[\
                         omn_begin_time : omn_end_time \
                         ][omn_train_params].values
        inp_omn_vals = inp_omn_vals.reshape(1,inp_omn_vals.shape[0],\
                             inp_omn_vals.shape[1])
        # innvestigate now
        model_wo_softmax = iutils.keras.graph.model_wo_softmax(self.model)
        analyzer = innvestigate.create_analyzer("deep_taylor",
                                                model_wo_softmax)
        anlyz_res = analyzer.analyze(inp_omn_vals)
        # Aggregate along color channels and normalize to [-1, 1]
        #         a = a.sum(axis=numpy.argmax(numpy.asarray(a.shape) == 3))
        anlyz_res = numpy.squeeze(anlyz_res, axis=0)
        anlyz_res /= numpy.max(numpy.abs(anlyz_res))
        # Now also get the imf/sw values!
        swimf_data = self.original_sw_imf_data.loc[\
                         omn_begin_time : omn_end_time \
                         ][omn_train_params]
        return anlyz_res, swimf_data
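
A hypothetical way to visualize the relevance returned above, which after the squeeze has shape (time steps, OMNI parameters); predictor is a placeholder instance of this class:

# Hypothetical visualization sketch; `predictor` is a placeholder instance.
import matplotlib.pyplot as plt
import numpy as np

anlyz_res, swimf_data = predictor.innvestigate_pred()
fig, ax = plt.subplots()
im = ax.pcolormesh(anlyz_res.T, cmap="seismic", vmin=-1, vmax=1)
ax.set_yticks(np.arange(len(swimf_data.columns)) + 0.5)
ax.set_yticklabels(swimf_data.columns)  # e.g. Bx, By, Bz, Vx, Np
ax.set_xlabel("time step")
fig.colorbar(im, ax=ax, label="relevance")
plt.show()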
Example #7
    def test(self):
        np.random.seed(234354346)
        model_class = innvestigate.utils.tests.networks.base.mlp_2dense

        data = fetch_data()
        model, modelp = create_model(model_class)
        train_model(modelp, data, epochs=10)
        model.set_weights(modelp.get_weights())

        analyzer = innvestigate.create_analyzer("pattern.net", model)
        analyzer.fit(data[0], pattern_type="linear", batch_size=256, verbose=0)

        patterns = analyzer._patterns
        W = model.get_weights()[0]
        W2D = W.reshape((-1, W.shape[-1]))
        X = data[0].reshape((data[0].shape[0], -1))
        Y = np.dot(X, W2D)

        def safe_divide(a, b):
            return a / (b + (b == 0))

        mean_x = X.mean(axis=0)
        mean_y = Y.mean(axis=0)
        mean_xy = np.dot(X.T, Y) / Y.shape[0]
        ExEy = mean_x[:, None] * mean_y[None, :]
        cov_xy = mean_xy - ExEy
        w_cov_xy = np.diag(np.dot(W2D.T, cov_xy))
        A = safe_divide(cov_xy, w_cov_xy[None, :])

        def allclose(a, b):
            return np.allclose(a, b, rtol=0.05, atol=0.05)

        #print(A.sum(), patterns[0].sum())
        self.assertTrue(allclose(A.ravel(), patterns[0].ravel()))
Example #8
    def explain_image_innvestigate(self, model, data):

        try:
            # Build the model
            model = keras.models.Model(inputs=model.inputs,
                                       outputs=model.outputs)
            model.compile(optimizer="adam", loss="categorical_crossentropy")

            model_wo_sm = iutils.keras.graph.model_wo_softmax(model)

            analyzer = innvestigate.create_analyzer(self.gradient_method,
                                                    model_wo_sm)
            analysis = analyzer.analyze(data)
            analysis = iutils.postprocess_images(analysis,
                                                 color_coding='BGRtoRGB',
                                                 channels_first=False)

            analysis = ivis.gamma(analysis, minamp=0, gamma=0.95)
            analysis = ivis.heatmap(analysis)

            return analysis[0]

        except innvestigate.NotAnalyzeableModelException:
            return None

        except Exception:
            return None
Example #9
    def test_lrp(self):

        weights = [
            np.array([[0.25, -0.5], [0.25, 1], [0.25, -5 / 12]]),
            np.array([0., 0.]),
            np.array([[2 / 3], [-1]]),
            np.array([0.])
        ]
        tm = TestModel(weights=weights)

        tm_keras = kmodels.Sequential()
        tm_keras.add(klayers.Dense(2, activation='relu', input_shape=(3, )))
        tm_keras.add(klayers.Dense(1, activation='linear'))
        tm_keras.compile(optimizer='adam', loss='mean_squared_error')
        tm_keras.set_weights(weights=weights)

        data = np.array([24, 24, 24])

        # the _IB stands for 'ignore bias'.
        lrp21 = innv.create_analyzer(name='lrp.alpha_2_beta_1_IB',
                                     model=tm_keras)

        np.testing.assert_allclose(
            bcktrck.mlp_backtracking_relevance(model=tm,
                                               data_in=data,
                                               alpha=2,
                                               beta=1)[0],
            lrp21.analyze(np.array([data]))[0],
            rtol=1e-4,
            atol=0.01,
        )
Example #10
def get_model_and_analyser(sequence_length, embedding_dim_target,
                           embedding_dim_source, num_filters, filter_sizes,
                           drop, model_params, analyser_name, have_activation):
    """Constructs a model and initializes an analyser with it.
    :param sequence_length: The sequence length of an input text.
    :param embedding_dim_target: The word vector dimension of the target language.
    :param embedding_dim_source: The word vector dimension of the source language.
    :param num_filters: The number of convolution filters per n-gram.
    :param filter_sizes: The n-gram filter sizes.
    :param drop: The drop out probability.
    :param have_activation: Whether or not there should be a final non-linear activation.
    :param model_params: Location of the model weights.
    :param analyser_name: The name of the analyser (explainability method).
    :returns model, analyser: A model and its analyser.
    """
    model = train.construct_model(sequence_length=sequence_length,
                                  embedding_dim_target=embedding_dim_target,
                                  embedding_dim_source=embedding_dim_source,
                                  num_filters=num_filters,
                                  filter_sizes=filter_sizes,
                                  drop=drop,
                                  have_activation=have_activation)
    model.load_weights(model_params)
    analyser = innvestigate.create_analyzer(analyser_name,
                                            model,
                                            neuron_selection_mode="index")

    return model, analyser
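
A hypothetical usage sketch; because the analyser is created with neuron_selection_mode="index", the output neuron to explain is passed at analysis time. Every argument value below is a placeholder, not a project default:

# Hypothetical usage; all values below are placeholders.
model, analyser = get_model_and_analyser(
    sequence_length=50, embedding_dim_target=300, embedding_dim_source=300,
    num_filters=100, filter_sizes=[3, 4, 5], drop=0.5,
    model_params="weights.h5", analyser_name="lrp.epsilon",
    have_activation=False)
relevance = analyser.analyze(x_batch, neuron_selection=0)  # explain output neuron 0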
Example #11
    def __init__(self,
                 env,
                 policy_net,
                 target_net,
                 optimizer,
                 scheduler,
                 memory,
                 fake_memory,
                 state=None):
        self.env = env
        self.policy_net = policy_net
        self.target_net = target_net
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.memory = memory
        self.fake_memory = fake_memory
        self.state = state

        self.consecutive_noreward = 0
        self.total_reward = 0
        self.screen = None
        self.screen_tensor = None

        self.lrp_output = None

        self.model_keras = torch_to_keras(policy_net, image_shape=[3, 40, 60])
        name = {
            0: 'lrp.sequential_preset_a_flat',
            1: 'guided_backprop',
            2: 'gradient',
        }[0]
        self.analyzer = innvestigate.create_analyzer(name, self.model_keras)
Example #12
def gen_explanations_args(args):
    data = pkl.load(open(args.data_path, 'rb'))
    labels = np.load(args.label_path)
    filenames = open(args.filename_path).read().splitlines()
    calls = open(args.glog_call_path).read().splitlines()
    no_labels = labels.shape[1]
    no_tokens = len(calls)
    print('no tokens', no_tokens)
    model_w_softmax = get_damd_cnn(no_tokens, no_labels, final_nonlinearity=args.nonlinearity)
    model_wo_softmax = get_damd_cnn(no_tokens, no_labels, final_nonlinearity=None)
    model_w_softmax.load_weights(args.model_path)
    model_wo_softmax.load_weights(args.model_path)
    if args.calculate_raw:
        print('Predicting samples ...')
        dims_to_explain = []
        for sample in tqdm(data):
            s_arr = np.array(sample).reshape(1, -1)
            prediction = model_w_softmax.predict(s_arr)
            if args.nonlinearity == 'softmax':
                dims_to_explain.append([np.argmax(prediction[0])])
            else:
                dims_to_explain.append(np.where(prediction > 0.5)[1])
        analyzer = innvestigate.create_analyzer('lrp.epsilon', model_wo_softmax,
                                                neuron_selection_mode='index', epsilon=1e-2)
    tag_names = open(args.tag_names).read().splitlines() if args.tag_names is not None else None
    idx_to_call = dict(zip(range(1, len(calls) + 1), calls))
    idx_2_tag = dict(zip(range(len(tag_names)), tag_names)) if tag_names is not None \
        else dict(zip(range(no_labels), [str(x) for x in range(no_labels)]))
    if args.calculate_raw:
        explain_behavior(analyzer, data, labels, dims_to_explain, filenames, args.save_path)
    get_explanations_for_behavior(filenames, data, idx_to_call, idx_2_tag, args.save_path)
Example #13
    def Baselinefidelity(self, i_num, num=10):
        importIndex = np.zeros([len(self.x), 200], dtype=np.float32)
        step = int(self.testNum / num)
        analyzer = innvestigate.create_analyzer(self.baseline[i_num],
                                                self.model)
        for i in range(num):
            st = int((i) * step)
            ed = min(int((i + 1) * step), len(self.x))
            analysis = analyzer.analyze(self.x[st:ed])
            importIndex[st:ed] = np.argsort(analysis * -1, axis=1)[:, :200]

        importIndex = np.int32(importIndex)
        print(self.baseline[i_num], "finish explanation")

        if DEBUG:
            RuleSet = []
            for i in range(len(self.x)):
                rule = [[j, self.x[i, j]] for j in importIndex[i]]
                RuleSet.append(rule)
            f = open('../RuleSet/' + self.baseline[i_num] + '.pkl', 'wb')
            pickle.dump(RuleSet, f)
            f.close()
        metric = FidelityMetric(self.x, self.model, importIndex, self.selNum,
                                self.neg_x)
        a = metric.AugmentTest()
        b = metric.DeductionTest()
        return a, b
Example #14
    def Baselinefidelity(self, i_num, num=2):
        analysis = np.zeros_like(self.embed_x, dtype=np.float32)
        step = int(self.testNum / num)
        ig = innvestigate.create_analyzer(self.baseline[i_num],
                                          self.EmbeddingModel)
        for i in range(num):
            st = int((i) * step)
            ed = int((i + 1) * step)
            analysis[st:ed] = ig.analyze(self.embed_x[st:ed])

        analysis = np.sum(analysis, 2)
        importIndex = np.argsort(-analysis, axis=1)
        RuleSet = []
        for i in range(len(self.x)):
            rule = [[j, self.x[i, j]] for j in importIndex[i]]
            RuleSet.append(rule)
        f = open('../RuleSet/' + self.baseline[i_num] + '.pkl', 'wb')
        pickle.dump(RuleSet, f)
        f.close()

        print(self.baseline[i_num], "finish explanation")
        metric = FidelityMetric(self.x, self.model, importIndex, self.selNum,
                                self.neg_x)
        a = metric.AugmentTest()
        b = metric.DeductionTest()
        return a, b
Example #15
def experiment(dl_params, model_params, explainer_type, save_dir=""):

    keras.backend.clear_session()

    #   create data
    print("Loading data...")
    dataloader = Dataloader(dl_params, rseed=0)
    #X_train, y_train = dataloader.get_dataset("train")
    #X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    #   convert to np.array
    #X_train = np.stack(X_train, axis=0)
    #X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    #y_train = np.asarray(y_train)
    #y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    #X_train = X_train.astype("float") / 255.0
    #X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    #image = expand_dims(X_test[0], axis=0)
    image = X_test[70]
    print(image.shape)

    print(matplotlib.get_backend())

    print("Building classifier...")
    #   add this line to prevent some Keras serializer error
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = load_model(model_params['load_location'])

    print("Predicting image...")
    label = model.predict(np.array([
        image,
    ]))

    print("The inputted image is predicted to be ", label)

    print("Building explainer...")
    if model_params['output_dim'] > 2:
        model_wo_sm = iutils.keras.graph.model_wo_softmax(
            model)  #   remove softmax
    else:
        model_wo_sm = model

    explainer = innvestigate.create_analyzer(explainer_type, model_wo_sm)
    print("Explainer type: ", type(explainer))
    explain_innvestigate(image,
                         label,
                         explainer,
                         save_name=explainer_type,
                         save_dir=save_dir)

    keras.backend.clear_session()
Example #16
def calc_deep_taylor_values(model):
    """
    Calculates deep taylor decomposition values for the given training set and
    Multilayer Perceptron (MLP) model.

    :param dataset: Training or test data.
    :param model: Trained MLP model.
    :return: Deep taylor values
    """
    # Predict training and test probabilities
    test_probs = predict_probability(model.X_te, model.best_model, "MLP")
    train_probs = predict_probability(model.X_tr, model.best_model, "MLP")

    # Set last layer activation to linear. If this swapping is not done, the
    # results might be suboptimal
    model.best_model.layers[-1].activation = activations.linear
    stripped_model = utils.apply_modifications(model.best_model)

    # Calculate class weights
    train_input_weights = train_probs
    train_input_weights[np.where(
        model.y_tr == 0)] = (1 -
                             train_input_weights[np.where(model.y_tr == 0)])

    # Get last layer index
    class_idx = 0  # if the activation of last layer was sigmoid
    last_layer_idx = utils.find_layer_idx(model.best_model, "dense_2")

    # Get the input the model was trained on
    seed_input = model.X_tr.values
    # The deep taylor is bounded to a range which should be defined based on
    # the input range:
    input_range = [min(seed_input.flatten()), max(seed_input.flatten())]

    # Calculate global gradients of all patients (deep taylor)
    gradient_analyzer = innvestigate.create_analyzer(
        "deep_taylor.bounded",  # analysis method identifier
        stripped_model,  # model without softmax output
        low=input_range[0],
        high=input_range[1],
    )

    analysis = gradient_analyzer.analyze(seed_input)

    # Calculate score based average
    t_analysis = np.transpose(analysis, (1, 0))
    train_input_weights_s = np.squeeze(train_input_weights)
    score_avg_analysis = np.expand_dims(np.dot(t_analysis,
                                               train_input_weights_s),
                                        axis=0)

    return score_avg_analysis
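
The bounded Deep Taylor pattern used above, distilled into a minimal standalone sketch; stripped_model (a Keras model whose final activation is linear) and X (the input matrix) are assumed names:

# Minimal sketch with assumed names: bound the analyzer to the observed input range.
import innvestigate

input_range = [X.min(), X.max()]
analyzer = innvestigate.create_analyzer("deep_taylor.bounded", stripped_model,
                                        low=input_range[0], high=input_range[1])
relevance = analyzer.analyze(X)  # one relevance row per input sample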
Example #17
 def LRPAnalysis(self):
     logging.info("Processing network LRP")
     analyzers = {
         "Guided backpropagation": "guided_backprop",
         "LRP (z method)": "lrp.z"
     }
     for title, analyzer_func in analyzers.items():
         analyzer = innvestigate.create_analyzer(analyzer_func, self.model)
         analysis = analyzer.analyze(self.norm_inputs)
         result = np.sum(analysis, axis=0) / analysis.shape[0]
         # Add to scores DF #
         self.addScore(result, title)
     logging.info("... done")
Example #18
def explain_example_innvestigate(cnn_model, input_text, method, explain_level = "word", actual_class = None, is_support = True, print_results = True, print_k = 5):
	target_names = cnn_model.target_names
	fe_input = utils.get_data_matrix([input_text], cnn_model.word2index, cnn_model.max_len, use_tqdm = False)[0]
	embedded_matrix = cnn_model.embeddings_func([np.array([fe_input])])[0]
	features = cnn_model.features_func([np.array([fe_input])])[0]
	tokenized_text = [str(w) for w in list(utils.tokenizer(input_text))]
	processed_text = utils.seq_id2text(cnn_model.word_index, fe_input)
	predicted_class = cnn_model.predict(np.array([fe_input]))

	analyzer = innvestigate.create_analyzer(method, innvestigate.utils.model_wo_softmax(cnn_model.partial_model))
	criterion = analyzer.analyze(embedded_matrix)[0] 
	word_level_relevance = np.sum(criterion, axis = 1)[:len(processed_text.split())]
	heatmap = word_level_relevance / np.max(np.abs(word_level_relevance))
	
	if explain_level == "word":
		if is_support:
			non_overlapping_ngrams = [(utils.seq_id2text(cnn_model.word_index, fe_input[[idx]], pad = True), [idx], word_level_relevance[idx]) for idx in np.argsort(-word_level_relevance)[:print_k] if word_level_relevance[idx] > 0]
		else:
			non_overlapping_ngrams = [(utils.seq_id2text(cnn_model.word_index, fe_input[[idx]], pad = True), [idx], -word_level_relevance[idx]) for idx in np.argsort(word_level_relevance)[:print_k] if -word_level_relevance[idx] > 0]
	elif explain_level == "ngram":
		candidate_ngrams = [list(range(start_pos, start_pos + f[0])) for f in cnn_model.filters for start_pos in range(min(len(tokenized_text), cnn_model.max_len)-f[0]+1)]
		candidates = [(ng, sum(np.sum(criterion, axis = 1)[list(ng)])) for ng in candidate_ngrams]
		if is_support:
			candidates = [ng for ng in candidates if ng[1] > 0]
		else:
			candidates = [(ng[0], -ng[1]) for ng in candidates if ng[1] < 0]
		candidates = sorted(candidates, key = lambda x: x[1], reverse = True)
		non_overlapping_ngrams = explain.get_non_overlapping_ngrams(candidates, fe_input, cnn_model.word_index, print_k)

	if print_results:
		print("Input text:", input_text)
		print("----------------------------------------------------------------")
		print("Processed text:", processed_text)
		print("----------------------------------------------------------------")
		if actual_class is not None:
			print("Actual class: {} (class id: {})".format(target_names[actual_class], actual_class))
		print("Predicted class: {} (class id: {})".format(target_names[predicted_class], predicted_class))
		print("----------------------------------------------------------------")
		s = utils.colorize_twoway(processed_text.split(), heatmap)
		display(HTML(s))
		print("----------------------------------------------------------------")
		exp_type = 'evidence' if is_support else 'counter-evidence'
		print("Non-overlapping ngrams %s:" %(exp_type))
		for idx, ngram in enumerate(non_overlapping_ngrams):
			print("{} (location: {})".format(ngram[0], ngram[1]))
	return non_overlapping_ngrams
	
Example #19
def run_interpretation_methods(model,
                               methods,
                               data,
                               X_train_blob=None,
                               normalize=False,
                               **kwargs):
    """This function applies all interpretation methods given in methods (as implemented in innvestigate) to the
    trained model.

    Input:
    Model : trained model implemented in keras
    Methods : list of interpretation methods (implemented in innvestigate) to apply to the model
    data : test data, not used for training the model
    X_train_blob : training data, only use for pattern.net and pattern.attribution
    normalize : whether to normalize the heatmaps to a [0, 1] or [-1, 1] (in case of gradient, pattern.net) range

    Output:
    dict_results : dictionary with methods as keys, containing heatmaps for each sample for each method
    """
    model = innvestigate.utils.model_wo_softmax(model)
    data = data.reshape(len(data), 64)

    dict_results = {}

    for method in methods:
        analyzer = innvestigate.create_analyzer(method[0], model, **method[1])
        if method[0] == 'pattern.net' or method[0] == 'pattern.attribution':
            analyzer.fit(X_train_blob)
            heatmaps = analyzer.analyze(data)
        else:
            heatmaps = analyzer.analyze(data)

        if normalize is True:
            if method[0] == 'pattern.net' or method[0] == 'gradient':
                heatmaps = np.array([
                    minmax_scale(heatmap.flatten(), feature_range=(-1, 1))
                    for heatmap in heatmaps
                ])
            else:
                heatmaps = np.array([
                    minmax_scale(heatmap.flatten(), feature_range=(0, 1))
                    for heatmap in heatmaps
                ])

        dict_results[method[0]] = heatmaps

    return dict_results
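
A hypothetical invocation showing the (name, kwargs) tuple structure the function indexes; model, X_test and X_train are placeholder names:

# Hypothetical usage; the tuples follow the (name, kwargs) structure indexed above.
methods = [("gradient", {}),
           ("lrp.epsilon", {"epsilon": 1e-3}),
           ("pattern.attribution", {})]
results = run_interpretation_methods(model, methods, X_test,
                                     X_train_blob=X_train, normalize=True)
heatmaps = results["lrp.epsilon"]  # one flattened heatmap per sample when normalized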
Example #20
def pp(tk):
    xx = x[:15000]
    yy = y[:15000]
    analyzer = innvestigate.create_analyzer("lrp.z", model)
    logg = []
    for i in range(len(xx)):
        a = analyzer.analyze(xx[i].reshape(-1, 2))
        logg.append(a)
    gg = np.array(logg).reshape(-1, 2)
    d3, _, _, _ = norm(gg.reshape(-1, 2))

    fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8), dpi=200)
    axes[0, 0].scatter(xx[:, 0], xx[:, 1], c=d3[:, 0], s=0.9)
    axes[0, 1].scatter(xx[:, 0], xx[:, 1], c=d3[:, 1], s=0.9)
    axes[1, 0].scatter(yy[:, 0], yy[:, 1], c=d3[:, 0], s=0.9)
    axes[1, 1].scatter(yy[:, 0], yy[:, 1], c=d3[:, 1], s=0.9)
    plt.savefig('1NN_hist_%d.png' % tk)
Example #21
 def _create_analyzer(analyzer_name):
     meta = methods_metadata[analyzer_name]
     if 'net_args' in meta:
         a_kwargs = {
             net_arg_name: net[net_arg_name]
             for net_arg_name in meta['net_args']
         }
     else:
         a_kwargs = {}
     if 'kwargs' in meta:
         a_kwargs.update(meta['kwargs'])
     if 'net_getter_kwargs' in meta:
         for arg_name in meta['net_getter_kwargs']:
             arg_func = meta['net_getter_kwargs'][arg_name]
             a_kwargs[arg_name] = arg_func(net)
     return innvestigate.create_analyzer(analyzer_name, model_wo_softmax,
                                         **a_kwargs)
Example #22
def load_model_from_disk_into_cache(model_path):
    global model_cache
    print("Loading " + model_path + " from disk...")
    model_cache[model_path] = dict()
    model_cache[model_path]["mymodel"] = load_model(model_path)
    model_cache[model_path]["mymodel"].layers[
        -1].activation = tf.keras.activations.linear
    model_cache[model_path]["mymodel"].save('tmp_wo_softmax.hdf5')
    model_wo_softmax = load_model('tmp_wo_softmax.hdf5')
    os.remove('tmp_wo_softmax.hdf5')
    print("model_wo_softmax loaded.")
    print('Creating analyzer...')
    # create analyzer -> only one selected here!
    for method in methods:
        model_cache[model_path]["analyzer"] = innvestigate.create_analyzer(
            method[0], model_wo_softmax, **method[1])
    print('Analyzer created.')
    return model_cache[model_path]["mymodel"], model_cache[model_path][
        "analyzer"]
Example #23
    def analyze_concept(self,
                        cmodel,
                        idx,
                        img_pp,
                        rule="torch_lrp.sequential_preset_a",
                        params={}):
        _, s = cmodel.layers[-2].output_shape

        W = self.get_weights(s, idx)
        cmodel.get_layer("concept").set_weights([W])

        canalyzer = innvestigate.create_analyzer(rule,
                                                 cmodel,
                                                 **params,
                                                 neuron_selection_mode="index")

        R = canalyzer.analyze(img_pp, 0)
        R_dual = canalyzer.analyze(img_pp, 1)

        return R, R_dual
Example #24
def get_mask_stat(gen, gen_seg, model, batch_size=32):
    analyzer = innvestigate.create_analyzer("lrp.epsilon", model, epsilon=1)
    num_batches = math.ceil(gen.samples / batch_size)
    labels = []
    pred = []
    mask_values = []
    for i, ((images, y), (images_seg, _)) in enumerate(zip(gen, gen_seg)):
        if i >= num_batches:
            break
        prob = model.predict(images)
        analysis = analyzer.analyze(images)["input_layer"]
        mask = [
            mask_value(i_a, i, get_mask_of_seg_rgb(i_s))
            for i, i_a, i_s in zip(images, analysis, images_seg)
        ]
        p = prob.argmax(axis=1)
        pred.extend(p)
        labels.extend(y)
        mask_values.extend(mask)
    return np.array(mask_values), np.array(pred), np.array(labels)
Example #25
def generateHeatmap(model, x):
    model_noSoftMax = innvestigate.utils.model_wo_softmax(
        model)  # strip the softmax layer
    analyzer = innvestigate.create_analyzer("lrp.alpha_1_beta_0",
                                            model_noSoftMax)
    a = analyzer.analyze(x)
    # Aggregate along color channels and normalize to [-1, 1]
    a = a.sum(axis=np.argmax(np.asarray(a.shape) == 3))
    a /= np.max(np.abs(a))
    (h, w) = a[0].shape[:2]
    center = (w / 2, h / 2)
    # Plot
    M = cv2.getRotationMatrix2D(center, 0, 1)
    rotated270 = cv2.warpAffine(a[0], M, (h, w))
    flipped = cv2.flip(rotated270, 1)
    flipped = cv2.flip(flipped, 1)
    # plt.figure()
    # print(flipped)
    # plt.imshow(flipped, cmap="seismic", clim=(-1, 1))
    # plt.show()
    return flipped
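
A hypothetical way to display the returned map, mirroring the plotting calls commented out inside the function; model and x are placeholders:

# Hypothetical usage; mirrors the commented-out plotting in the function above.
import matplotlib.pyplot as plt

heatmap = generateHeatmap(model, x)
plt.figure()
plt.imshow(heatmap, cmap="seismic", clim=(-1, 1))
plt.axis('off')
plt.show()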
Example #26
    def test(self):
        np.random.seed(234354346)
        model_class = base.mlp_2dense

        data = fetch_data()
        model, modelp = create_model(model_class)
        train_model(modelp, data, epochs=10)
        model.set_weights(modelp.get_weights())

        analyzer = innvestigate.create_analyzer("pattern.net",
                                                model,
                                                pattern_type="relu")
        analyzer.fit(data[0], batch_size=256, verbose=0)
        patterns = analyzer._patterns
        W, b = model.get_weights()[:2]
        W2D = W.reshape((-1, W.shape[-1]))
        X = data[0].reshape((data[0].shape[0], -1))
        Y = np.dot(X, W2D)

        mask = np.dot(X, W2D) + b > 0
        count = mask.sum(axis=0)

        def safe_divide(a, b):
            return a / (b + (b == 0))

        mean_x = safe_divide(np.dot(X.T, mask), count)
        mean_y = Y.mean(axis=0)
        mean_xy = safe_divide(np.dot(X.T, Y * mask), count)

        ExEy = mean_x * mean_y

        cov_xy = mean_xy - ExEy
        w_cov_xy = np.diag(np.dot(W2D.T, cov_xy))
        A = safe_divide(cov_xy, w_cov_xy[None, :])

        def allclose(a, b):
            return np.allclose(a, b, rtol=0.05, atol=0.05)

        # print(A.sum(), patterns[0].sum())
        self.assertTrue(allclose(A.ravel(), patterns[0].ravel()))
Example #27
    def __init__(self, model, data_loader, batch_size, label_to_class_name):
        """

        :param model: trained model
        :param data_loader: data loader object (image set, functions, etc.)
        :param batch_size: batch size to fit analyzer
        :param label_to_class_name: dictionary of label names
        """
        self.model = model
        self.data_loader = data_loader
        self.label_to_class_name = label_to_class_name
        self.methods = [
            # NAME, OPT.PARAMS, POSTPROC FUNC, TITLE
            ("input", {}, self.data_loader.preprocess[0], "Input"),

            # Signal
            ("deconvnet", {}, utils.bk_proj, "Deconvnet"),
            ("torch_lrp.z", {}, utils.heatmap, "LRP-Z"),
            ("torch_lrp.epsilon", {
                "epsilon": 1
            }, utils.heatmap, "LRP-Epsilon")
        ]

        # Create model without trailing softmax
        self.model_wo_softmax = iutils.keras.graph.model_wo_softmax(self.model)
        # Create analyzers.
        self.analyzers = []
        for method in self.methods:
            analyzer = innvestigate.create_analyzer(
                method[0],  # analysis method identifier
                self.model_wo_softmax,  # model without softmax output
                **method[1])  # optional analysis parameters

            # Some analyzers require training.
            analyzer.fit(self.data_loader.data[0],
                         batch_size=batch_size,
                         verbose=1)
            self.analyzers.append(analyzer)
Example #28
    # Code snippet.
    plt.imshow(image / 255)
    plt.axis('off')
    plt.savefig("readme_example_input.png")

    import innvestigate
    import innvestigate.utils
    import tensorflow.keras.applications.vgg16 as vgg16

    # Get model
    model, preprocess = vgg16.VGG16(), vgg16.preprocess_input
    # Strip softmax layer
    model = innvestigate.utils.model_wo_softmax(model)

    # Create analyzer
    analyzer = innvestigate.create_analyzer("deep_taylor", model)

    # Add batch axis and preprocess
    x = preprocess(image[None])
    # Apply analyzer w.r.t. maximum activated output-neuron
    a = analyzer.analyze(x)

    # Aggregate along color channels and normalize to [-1, 1]
    a = a.sum(axis=np.argmax(np.asarray(a.shape) == 3))
    a /= np.max(np.abs(a))
    # Plot
    plt.imshow(a[0], cmap="seismic", clim=(-1, 1))
    plt.axis('off')
    plt.savefig("readme_example_analysis.png")
Example #29
    print('Test loss: %s\nTest accuracy: %s'%(str(score[0]), str(score[1])))

    weights = model_smax.get_weights()
    with h5py.File(fname, 'w') as fp:
        fp.update([('weights/%02d'%i,x) for i,x in enumerate(weights)])
else:
    with h5py.File(fname, 'r') as fp:
        weights = [arr[1][:] for arr in sorted(fp['weights'].items())]
model.set_weights(weights)

#analysis
from innvestigate import create_analyzer
from innvestigate.utils import BatchSequence
from innvestigate.tools.pattern import PatternComputer

analyzer = create_analyzer('pattern.attribution', model)
#analyzer.fit(x_train, pattern_type='relu', batch_size=bsize, verbose=1)
#rel = analyzer.analyze(x_test[:30])
bsize = 256
generator = BatchSequence(x_train, bsize)
computer = PatternComputer(model, pattern_type='relu')
patterns = computer.compute_generator(generator, keep_pattern_instances=True)

#comparison
import mxnet
from mxnet import nd
from ecGAN.net import mlp_3dense as mlp_3dense_mx
from ecGAN.explain.pattern.estimator import estimators

net_mx = mlp_3dense_mx(outnum=K, numhid=512, droprate=0.25, use_bias=True, patest={'relu': 'relu', 'out': 'linear'})
net_mx.collect_params().initialize()
Example #30
            # x_flow += np.random.normal(size=x_flow.shape, scale=1) # 0.000001 does not work, but 1 does.
            kept_idxs = []

            ################# SPATIAL ####################

            print("Generating spatial analysis ...")

            input_range = (0,255)


            model_s = cnn_m_2048(inits_spatial)
            model_s.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
            model_s.build(input_shape=(n, 1, 224, 224))
            if abbr == '14f_sens' or abbr == '12f_sens':
                opt_params = {"postprocess": "abs"}
                analyzer = innvestigate.create_analyzer("gradient", model_s, **opt_params)
            elif abbr == '14f_dec' or abbr == '12f_dec':
                analyzer = innvestigate.create_analyzer("deconvnet", model_s)
            elif abbr == '14f_gui' or abbr == '12f_gui':
                analyzer = innvestigate.create_analyzer("guided_backprop", model_s)
            elif abbr == 'deep_taylor':
                opt_params = {"low": input_range[0], "high": input_range[1]}
                analyzer = innvestigate.create_analyzer("deep_taylor.bounded", model_s, **opt_params)
            else:
                print('wrong abbreviation')
                exit(1)
            analysis_s = analyzer.analyze(x)

            # print("analysis_s {}".format(analysis_s))
            analysis_s = np.squeeze(analysis_s)
Example #31
# see https://github.com/albermax/innvestigate/blob/master/examples/notebooks/imagenet_compare_methods.ipynb for a list of alternative methods
methods = [  # tuple with method,     params,                  label
    #            ("deconvnet",            {},                      "Deconvnet"),
    #            ("guided_backprop",      {},                      "Guided Backprop"),
    #            ("deep_taylor.bounded",  {"low": -1, "high": 1},  "DeepTaylor"),
    #            ("input_t_gradient",     {},                      "Input * Gradient"),
    #            ("lrp.z",                {},                      "LRP-Z"),
    #            ("lrp.epsilon",          {"epsilon": 1},          "LRP-epsilon"),
    ("lrp.alpha_1_beta_0", {
        "neuron_selection_mode": "index"
    }, "LRP-alpha1beta0"),
]

# create analyzer -> only one selected here!
for method in methods:
    analyzer = innvestigate.create_analyzer(method[0], model_wo_softmax,
                                            **method[1])


# callback for a new subject being selected
def set_subject(subj_idx):
    global test_orig, test_img, pred, a  # define global variables to store subject data
    test_img = testdat[subj_idx]
    test_img = np.reshape(
        test_img, (1, ) + test_img.shape
    )  # add first subj index again to mimic original array structure
    test_orig = testdat_orig[subj_idx, :, :, :, 0]
    # evaluate/predict diag for selected subject
    pred = (mymodel.predict(test_img)[0, 1] * 100
            )  # scale probability score to percent
    # derive relevance map from CNN model
    a = analyzer.analyze(test_img, neuron_selection=1)