Example No. 1
def main():

    dataset = 'mnist'
    black_box = 'RF'
    gamma = None
    nbr_prototypes = 10
    nbr_criticisms = 10

    path = './'
    path_models = path + 'models/'

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb, transform = get_black_box(black_box, black_box_filename, use_rgb, return_model=True)
    bb_predict, _ = get_black_box(black_box, black_box_filename, use_rgb)

    i2e = 0
    img = X_test[i2e]
    Y_pred = bb_predict(X_test)
    bbo = Y_pred[i2e]

    # for label in np.unique(Y_pred):
    X_idx = np.where(Y_pred == bbo)[0]
    Z = transform(X_test[X_idx])
    scaler = MinMaxScaler()
    Z = scaler.fit_transform(Z)
    zimg = scaler.transform(transform(np.array([img])))
    dist = cdist(zimg.reshape(1, -1), Z)
    idx = X_idx[np.argsort(dist[0])]  # cdist returns shape (1, n); rank by its only row
    idx = idx[idx != i2e]             # drop the instance being explained


    print(Y_pred[i2e])
    plt.imshow(X_test[i2e])
    plt.show()

    plt.imshow(X_test[idx[0]])
    plt.show()

    plt.imshow(X_test[idx[-1]])
    plt.show()
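
A compact, self-contained version of the retrieval above; the helper name and signature are ours (a hypothetical refactoring of this block, not a function from the original codebase):

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.preprocessing import MinMaxScaler

def nearest_and_farthest(img_vec, cand_vecs, cand_idx, i2e):
    # Scale the candidate vectors to [0, 1] per feature, project the query
    # with the same scaler, then rank the candidates by distance.
    scaler = MinMaxScaler()
    Z = scaler.fit_transform(cand_vecs)
    z = scaler.transform(img_vec.reshape(1, -1))
    order = np.argsort(cdist(z, Z)[0])        # cdist gives shape (1, n)
    ranked = np.asarray(cand_idx)[order]
    ranked = ranked[ranked != i2e]            # drop the instance itself
    return ranked[0], ranked[-1]              # nearest, farthest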
Example No. 2
def main():

    ae_name = 'aae'

    for dataset in ['mnist', 'fashion', 'cifar10']:

        path = './'
        path_aemodels = path + 'aemodels/%s/%s/' % (dataset, ae_name)

        X_train, _, X_test, _, use_rgb = get_dataset(dataset)
        ae = get_autoencoder(X_test, ae_name, dataset, path_aemodels)
        ae.load_model()

        X_train_ae = ae.decode(ae.encode(X_train))
        X_test_ae = ae.decode(ae.encode(X_test))

        print('dataset: ', dataset)
        print('train rmse:', rmse(X_train, X_train_ae))
        print('test rmse:', rmse(X_test, X_test_ae))
        print('')
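
rmse is a repository helper that is not shown here; a minimal sketch assuming the standard root-mean-squared reconstruction error over all pixels:

import numpy as np

def rmse(X, X_rec):
    # Root of the mean squared per-pixel difference between the original
    # images and their autoencoder reconstructions.
    return float(np.sqrt(np.mean((np.asarray(X, dtype=float)
                                  - np.asarray(X_rec, dtype=float)) ** 2)))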
Example No. 3
def main():

    dataset = 'mnist'
    ae_name = 'aae'

    epochs = 10000
    batch_size = 256
    sample_interval = 200

    path = './'
    path_aemodels = path + 'aemodels/%s/%s/' % (dataset, ae_name)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    ae = get_autoencoder(X_test, ae_name, dataset, path_aemodels)

    ae.fit(X_test,
           epochs=epochs,
           batch_size=batch_size,
           sample_interval=sample_interval)
    ae.save_model()
    ae.sample_images(epochs)
Example No. 4
def main():

    random_state = 0
    dataset = 'fashion'
    black_box = 'RF'
    print(dataset, black_box)

    path = './'
    path_models = path + 'models/'
    path_results = path + 'results/bb/'

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)
    results_filename = path_results + '%s_%s.json' % (dataset, black_box)

    X_train, Y_train, X_test, Y_test, use_rgb = get_dataset(dataset)
    train_black_box(X_train, Y_train, dataset, black_box, black_box_filename,
                    use_rgb, random_state)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename,
                                                 use_rgb)

    Y_pred = bb_predict(X_test)

    acc = accuracy_score(Y_test, Y_pred)
    cr = classification_report(Y_test, Y_pred)
    print('Accuracy: %.2f' % acc)
    print('Classification Report')
    print(cr)
    cr = classification_report(Y_test, Y_pred, output_dict=True)
    res = {
        'dataset': dataset,
        'black_box': black_box,
        'accuracy': acc,
        'report': cr
    }
    with open(results_filename, 'w') as results:
        results.write('%s\n' % json.dumps(res))
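
train_black_box is a repository utility; a minimal sketch of what the 'RF' branch plausibly does (flatten, fit, serialize). The estimator settings and the file suffix are assumptions, not the original implementation:

import pickle
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def train_black_box_rf(X_train, Y_train, filename, random_state=0):
    # Flatten the images into feature vectors and fit a random forest.
    X = np.array([x.ravel() for x in X_train])
    clf = RandomForestClassifier(n_estimators=100, random_state=random_state)
    clf.fit(X, Y_train)
    with open(filename + '.pickle', 'wb') as f:  # hypothetical suffix
        pickle.dump(clf, f)
    return clf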
Example No. 5
def main():

    dataset = sys.argv[1]

    black_box = 'RF'
    neigh_type = 'hrgp'

    random_state = 0
    ae_name = 'aae'
    num_classes = 10

    nbr_experiments = 200

    if dataset not in ['mnist', 'cifar10', 'fashion']:
        print('unknown dataset %s' % dataset)
        return -1

    if black_box not in ['RF', 'AB', 'DNN']:
        print('unknown black box %s' % black_box)
        return -1

    if neigh_type not in ['rnd', 'gntp', 'hrgp']:
        print('unknown neigh type %s' % neigh_type)
        return -1

    path = './'
    path_models = path + 'models/'
    path_results = path + 'results/rel/'
    path_aemodels = path + 'aemodels/%s/%s/' % (dataset, ae_name)
    path_expl = './expl/'

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)
    results_filename = path_results + 'rel_%s_%s_%s.json' % (dataset, black_box, neigh_type)
    expl_filename = path_expl + 'alore_%s_%s_%s.json.gz' % (dataset, black_box, neigh_type)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb, transform = get_black_box(black_box, black_box_filename, use_rgb, return_model=True)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename, use_rgb)
    ae = get_autoencoder(X_test, ae_name, dataset, path_aemodels)
    ae.load_model()

    class_name = 'class'
    class_values = ['%s' % i for i in range(len(np.unique(Y_test)))]

    explainer = ILOREM(bb_predict, class_name, class_values, neigh_type=neigh_type, use_prob=True, size=1000, ocr=0.1,
                       kernel_width=None, kernel=None, autoencoder=ae, use_rgb=use_rgb, valid_thr=0.5,
                       filter_crules=True, random_state=random_state, verbose=False, alpha1=0.5, alpha2=0.5,
                       metric=neuclidean, ngen=10, mutpb=0.2, cxpb=0.5, tournsize=3, halloffame_ratio=0.1,
                       bb_predict_proba=bb_predict_proba)

    errors = open(path_results + 'errors_relevancy_%s_%s.csv' % (dataset, black_box), 'w')

    for i2e in range(nbr_experiments):

        jrow_rel_o = {'i2e': i2e, 'dataset': dataset, 'black_box': black_box}

        img = X_test[i2e]
        bbo = bb_predict(np.array([img]))
        jrow_list = list()

        try:
            # Alore
            exp = explainer.explain_instance(img, num_samples=1000, use_weights=True, metric=neuclidean)
            _, diff = exp.get_image_rule(features=None, samples=100)
            relevancy = 1.0 - np.abs(diff - 127.5)/127.5

            for color in [0, 127, 255]:
                jrow_rel = copy.deepcopy(jrow_rel_o)
                jrow_rel['method'] = 'alore'
                jrow_rel['color'] = color
                jrow_rel = apply_relevancy(jrow_rel, img, bb_predict, bbo[0], relevancy, color)
                jrow_list.append(jrow_rel)
                print(datetime.datetime.now(),
                      '[%s/%s] %s %s %s %s - 25: %d, 50: %d, 75: %d' % (i2e, nbr_experiments, dataset, black_box,
                                                                        'alore', color, jrow_rel['color%s_c25' % color],
                                                                        jrow_rel['color%s_c50' % color], jrow_rel['color%s_c75' % color]))

            jrow_neigh = {'i2e': i2e, 'dataset': dataset, 'black_box': black_box, 'expl': diff,
                          'rule': str(exp.rule)}

            json_str = ('%s\n' % json.dumps(jrow_neigh, cls=NumpyEncoder)).encode('utf-8')
            with gzip.GzipFile(expl_filename, 'a') as fout:
                fout.write(json_str)

        except Exception:
            print('error instance to explain: %d' % i2e)
            errors.write('%d\n' % i2e)
            continue

        results = open(results_filename, 'a')
        for jrow in jrow_list:
            results.write('%s\n' % json.dumps(jrow))
        results.close()

    errors.close()
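
apply_relevancy is a repository helper; judging from the keys it fills ('color0_c25', ..., 'color255_c75'), it masks the most relevant pixels with a fixed color and records the effect on the black-box label at the 25th/50th/75th relevance percentiles. A hedged sketch of that reading (the flip flag is our assumption):

import numpy as np

def apply_relevancy_sketch(jrow, img, bb_predict, bbo, relevancy, color):
    # Paint the pixels above each relevance percentile with `color` and
    # record whether the predicted label flips (1 = flipped, 0 = stable).
    for p in [25, 50, 75]:
        thr = np.percentile(relevancy, 100 - p)
        masked = img.copy()
        masked[relevancy >= thr] = color
        jrow['color%s_c%d' % (color, p)] = int(
            bb_predict(np.array([masked]))[0] != bbo)
    return jrow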
Example No. 6
def main():

    dataset = sys.argv[1]

    black_box = 'DNN'
    neigh_type = 'hrgp'

    if len(sys.argv) > 2:
        start_from = int(sys.argv[2])
    else:
        start_from = 0

    random_state = 0
    ae_name = 'aae'
    num_classes = 10

    nbr_experiments = 200

    if dataset not in ['mnist', 'cifar10', 'fashion']:
        print('unknown dataset %s' % dataset)
        return -1

    if black_box not in ['RF', 'AB', 'DNN']:
        print('unknown black box %s' % black_box)
        return -1

    if neigh_type not in ['rnd', 'gntp', 'hrgp']:
        print('unknown neigh type %s' % neigh_type)
        return -1

    path = './'
    path_models = path + 'models/'
    path_results = path + 'results/stability/'
    path_aemodels = path + 'aemodels/%s/%s/' % (dataset, ae_name)

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)
    results_filename = path_results + 'sta_%s_%s_%s.json' % (
        dataset, black_box, neigh_type)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb, transform = get_black_box(black_box,
                                  black_box_filename,
                                  use_rgb,
                                  return_model=True)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename,
                                                 use_rgb)

    Y_pred = bb_predict(X_test)
    Y_pred_proba = bb_predict_proba(X_test)

    ae = get_autoencoder(X_test, ae_name, dataset, path_aemodels)
    ae.load_model()

    class_name = 'class'
    class_values = ['%s' % i for i in range(len(np.unique(Y_test)))]

    explainer = ILOREM(bb_predict,
                       class_name,
                       class_values,
                       neigh_type=neigh_type,
                       use_prob=True,
                       size=1000,
                       ocr=0.1,
                       kernel_width=None,
                       kernel=None,
                       autoencoder=ae,
                       use_rgb=use_rgb,
                       valid_thr=0.5,
                       filter_crules=True,
                       random_state=random_state,
                       verbose=False,
                       alpha1=0.5,
                       alpha2=0.5,
                       metric=neuclidean,
                       ngen=10,
                       mutpb=0.2,
                       cxpb=0.5,
                       tournsize=3,
                       halloffame_ratio=0.1,
                       bb_predict_proba=bb_predict_proba)

    lime_explainer = lime_image.LimeImageExplainer()
    segmenter = SegmentationAlgorithm('quickshift',
                                      kernel_size=1,
                                      max_dist=200,
                                      ratio=0.2)

    input_tensor = bb.layers[0].input
    last_layer = -2 if dataset == 'mnist' else -1
    bb_model = Model(inputs=input_tensor, outputs=bb.layers[last_layer].output)
    target_tensor = bb_model(input_tensor)
    de_list = ['grad*input', 'saliency', 'intgrad', 'elrp', 'occlusion']

    errors = open(
        path_results + 'errors_stability_%s_%s.csv' % (dataset, black_box),
        'w')

    with DeepExplain(session=K.get_session()) as de:

        for i2e in range(nbr_experiments):

            if i2e < start_from:
                continue

            try:

                print(
                    datetime.datetime.now(),
                    '[%s/%s] %s %s - checking stability' %
                    (i2e, nbr_experiments, dataset, black_box))

                expl_list = list()
                jrow_list = list()

                jrow_coh_o = {
                    'i2e': i2e,
                    'dataset': dataset,
                    'black_box': black_box
                }

                # Create random noise
                img = X_test[i2e]
                bbo = bb_predict(np.array([img]))
                bbop = Y_pred_proba[i2e]
                X_random_noise = generate_random_noise(img,
                                                       bb_predict,
                                                       bbo[0],
                                                       nbr_samples=20)
                # Y_pred_random_noise = bb_predict(X_random_noise)
                Y_pred_proba_random_noise = bb_predict_proba(X_random_noise)

                # plt.subplot(1, 3, 1)
                # plt.imshow(X_random_noise[0], cmap='gray')
                # plt.subplot(1, 3, 2)
                # plt.imshow(X_random_noise[1], cmap='gray')
                # plt.subplot(1, 3, 3)
                # plt.imshow(X_random_noise[2], cmap='gray')
                # plt.show()

                # Alore
                print(datetime.datetime.now(), 'calculating alore')
                exp = explainer.explain_instance(img,
                                                 num_samples=1000,
                                                 use_weights=True,
                                                 metric=neuclidean)
                _, diff = exp.get_image_rule(features=None, samples=100)
                expl_list.append(diff)

                # Lime
                print(datetime.datetime.now(), 'calculating lime')
                exp = lime_explainer.explain_instance(
                    img,
                    bb_predict_proba,
                    top_labels=1,
                    hide_color=0,
                    num_samples=1000,
                    segmentation_fn=segmenter)
                _, mask = exp.get_image_and_mask(bbo[0],
                                                 positive_only=False,
                                                 num_features=5,
                                                 hide_rest=False,
                                                 min_weight=0.01)
                expl_list.append(mask)

                # Deep Explain
                xs = transform(np.array([img]))
                ys = to_categorical(bbo, num_classes)

                for det in de_list:
                    print(datetime.datetime.now(), 'calculating %s' % det)
                    if det == 'shapley_sampling':
                        maps = de.explain(det,
                                          target_tensor,
                                          input_tensor,
                                          xs,
                                          ys=ys,
                                          samples=10)[0]
                    else:
                        maps = de.explain(det,
                                          target_tensor,
                                          input_tensor,
                                          xs,
                                          ys=ys)[0]
                    maps = np.mean(maps, axis=2)
                    expl_list.append(maps)

                lipschitz_list = defaultdict(list)
                lipschitz_list_bb = defaultdict(list)

                print(datetime.datetime.now(), 'calculating lipschitz')
                for i2e1 in range(len(X_random_noise)):
                    img1 = X_random_noise[i2e1]
                    bbo1 = bb_predict(np.array([img1]))
                    bbop1 = Y_pred_proba_random_noise[i2e1]
                    norm_bb = calculate_lipschitz_factor(bbop, bbop1)
                    norm_x = calculate_lipschitz_factor(img, img1)

                    # Alore
                    exp1 = explainer.explain_instance(img1,
                                                      num_samples=1000,
                                                      use_weights=True,
                                                      metric=neuclidean)
                    _, diff1 = exp1.get_image_rule(features=None, samples=100)

                    norm_exp = calculate_lipschitz_factor(expl_list[0], diff1)
                    lipschitz_list['alore'].append(norm_exp / norm_x)
                    lipschitz_list_bb['alore'].append(norm_exp / norm_bb)
                    print(datetime.datetime.now(), '\talore',
                          norm_exp / norm_x)

                    # Lime
                    exp1 = lime_explainer.explain_instance(
                        img1,
                        bb_predict_proba,
                        top_labels=1,
                        hide_color=0,
                        num_samples=1000,
                        segmentation_fn=segmenter)
                    _, mask1 = exp1.get_image_and_mask(bbo[0],
                                                       positive_only=False,
                                                       num_features=5,
                                                       hide_rest=False,
                                                       min_weight=0.01)
                    norm_exp = calculate_lipschitz_factor(expl_list[1], mask1)
                    lipschitz_list['lime'].append(norm_exp / norm_x)
                    lipschitz_list_bb['lime'].append(norm_exp / norm_bb)
                    print(datetime.datetime.now(), '\tlime', norm_exp / norm_x)

                    # DeepExplain
                    xs1 = transform(np.array([img1]))
                    ys1 = to_categorical(bbo1, num_classes)

                    for i, det in enumerate(de_list):
                        if det == 'shapley_sampling':
                            maps1 = de.explain(det,
                                               target_tensor,
                                               input_tensor,
                                               xs1,
                                               ys=ys1,
                                               samples=10)[0]
                        else:
                            maps1 = de.explain(det,
                                               target_tensor,
                                               input_tensor,
                                               xs1,
                                               ys=ys1)[0]
                        maps1 = np.mean(maps1, axis=2)
                        norm_exp = calculate_lipschitz_factor(
                            expl_list[i + 2], maps1)
                        lipschitz_list[det].append(norm_exp / norm_x)
                        lipschitz_list_bb[det].append(norm_exp / norm_bb)
                        print(datetime.datetime.now(), '\t%s' % det,
                              norm_exp / norm_x)

                for k in lipschitz_list:
                    jrow_coh = copy.deepcopy(jrow_coh_o)
                    jrow_coh['method'] = k
                    jrow_coh['mean'] = float(np.nanmean(lipschitz_list[k]))
                    jrow_coh['std'] = float(np.nanstd(lipschitz_list[k]))
                    jrow_coh['max'] = float(np.nanmax(lipschitz_list[k]))
                    jrow_coh['mean_bb'] = float(
                        np.nanmean(lipschitz_list_bb[k]))
                    jrow_coh['std_bb'] = float(np.nanstd(lipschitz_list_bb[k]))
                    jrow_coh['max_bb'] = float(np.nanmax(lipschitz_list_bb[k]))
                    jrow_list.append(jrow_coh)
                    print(
                        datetime.datetime.now(),
                        '[%s/%s] %s %s %s - mean: %.3f, max: %.3f' %
                        (i2e, nbr_experiments, dataset, black_box, k,
                         jrow_coh['mean'], jrow_coh['max']))

            except Exception:
                print('error instance to explain: %d' % i2e)
                errors.write('%d\n' % i2e)
                continue

            results = open(results_filename, 'a')
            for jrow in jrow_list:
                results.write('%s\n' % json.dumps(jrow))
            results.close()

    errors.close()
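
calculate_lipschitz_factor only needs to turn two arrays into a norm; a sketch assuming the flattened L2 norm of the difference. The reported score norm_exp / norm_x then estimates a local Lipschitz constant of the explainer: how much the explanation moves per unit of input perturbation.

import numpy as np

def calculate_lipschitz_factor(a, b):
    # L2 norm of the flattened difference, so that images, masks and
    # probability vectors are all compared the same way. Assumed behavior.
    return float(np.linalg.norm(np.asarray(a, dtype=float).ravel()
                                - np.asarray(b, dtype=float).ravel()))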
Example No. 7
def main():

    dataset = sys.argv[1]
    black_box = sys.argv[2]
    neigh_type = sys.argv[3]

    # dataset = 'mnist'
    # black_box = 'DNN'
    # neigh_type = 'hrgp'
    max_nbr_exemplars = 128

    random_state = 0
    ae_name = 'aae'

    nbr_experiments = 200

    if dataset not in ['mnist', 'cifar10', 'fashion']:
        print('unknown dataset %s' % dataset)
        return -1

    if black_box not in ['RF', 'AB', 'DNN']:
        print('unknown black box %s' % black_box)
        return -1

    if neigh_type not in ['rnd', 'gntp', 'hrgp']:
        print('unknown neigh type %s' % neigh_type)
        return -1

    path = './'
    path_models = path + 'models/'
    path_results = path + 'results/validity/'
    path_aemodels = path + 'aemodels/%s/%s/' % (dataset, ae_name)

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)
    results_filename = path_results + 'validthr_%s_%s_%s.json' % (
        dataset, black_box, neigh_type)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb, transform = get_black_box(black_box,
                                  black_box_filename,
                                  use_rgb,
                                  return_model=True)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename,
                                                 use_rgb)
    ae = get_autoencoder(X_test, ae_name, dataset, path_aemodels)
    ae.load_model()

    Y_pred = bb_predict(X_test)
    Y_pred_proba = bb_predict_proba(X_test)

    X_test_comp = X_test[nbr_experiments:]
    Y_pred_comp = Y_pred[nbr_experiments:]
    Y_pred_proba_comp = Y_pred_proba[nbr_experiments:]

    class_name = 'class'
    class_values = ['%s' % i for i in range(len(np.unique(Y_test)))]

    errors = open(
        path_results + 'errors_validity_%s_%s_%s.csv' %
        (dataset, black_box, neigh_type), 'w')

    for i2e in range(nbr_experiments):
        print(datetime.datetime.now(),
              '[%s/%s] %s %s' % (i2e, nbr_experiments, dataset, black_box))

        for valid_thr in np.arange(0.0, 1.0, 0.1):

            explainer = ILOREM(bb_predict,
                               class_name,
                               class_values,
                               neigh_type=neigh_type,
                               use_prob=True,
                               size=1000,
                               ocr=0.1,
                               kernel_width=None,
                               kernel=None,
                               autoencoder=ae,
                               use_rgb=use_rgb,
                               valid_thr=valid_thr,
                               filter_crules=True,
                               random_state=random_state,
                               verbose=False,
                               alpha1=0.5,
                               alpha2=0.5,
                               metric=neuclidean,
                               ngen=10,
                               mutpb=0.2,
                               cxpb=0.5,
                               tournsize=3,
                               halloffame_ratio=0.1,
                               bb_predict_proba=bb_predict_proba)

            try:

                jrow_o = {
                    'i2e': i2e,
                    'dataset': dataset,
                    'black_box': black_box,
                    'neigh_type': neigh_type,
                    'valid_thr': valid_thr
                }
                jrow_list = list()

                img = X_test[i2e]
                bbo = bb_predict(np.array([img]))

                start_time = datetime.datetime.now()

                print(datetime.datetime.now(), '\textract explanation',
                      valid_thr)
                exp = explainer.explain_instance(img,
                                                 num_samples=1000,
                                                 use_weights=True,
                                                 metric=neuclidean)

                run_time = (datetime.datetime.now() -
                            start_time).total_seconds()
                jrow_o['time'] = run_time

                fidelity = exp.fidelity
                jrow_o['fidelity'] = fidelity

                print(datetime.datetime.now(), '\tcalculate compactness',
                      valid_thr)
                res_compactness = get_compactness(img, exp, black_box,
                                                  transform)
                compact, compact_var, lcompact, lcompact_var, img_cdist, rdist = res_compactness
                jrow_o['compactness'] = compact
                jrow_o['compactness_var'] = compact_var
                jrow_o['lcompactness'] = lcompact
                jrow_o['lcompactness_var'] = lcompact_var

                print(datetime.datetime.now(), '\tcalculate plausibility',
                      valid_thr)
                plausibility = get_plausibility(img_cdist, black_box,
                                                transform, X_test, rdist)
                for i, p in enumerate(plausibility):
                    jrow_o['plausibility%d' % i] = p

                print(datetime.datetime.now(), '\tcalculate saliency map',
                      valid_thr)
                _, diff = exp.get_image_rule(features=None, samples=100)

                print(datetime.datetime.now(), '\tcalculate relevancy',
                      valid_thr)
                relevancy = 1.0 - np.abs(diff - 127.5) / 127.5
                for color in [0, 127, 255]:
                    jrow_o = apply_relevancy(jrow_o, img, bb_predict, bbo[0],
                                             relevancy, color)

                print(datetime.datetime.now(), '\tcalculate coherence',
                      valid_thr)
                coherence_lipschitz, coherence_lipschitz_bb = get_lipswhitz_coherence(
                    img, i2e, bb_predict, X_test_comp, Y_pred_comp,
                    Y_pred_proba, Y_pred_proba_comp, diff, explainer)
                jrow_o['coherence_mean'] = float(
                    np.nanmean(coherence_lipschitz))
                jrow_o['coherence_std'] = float(np.nanstd(coherence_lipschitz))
                jrow_o['coherence_max'] = float(np.nanmax(coherence_lipschitz))
                jrow_o['coherence_mean_bb'] = float(
                    np.nanmean(coherence_lipschitz_bb))
                jrow_o['coherence_std_bb'] = float(
                    np.nanstd(coherence_lipschitz_bb))
                jrow_o['coherence_max_bb'] = float(
                    np.nanmax(coherence_lipschitz_bb))

                print(datetime.datetime.now(), '\tcalculate stability',
                      valid_thr)
                stability_lipschitz, stability_lipschitz_bb = get_lipswhitz_stability(
                    img, i2e, bb_predict, X_test_comp, Y_pred_proba,
                    Y_pred_proba_comp, diff, explainer)

                jrow_o['stability_mean'] = float(
                    np.nanmean(stability_lipschitz))
                jrow_o['stability_std'] = float(np.nanstd(stability_lipschitz))
                jrow_o['stability_max'] = float(np.nanmax(stability_lipschitz))
                jrow_o['stability_mean_bb'] = float(
                    np.nanmean(stability_lipschitz_bb))
                jrow_o['stability_std_bb'] = float(
                    np.nanstd(stability_lipschitz_bb))
                jrow_o['stability_max_bb'] = float(
                    np.nanmax(stability_lipschitz_bb))

                print(datetime.datetime.now(), '\tcalculate knn', valid_thr)
                exemplars = exp.get_prototypes_respecting_rule(
                    num_prototypes=max_nbr_exemplars)
                cexemplars = exp.get_counterfactual_prototypes(eps=0.01)
                if len(cexemplars) < max_nbr_exemplars:
                    cexemplars2 = exp.get_prototypes_not_respecting_rule(
                        num_prototypes=max_nbr_exemplars - len(cexemplars))
                    cexemplars.extend(cexemplars2)

                X_test_knn, Y_test_knn = prepare_test_for_knn(
                    bbo, X_test_comp, Y_pred_comp, bb_predict, 200)
                for nbr_exemplars in [1, 2, 4, 8, 16, 32, 64, 128]:
                    jrow_e = copy.deepcopy(jrow_o)
                    jrow_e['nbr_exemplars'] = nbr_exemplars

                    X_train_knn, Y_train_knn = prepare_data_for_knn(
                        exemplars[:nbr_exemplars], cexemplars[:nbr_exemplars],
                        bb_predict(np.array(exemplars[:nbr_exemplars])),
                        bb_predict(np.array(cexemplars[:nbr_exemplars])))
                    for k in range(1, min(nbr_exemplars + 1, 11)):
                        acc = evaluate_with_knn(X_train_knn, Y_train_knn,
                                                X_test_knn, Y_test_knn, k)
                        jrow = copy.deepcopy(jrow_e)
                        jrow['k'] = k
                        jrow['accuracy'] = acc
                        jrow_list.append(jrow)

                results = open(results_filename, 'a')
                for jrow in jrow_list:
                    results.write('%s\n' % json.dumps(jrow))
                results.close()

            except Exception:
                print('error instance to explain: %d' % i2e)
                errors.write('%d\n' % i2e)
                continue

    errors.close()
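
The k-NN helpers encode a simple contract: exemplars and counter-exemplars become a labeled training set, and accuracy is measured on held-out instances with the same black-box labels. A sketch of that assumed contract (not the repository implementation):

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def prepare_data_for_knn(exemplars, cexemplars, y_e, y_c):
    # Stack exemplars and counter-exemplars into one flattened training set.
    X = np.array([x.ravel() for x in list(exemplars) + list(cexemplars)])
    y = np.concatenate([np.asarray(y_e), np.asarray(y_c)])
    return X, y

def evaluate_with_knn(X_train, Y_train, X_test, Y_test, k):
    # Fit a k-NN classifier on the exemplars and score it on the test split.
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, Y_train)
    return float(knn.score(X_test, Y_test))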
Example No. 8
def main():

    dataset = 'mnist'
    black_box = 'DNN'
    num_classes = 10

    path = './'
    path_models = path + 'models/'

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb, transform = get_black_box(black_box,
                                  black_box_filename,
                                  use_rgb,
                                  return_model=True)
    bb_predict, _ = get_black_box(black_box, black_box_filename, use_rgb)

    i2e = 1
    img = X_test[i2e]
    bbo = bb_predict(np.array([img]))

    with DeepExplain(
            session=K.get_session()) as de:  # <-- init DeepExplain context
        # Need to reconstruct the graph in DeepExplain context, using the same weights.
        # With Keras this is very easy:
        # 1. Get the input tensor to the original model
        input_tensor = bb.layers[0].input
        # print(input_tensor)

        # 2. We now target the output of the last dense layer (pre-softmax)
        # To do so, create a new model sharing the same layers until the last dense (index -2)
        fModel = Model(inputs=input_tensor, outputs=bb.layers[-2].output)
        target_tensor = fModel(input_tensor)
        # print(target_tensor)

        # print(fModel.summary())

        xs = transform(np.array([img]))
        xs = xs.astype(float)
        print(xs.shape, xs.dtype)
        # xs = X_test[0:10]
        # xs = np.array([rgb2gray(x) for x in xs])
        ys = to_categorical(bbo, num_classes)
        print(len(xs), len(ys), xs.shape, ys.shape)

        attributions = de.explain('grad*input',
                                  target_tensor,
                                  input_tensor,
                                  xs,
                                  ys=ys)
        # attributions = de.explain('saliency', target_tensor, input_tensor, xs, ys=ys)
        # attributions = de.explain('intgrad', target_tensor, input_tensor, xs, ys=ys)
        # attributions    = de.explain('deeplift', target_tensor, input_tensor, xs, ys=ys)
        # attributions  = de.explain('elrp', target_tensor, input_tensor, xs, ys=ys)
        # attributions   = de.explain('occlusion', target_tensor, input_tensor, xs, ys=ys)

        # Compare Gradient * Input with approximate Shapley Values
        # Note1: Shapley Value sampling with 100 samples per feature (78400 runs) takes a couple of minutes on a GPU.
        # Note2: 100 samples are not enough for convergence, the result might be affected by sampling variance
        # attributions = de.explain('shapley_sampling', target_tensor, input_tensor, xs, ys=ys, samples=10)

    # print(attributions_gradin)
    # print(attributions_sal)
    # print(attributions_sal.shape)
    plot(attributions[0], xi=xs[0], cmap=plt.cm.BrBG)
    plt.show()
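
plot here is the attribution-plotting utility from the DeepExplain example notebooks; a minimal stand-in with the same call shape (the blending and color range are our choices):

import numpy as np
import matplotlib.pyplot as plt

def plot(data, xi=None, cmap='RdBu_r', axis=None):
    # Grey input underneath, signed attribution map on top, with a color
    # range symmetric around zero so positive and negative evidence match.
    axis = axis if axis is not None else plt.gca()
    a = np.mean(data, axis=-1) if data.ndim == 3 else data
    if xi is not None:
        x = np.mean(xi, axis=-1) if xi.ndim == 3 else xi
        axis.imshow(x, cmap='gray')
    vmax = float(np.abs(a).max()) or 1.0
    axis.imshow(a, cmap=cmap, vmin=-vmax, vmax=vmax, alpha=0.7)
    axis.axis('off')
    return axis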
Example No. 9
def main():

    dataset = sys.argv[1]

    # dataset = 'mnist'

    if len(sys.argv) > 2:
        start_from = int(sys.argv[2])
    else:
        start_from = 0


    black_box = 'DNN'
    neigh_type = 'hrgp'
    max_nbr_exemplars = 128

    random_state = 0
    ae_name = 'aae'
    gamma = None

    nbr_experiments = 200

    if dataset not in ['mnist', 'cifar10', 'fashion']:
        print('unknown dataset %s' % dataset)
        return -1

    if black_box not in ['RF', 'AB', 'DNN']:
        print('unknown black box %s' % black_box)
        return -1

    if neigh_type not in ['rnd', 'gntp', 'hrgp']:
        print('unknown neigh type %s' % neigh_type)
        return -1

    path = './'
    path_models = path + 'models/'
    path_results = path + 'results/expcl/'
    path_aemodels = path + 'aemodels/%s/%s/' % (dataset, ae_name)

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)
    results_filename = path_results + 'expcl_%s_%s_%s.json' % (dataset, black_box, neigh_type)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb, transform = get_black_box(black_box, black_box_filename, use_rgb, return_model=True)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename, use_rgb)

    Y_pred = bb_predict(X_test)

    X_test_comp = X_test[nbr_experiments:]
    Y_pred_comp = Y_pred[nbr_experiments:]
    X = np.array([x.ravel() for x in transform(X_test_comp)])

    ae = get_autoencoder(X_test, ae_name, dataset, path_aemodels)
    ae.load_model()

    class_name = 'class'
    class_values = ['%s' % i for i in range(len(np.unique(Y_test)))]

    explainer = ILOREM(bb_predict, class_name, class_values, neigh_type=neigh_type, use_prob=True, size=1000, ocr=0.1,
                       kernel_width=None, kernel=None, autoencoder=ae, use_rgb=use_rgb, valid_thr=0.5,
                       filter_crules=True, random_state=random_state, verbose=False, alpha1=0.5, alpha2=0.5,
                       metric=neuclidean, ngen=10, mutpb=0.2, cxpb=0.5, tournsize=3, halloffame_ratio=0.1,
                       bb_predict_proba=bb_predict_proba)

    errors = open(path_results + 'errors_expcl_%s_%s.csv' % (dataset, black_box), 'w')

    for i2e in range(nbr_experiments):

        if i2e < start_from:
            continue

        print(datetime.datetime.now(), '[%s/%s] %s %s' % (i2e, nbr_experiments, dataset, black_box))

        try:

            img = X_test[i2e]
            bbo = bb_predict(np.array([img]))
            jrow_o = {'i2e': i2e, 'dataset': dataset, 'black_box': black_box}
            jrow_list = list()

            X_test_knn, Y_test_knn = prepare_test_for_knn(bbo, X_test_comp, Y_pred_comp, bb_predict, 200)

            # Alore
            print(datetime.datetime.now(), 'alore')
            exp = explainer.explain_instance(img, num_samples=1000, use_weights=True, metric=neuclidean)

            # MMD
            # kernel = rbf_kernel(X, gamma=gamma)
            print(datetime.datetime.now(), 'mmd')
            kernel = calculate_kernel_individual(X, Y_pred_comp, gamma)

            # K-Medoids
            print(datetime.datetime.now(), 'k-medoids')
            X_idx = np.where(Y_pred_comp == bbo)[0]
            Z = X[X_idx]
            scaler = MinMaxScaler()
            Z = scaler.fit_transform(Z)
            zimg = scaler.transform(transform(np.array([img])).ravel().reshape(1, -1))
            dist = cdist(zimg.reshape(1, -1), Z)
            idx_p = X_idx[np.argsort(dist[0])]  # rank same-label candidates by distance
            idx_p = idx_p[idx_p != i2e]

            X_idx = np.where(Y_pred_comp != bbo)[0]
            Z = X[X_idx]
            scaler = MinMaxScaler()
            Z = scaler.fit_transform(Z)
            zimg = scaler.transform(transform(np.array([img])).ravel().reshape(1, -1))
            dist = cdist(zimg.reshape(1, -1), Z)
            idx_n = X_idx[np.argsort(dist[0])]  # rank other-label candidates by distance
            idx_n = idx_n[idx_n != i2e]

            # Alore
            print(datetime.datetime.now(), 'alore - generate exemplars')
            alore_exemplars = exp.get_prototypes_respecting_rule(num_prototypes=max_nbr_exemplars)
            alore_cexemplars = exp.get_counterfactual_prototypes(eps=0.01)
            if len(alore_cexemplars) < max_nbr_exemplars:
                cexemplars2 = exp.get_prototypes_not_respecting_rule(num_prototypes=max_nbr_exemplars - len(alore_cexemplars))
                alore_cexemplars.extend(cexemplars2)

            # MMD
            print(datetime.datetime.now(), 'mmd - select prototypes')
            proto_idx = greedy_select_protos(kernel, np.array(range(np.shape(kernel)[0])),
                                             nbr_prototypes=max_nbr_exemplars)
            crit_idx = select_criticism_regularized(kernel, proto_idx, nbr_criticisms=max_nbr_exemplars,
                                                    is_K_sparse=False, reg='logdet')

            proto_idx_sel = [i for i in proto_idx if Y_pred_comp[i] == bbo[0]][:max_nbr_exemplars]
            crit_idx_sel = [i for i in crit_idx if Y_pred_comp[i] != bbo[0]][:max_nbr_exemplars]
            mmd_exemplars = X_test_comp[proto_idx_sel]
            mmd_cexemplars = X_test_comp[crit_idx_sel]

            # K-Medoids
            print(datetime.datetime.now(), 'k-medoids - select prototypes')
            kmedoids_exemplars = X_test_comp[idx_p[:max_nbr_exemplars]]
            kmedoids_cexemplars = X_test_comp[idx_n[:max_nbr_exemplars]]

            for nbr_exemplars in [1, 2, 4, 8, 16, 32, 64, 128]:

                jrow_e = copy.deepcopy(jrow_o)
                jrow_e['nbr_exemplars'] = nbr_exemplars

                X_train_knn, Y_train_knn = prepare_data_for_knn(alore_exemplars[:nbr_exemplars],
                                                                alore_cexemplars[:nbr_exemplars],
                                                                bb_predict(np.array(alore_exemplars[:nbr_exemplars])),
                                                                bb_predict(np.array(alore_cexemplars[:nbr_exemplars])))

                for k in range(1, min(nbr_exemplars + 1, 11)):
                    jrow = copy.deepcopy(jrow_e)
                    acc = evaluate_with_knn(X_train_knn, Y_train_knn, X_test_knn, Y_test_knn, k)
                    jrow['method'] = 'alore'
                    jrow['k'] = k
                    jrow['accuracy'] = acc
                    jrow_list.append(jrow)
                    print(datetime.datetime.now(),
                          '[%s/%s] %s %s %s - alore: %.3f' % (i2e, nbr_experiments, dataset, black_box, k, acc))

                X_train_knn, Y_train_knn = prepare_data_for_knn(mmd_exemplars[:nbr_exemplars],
                                                                mmd_cexemplars[:nbr_exemplars],
                                                                bb_predict(np.array(mmd_exemplars[:nbr_exemplars])),
                                                                bb_predict(np.array(mmd_cexemplars[:nbr_exemplars])))

                for k in range(1, min(nbr_exemplars + 1, 11)):
                    jrow = copy.deepcopy(jrow_e)
                    acc = evaluate_with_knn(X_train_knn, Y_train_knn, X_test_knn, Y_test_knn, k)
                    jrow['method'] = 'mmd'
                    jrow['k'] = k
                    jrow['accuracy'] = acc
                    jrow_list.append(jrow)
                    print(datetime.datetime.now(),
                          '[%s/%s] %s %s %s - mmd: %.3f' % (i2e, nbr_experiments, dataset, black_box, k, acc))

                X_train_knn, Y_train_knn = prepare_data_for_knn(kmedoids_exemplars[:nbr_exemplars],
                                                                kmedoids_cexemplars[:nbr_exemplars],
                                                                bb_predict(np.array(kmedoids_exemplars[:nbr_exemplars])),
                                                                bb_predict(np.array(kmedoids_cexemplars[:nbr_exemplars])))

                for k in range(1, min(nbr_exemplars + 1, 11)):
                    jrow = copy.deepcopy(jrow_e)
                    acc = evaluate_with_knn(X_train_knn, Y_train_knn, X_test_knn, Y_test_knn, k)
                    jrow['method'] = 'k-medoids'
                    jrow['k'] = k
                    jrow['accuracy'] = acc
                    jrow_list.append(jrow)
                    print(datetime.datetime.now(),
                          '[%s/%s] %s %s %s - k-medoids: %.3f' % (i2e, nbr_experiments, dataset, black_box, k, acc))

        except Exception:
            print('error instance to explain: %d' % i2e)
            errors.write('%d\n' % i2e)
            continue

        results = open(results_filename, 'a')
        for jrow in jrow_list:
            results.write('%s\n' % json.dumps(jrow))
        results.close()

    errors.close()
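
calculate_kernel_individual presumably builds the MMD-critic kernel per predicted class, so prototypes are effectively selected within each label; a hedged sketch using an RBF kernel with cross-label entries zeroed (assumed behavior):

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel

def calculate_kernel_individual(X, y, gamma=None):
    # RBF kernel restricted to same-label pairs; entries between instances
    # with different predicted labels are set to zero.
    K = rbf_kernel(X, gamma=gamma)
    y = np.asarray(y)
    return K * (y[:, None] == y[None, :])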
Example No. 10
def main():

    dataset = sys.argv[1]

    # dataset = 'mnist'

    black_box = 'RF'
    neigh_type = 'hrgp'

    random_state = 0
    ae_name = 'aae'
    num_classes = 10

    nbr_experiments = 200

    if dataset not in ['mnist', 'cifar10', 'fashion']:
        print('unknown dataset %s' % dataset)
        return -1

    if black_box not in ['RF', 'AB', 'DNN']:
        print('unknown black box %s' % black_box)
        return -1

    if neigh_type not in ['rnd', 'gntp', 'hrgp']:
        print('unknown neigh type %s' % neigh_type)
        return -1

    path = './'
    path_models = path + 'models/'
    path_results = path + 'results/coherence/'
    path_aemodels = path + 'aemodels/%s/%s/' % (dataset, ae_name)

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)
    results_filename = path_results + 'coh_%s_%s_%s.json' % (
        dataset, black_box, neigh_type)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb, transform = get_black_box(black_box,
                                  black_box_filename,
                                  use_rgb,
                                  return_model=True)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename,
                                                 use_rgb)

    Y_pred = bb_predict(X_test)
    Y_pred_proba = bb_predict_proba(X_test)

    X_test_comp = X_test[nbr_experiments:]
    Y_pred_comp = Y_pred[nbr_experiments:]
    Y_pred_proba_comp = Y_pred_proba[nbr_experiments:]

    ae = get_autoencoder(X_test, ae_name, dataset, path_aemodels)
    ae.load_model()

    class_name = 'class'
    class_values = ['%s' % i for i in range(len(np.unique(Y_test)))]

    explainer = ILOREM(bb_predict,
                       class_name,
                       class_values,
                       neigh_type=neigh_type,
                       use_prob=True,
                       size=1000,
                       ocr=0.1,
                       kernel_width=None,
                       kernel=None,
                       autoencoder=ae,
                       use_rgb=use_rgb,
                       valid_thr=0.5,
                       filter_crules=True,
                       random_state=random_state,
                       verbose=False,
                       alpha1=0.5,
                       alpha2=0.5,
                       metric=neuclidean,
                       ngen=10,
                       mutpb=0.2,
                       cxpb=0.5,
                       tournsize=3,
                       halloffame_ratio=0.1,
                       bb_predict_proba=bb_predict_proba)

    lime_explainer = lime_image.LimeImageExplainer()
    segmenter = SegmentationAlgorithm('quickshift',
                                      kernel_size=1,
                                      max_dist=200,
                                      ratio=0.2)

    errors = open(
        path_results + 'errors_coherence_%s_%s.csv' % (dataset, black_box),
        'w')

    for i2e in range(nbr_experiments):

        try:

            expl_list = list()
            jrow_list = list()

            jrow_coh_o = {
                'i2e': i2e,
                'dataset': dataset,
                'black_box': black_box
            }

            # Finding the Lipschitz neighborhood
            img = X_test[i2e]
            bbo = bb_predict(np.array([img]))
            bbop = Y_pred_proba[i2e]

            X_idx = np.where(Y_pred_comp == bbo[0])[0]

            scaler = MinMaxScaler()
            x0 = scaler.fit_transform(img.ravel().reshape(-1, 1))
            Xj = scaler.fit_transform([x.ravel() for x in X_test_comp[X_idx]])
            dist = cdist(x0.reshape(1, -1), Xj)[0]
            eps = np.percentile(dist, 5)
            X_idx_eps = X_idx[np.where(dist <= eps)]

            # Alore
            exp = explainer.explain_instance(img,
                                             num_samples=1000,
                                             use_weights=True,
                                             metric=neuclidean)
            _, diff = exp.get_image_rule(features=None, samples=100)
            expl_list.append(diff)

            # Lime
            exp = lime_explainer.explain_instance(img,
                                                  bb_predict_proba,
                                                  top_labels=1,
                                                  hide_color=0,
                                                  num_samples=1000,
                                                  segmentation_fn=segmenter)
            _, mask = exp.get_image_and_mask(bbo[0],
                                             positive_only=False,
                                             num_features=5,
                                             hide_rest=False,
                                             min_weight=0.01)
            expl_list.append(mask)

            lipschitz_list = defaultdict(list)
            lipschitz_list_bb = defaultdict(list)

            print(
                datetime.datetime.now(), '[%s/%s] %s %s - checking coherence' %
                (i2e, nbr_experiments, dataset, black_box))

            for i2e1 in X_idx_eps[:20]:
                img1 = X_test_comp[i2e1]
                bbo1 = bb_predict(np.array([img1]))
                bbop1 = Y_pred_proba_comp[i2e1]
                norm_bb = calculate_lipschitz_factor(bbop, bbop1)
                norm_x = calculate_lipschitz_factor(img, img1)

                # Alore
                exp1 = explainer.explain_instance(img1,
                                                  num_samples=1000,
                                                  use_weights=True,
                                                  metric=neuclidean)
                _, diff1 = exp1.get_image_rule(features=None, samples=100)

                norm_exp = calculate_lipschitz_factor(expl_list[0], diff1)
                lipschitz_list['alore'].append(norm_exp / norm_x)
                lipschitz_list_bb['alore'].append(norm_exp / norm_bb)
                print(datetime.datetime.now(), '\talore', norm_exp / norm_x)

                # Lime
                exp1 = lime_explainer.explain_instance(
                    img1,
                    bb_predict_proba,
                    top_labels=1,
                    hide_color=0,
                    num_samples=1000,
                    segmentation_fn=segmenter)
                _, mask1 = exp1.get_image_and_mask(bbo[0],
                                                   positive_only=False,
                                                   num_features=5,
                                                   hide_rest=False,
                                                   min_weight=0.01)
                norm_exp = calculate_lipschitz_factor(expl_list[1], mask1)
                lipschitz_list['lime'].append(norm_exp / norm_x)
                lipschitz_list_bb['lime'].append(norm_exp / norm_bb)
                print(datetime.datetime.now(), '\tlime', norm_exp / norm_x)

            for k in lipschitz_list:
                jrow_coh = copy.deepcopy(jrow_coh_o)
                jrow_coh['method'] = k
                jrow_coh['mean'] = float(np.nanmean(lipschitz_list[k]))
                jrow_coh['std'] = float(np.nanstd(lipschitz_list[k]))
                jrow_coh['max'] = float(np.nanmax(lipschitz_list[k]))
                jrow_coh['mean_bb'] = float(np.nanmean(lipschitz_list_bb[k]))
                jrow_coh['std_bb'] = float(np.nanstd(lipschitz_list_bb[k]))
                jrow_coh['max_bb'] = float(np.nanmax(lipschitz_list_bb[k]))
                jrow_list.append(jrow_coh)
                print(
                    datetime.datetime.now(),
                    '[%s/%s] %s %s %s - mean: %.3f, max: %.3f' %
                    (i2e, nbr_experiments, dataset, black_box, k,
                     jrow_coh['mean'], jrow_coh['max']))

        except Exception:
            print('error instance to explain: %d' % i2e)
            errors.write('%d\n' % i2e)
            continue

        results = open(results_filename, 'a')
        for jrow in jrow_list:
            results.write('%s\n' % json.dumps(jrow))
        results.close()

    errors.close()
Example No. 11
def main():

    dataset = sys.argv[1]
    black_box = sys.argv[2]

    # dataset = 'mnist'
    # black_box = 'RF'

    nbr_experiments = 200

    if dataset not in ['mnist', 'cifar10', 'fashion']:
        print('unknown dataset %s' % dataset)
        return -1

    if black_box not in ['RF', 'AB', 'DNN']:
        print('unknown black box %s' % black_box)
        return -1

    path = './'
    path_models = path + 'models/'
    path_results = path + 'results/fcp/'
    path_neigh = './neigh/'

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)
    results_filename = path_results + 'lime_p_%s_%s.json' % (dataset,
                                                             black_box)
    neigh_filename = path_neigh + 'lime_%s_%s.json' % (dataset, black_box)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb, transform = get_black_box(black_box,
                                  black_box_filename,
                                  use_rgb,
                                  return_model=True)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename,
                                                 use_rgb)

    lime_explainer = lime_image.LimeImageExplainer()
    segmenter = SegmentationAlgorithm('quickshift',
                                      kernel_size=1,
                                      max_dist=200,
                                      ratio=0.2)

    for i2e in range(nbr_experiments):
        img = X_test[i2e]

        start_time = datetime.datetime.now()
        exp = lime_explainer.explain_instance(img,
                                              bb_predict_proba,
                                              top_labels=1,
                                              hide_color=0,
                                              num_samples=1000,
                                              segmentation_fn=segmenter)
        run_time = (datetime.datetime.now() - start_time).total_seconds()

        label = bb_predict(np.array([X_test[i2e]]))[0]

        bb_probs = lime_explainer.Zl[:, label]
        lr_probs = lime_explainer.lr.predict(lime_explainer.Zlr)

        # fraction of neighborhood points where the surrogate agrees with the
        # black box within a 0.01 tolerance (cf. the rounded check in Example No. 13)
        fidelity = np.sum(
            np.abs(bb_probs - lr_probs) < 0.01) / len(bb_probs)

        img_cdist = transform(np.array([img]))
        Z_cdist = transform(lime_explainer.Z)

        if black_box == 'DNN':
            img_cdist = np.array([x.ravel() for x in img_cdist])
            Z_cdist = np.array([x.ravel() for x in Z_cdist])

        rdist = cdist(img_cdist, Z_cdist, metric='euclidean')
        compact, compact_var = float(np.mean(rdist)), float(np.std(rdist))

        sdist = pairwise_distances(lime_explainer.Zlr,
                                   np.array([lime_explainer.Zlr[0]]),
                                   metric='cosine').ravel()
        lcompact, lcompact_var = float(np.mean(sdist)), float(np.std(sdist))

        X_test_cdist = transform(X_test)
        if black_box == 'DNN':
            X_test_cdist = np.array([x.ravel() for x in X_test_cdist])

        dist = cdist(img_cdist, X_test_cdist, metric='euclidean')
        nbr_real_instances = len(X_test)
        plausibility = calculate_plausibilities(rdist, dist,
                                                nbr_real_instances)

        print(
            datetime.datetime.now(),
            '[%s/%s] %s %s - f: %.2f, c: %.2f, lc: %.2f, p: %.2f' %
            (i2e, nbr_experiments, dataset, black_box, fidelity, compact,
             lcompact, plausibility[-2]))

        Z = lime_explainer.Z
        Zl = lime_explainer.Zlr

        store_fcpn(i2e, results_filename, neigh_filename, dataset, black_box,
                   fidelity, compact, compact_var, lcompact, lcompact_var,
                   plausibility, run_time, Z, Zl, 'rnd')
Example No. 12
def main():

    random_state = 0
    dataset = 'fashion'
    black_box = 'RF'

    ae_name = 'aae'

    path = './'
    path_models = path + 'models/'
    path_aemodels = path + 'aemodels/%s/%s/' % (dataset, ae_name)

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename,
                                                 use_rgb)
    ae = get_autoencoder(X_test, ae_name, dataset, path_aemodels)
    ae.load_model()

    class_name = 'class'
    class_values = ['%s' % i for i in range(len(np.unique(Y_test)))]

    i2e = 10
    img = X_test[i2e]

    explainer = ILOREM(bb_predict,
                       class_name,
                       class_values,
                       neigh_type='rnd',
                       use_prob=True,
                       size=1000,
                       ocr=0.1,
                       kernel_width=None,
                       kernel=None,
                       autoencoder=ae,
                       use_rgb=use_rgb,
                       valid_thr=0.5,
                       filter_crules=True,
                       random_state=random_state,
                       verbose=True,
                       alpha1=0.5,
                       alpha2=0.5,
                       metric=neuclidean,
                       ngen=10,
                       mutpb=0.2,
                       cxpb=0.5,
                       tournsize=3,
                       halloffame_ratio=0.1,
                       bb_predict_proba=bb_predict_proba)

    exp = explainer.explain_instance(img,
                                     num_samples=1000,
                                     use_weights=True,
                                     metric=neuclidean)

    print('e = {\n\tr = %s\n\tc = %s    \n}' % (exp.rstr(), exp.cstr()))
    print(exp.bb_pred, exp.dt_pred, exp.fidelity)
    print(exp.limg)

    img2show, mask = exp.get_image_rule(features=None, samples=10)
    if use_rgb:
        plt.imshow(img2show)
    else:
        plt.imshow(img2show, cmap='gray')
    bbo = bb_predict(np.array([img2show]))[0]
    plt.title('image to explain - black box %s' % bbo)
    plt.show()

    # if use_rgb:
    #     plt.imshow(img2show, cmap='gray')
    # else:
    #     plt.imshow(img2show)

    dx, dy = 0.05, 0.05
    xx = np.arange(0.0, img2show.shape[1], dx)
    yy = np.arange(0.0, img2show.shape[0], dy)
    xmin, xmax, ymin, ymax = np.amin(xx), np.amax(xx), np.amin(yy), np.amax(yy)
    extent = xmin, xmax, ymin, ymax
    cmap_xi = plt.get_cmap('Greys_r')
    cmap_xi.set_bad(alpha=0)

    # Compute edges (to overlay to heatmaps later)
    percentile = 100
    dilation = 3.0
    alpha = 0.8
    xi_greyscale = img2show if len(img2show.shape) == 2 else np.mean(img2show,
                                                                     axis=-1)
    # skimage.transform.rescale: here `transform` is the skimage module,
    # not the black-box preprocessing used in the other examples
    in_image_upscaled = transform.rescale(xi_greyscale,
                                          dilation,
                                          mode='constant')
    edges = feature.canny(in_image_upscaled).astype(float)
    edges[edges < 0.5] = np.nan
    edges[:5, :] = np.nan
    edges[-5:, :] = np.nan
    edges[:, :5] = np.nan
    edges[:, -5:] = np.nan
    overlay = edges

    # abs_max = np.percentile(np.abs(data), percentile)
    # abs_min = abs_max

    # plt.pcolormesh(range(mask.shape[0]), range(mask.shape[1]), mask, cmap=plt.cm.BrBG, alpha=1, vmin=0, vmax=255)
    plt.imshow(mask,
               extent=extent,
               cmap=plt.cm.BrBG,
               alpha=1,
               vmin=0,
               vmax=255)
    plt.imshow(overlay,
               extent=extent,
               interpolation='none',
               cmap=cmap_xi,
               alpha=alpha)
    plt.axis('off')
    plt.title('attention area respecting latent rule')
    plt.show()
Example No. 13
def main():

    dataset = 'mnist'
    black_box = 'RF'

    path = './'
    path_models = path + 'models/'

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename,
                                                 use_rgb)

    lime_explainer = lime_image.LimeImageExplainer()
    segmenter = SegmentationAlgorithm('quickshift',
                                      kernel_size=1,
                                      max_dist=200,
                                      ratio=0.2)

    i2e = 1
    img = X_test[i2e]

    exp = lime_explainer.explain_instance(img,
                                          bb_predict_proba,
                                          top_labels=1,
                                          hide_color=0,
                                          num_samples=1000,
                                          segmentation_fn=segmenter)
    print(exp.local_exp)
    print(exp.local_pred)

    # print(lime_explainer.Zlr)
    # print(lime_explainer.Zl)

    label = bb_predict(np.array([X_test[i2e]]))[0]
    print(label)

    # print(lime_explainer.Zl[:, label][0])
    # print(lime_explainer.lr.predict(lime_explainer.Zlr)[0])

    bb_probs = lime_explainer.Zl[:, label]
    lr_probs = lime_explainer.lr.predict(lime_explainer.Zlr)

    print(1 - np.sum(np.abs(np.round(bb_probs) - np.round(lr_probs))) /
          len(bb_probs))

    img2show, mask = exp.get_image_and_mask(Y_test[i2e],
                                            positive_only=False,
                                            num_features=5,
                                            hide_rest=False,
                                            min_weight=0.01)
    plt.imshow(label2rgb(mask, img2show, bg_label=0), interpolation='nearest')
    plt.show()

    img2show, mask = exp.get_image_and_mask(Y_test[i2e],
                                            positive_only=True,
                                            num_features=5,
                                            hide_rest=True,
                                            min_weight=0.01)
    plt.imshow(img2show.astype(int), cmap=None if use_rgb else 'gray')
    plt.show()
Example No. 14
def main():

    dataset = sys.argv[1]
    black_box = sys.argv[2]
    neigh_type = sys.argv[3]
    if len(sys.argv) > 4:
        start_from = int(sys.argv[4])
    else:
        start_from = 0

    # dataset = 'mnist'
    # black_box = 'DNN'
    # neigh_type = 'hrgp'

    random_state = 0
    ae_name = 'aae'

    nbr_experiments = 200

    if dataset not in ['mnist', 'cifar10', 'fashion']:
        print('unknown dataset %s' % dataset)
        return -1

    if black_box not in ['RF', 'AB', 'DNN']:
        print('unknown black box %s' % black_box)
        return -1

    if neigh_type not in ['rnd', 'gntp', 'hrgp']:
        print('unknown neigh type %s' % neigh_type)
        return -1

    path = './'
    path_models = path + 'models/'
    path_results = path + 'results/fcp/'
    path_aemodels = path + 'aemodels/%s/%s/' % (dataset, ae_name)
    path_neigh = './neigh/'

    black_box_filename = path_models + '%s_%s' % (dataset, black_box)
    results_filename = path_results + 'alore_p_%s_%s_%s.json' % (
        dataset, black_box, neigh_type)
    neigh_filename = path_neigh + 'alore_%s_%s_%s.json.gz' % (
        dataset, black_box, neigh_type)

    _, _, X_test, Y_test, use_rgb = get_dataset(dataset)
    bb, transform = get_black_box(black_box,
                                  black_box_filename,
                                  use_rgb,
                                  return_model=True)
    bb_predict, bb_predict_proba = get_black_box(black_box, black_box_filename,
                                                 use_rgb)
    ae = get_autoencoder(X_test, ae_name, dataset, path_aemodels)
    ae.load_model()

    class_name = 'class'
    class_values = ['%s' % i for i in range(len(np.unique(Y_test)))]

    explainer = ILOREM(bb_predict,
                       class_name,
                       class_values,
                       neigh_type=neigh_type,
                       use_prob=True,
                       size=1000,
                       ocr=0.1,
                       kernel_width=None,
                       kernel=None,
                       autoencoder=ae,
                       use_rgb=use_rgb,
                       valid_thr=0.5,
                       filter_crules=True,
                       random_state=random_state,
                       verbose=False,
                       alpha1=0.5,
                       alpha2=0.5,
                       metric=neuclidean,
                       ngen=10,
                       mutpb=0.2,
                       cxpb=0.5,
                       tournsize=3,
                       halloffame_ratio=0.1,
                       bb_predict_proba=bb_predict_proba)

    errors = open(
        path_results + 'errors_alore_%s_%s_%s.csv' %
        (dataset, black_box, neigh_type), 'w')

    for i2e in range(nbr_experiments):
        if i2e < start_from:
            continue

        img = X_test[i2e]

        start_time = datetime.datetime.now()
        try:
            exp = explainer.explain_instance(img,
                                             num_samples=1000,
                                             use_weights=True,
                                             metric=neuclidean)
        except Exception:
            print('error instance to explain: %d' % i2e)
            errors.write('%d\n' % i2e)
            errors.flush()
            continue

        run_time = (datetime.datetime.now() - start_time).total_seconds()

        fidelity = exp.fidelity

        img_cdist = transform(np.array([img]))
        Zl = exp.Z
        Z = exp.autoencoder.decode(Zl)
        Z_cdist = transform(Z)

        if black_box == 'DNN':
            img_cdist = np.array([x.ravel() for x in img_cdist])
            Z_cdist = np.array([x.ravel() for x in Z_cdist])

        rdist = cdist(img_cdist, Z_cdist, metric='euclidean')
        compact, compact_var = float(np.mean(rdist)), float(np.std(rdist))

        sdist = cdist(np.array([exp.Z[0]]), exp.Z, metric=neuclidean)
        lcompact, lcompact_var = float(np.mean(sdist)), float(np.std(sdist))

        X_test_cdist = transform(X_test)
        if black_box == 'DNN':
            X_test_cdist = np.array([x.ravel() for x in X_test_cdist])

        dist = cdist(img_cdist, X_test_cdist, metric='euclidean')
        nbr_real_instances = len(X_test)
        plausibility = calculate_plausibilities(rdist, dist,
                                                nbr_real_instances)

        print(
            datetime.datetime.now(),
            '[%s/%s] %s %s %s - f: %.2f, c: %.2f, lc: %.2f, p: %.2f' %
            (i2e, nbr_experiments, dataset, black_box, neigh_type, fidelity,
             compact, lcompact, plausibility[-2]))

        store_fcpn(i2e, results_filename, neigh_filename, dataset, black_box,
                   fidelity, compact, compact_var, lcompact, lcompact_var,
                   plausibility, run_time, Z, Zl, neigh_type)

    errors.close()
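
store_fcpn persists one JSON row of fidelity/compactness/plausibility metrics and appends the generated neighborhood to a gzipped file; a hedged sketch of the assumed layout (field names mirror the keys used elsewhere in these examples, but the real writer lives in the repository):

import gzip
import json
import numpy as np

def store_fcpn(i2e, results_filename, neigh_filename, dataset, black_box,
               fidelity, compact, compact_var, lcompact, lcompact_var,
               plausibility, run_time, Z, Zl, neigh_type):
    row = {'i2e': i2e, 'dataset': dataset, 'black_box': black_box,
           'neigh_type': neigh_type, 'fidelity': fidelity, 'time': run_time,
           'compactness': compact, 'compactness_var': compact_var,
           'lcompactness': lcompact, 'lcompactness_var': lcompact_var}
    for i, p in enumerate(plausibility):
        row['plausibility%d' % i] = p
    with open(results_filename, 'a') as f:
        f.write('%s\n' % json.dumps(row))
    neigh = {'i2e': i2e, 'Z': np.asarray(Z).tolist(),
             'Zl': np.asarray(Zl).tolist()}
    with gzip.open(neigh_filename, 'at') as f:  # appended per instance
        f.write('%s\n' % json.dumps(neigh))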