def experiment(dl_params, model_params, explainer_type, save_dir=""):

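    #   clear any leftover graph/session state so repeated runs in the same
    #   process start fresh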
    keras.backend.clear_session()

    #   create data
    print("Loading data...")
    dataloader = Dataloader(dl_params, rseed=0)
    #X_train, y_train = dataloader.get_dataset("train")
    #X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    #   convert to np.array
    #X_train = np.stack(X_train, axis=0)
    #X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    #y_train = np.asarray(y_train)
    #y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    #X_train = X_train.astype("float") / 255.0
    #X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    #image = expand_dims(X_test[0], axis=0)
    image = X_test[70]
    print(image.shape)

    print(matplotlib.get_backend())

    print("Building classifier...")
    #   add this line to prevent some Keras serializer error
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = load_model(model_params['load_location'])

    print("Predicting image...")
    label = model.predict(np.array([image]))

    print("The inputted image is predicted to be ", label)

    print("Building explainer...")
    if model_params['output_dim'] > 2:
        model_wo_sm = iutils.keras.graph.model_wo_softmax(
            model)  #   remove softmax
    else:
        model_wo_sm = model

    explainer = innvestigate.create_analyzer(explainer_type, model_wo_sm)
    print("Explainer type: ", type(explainer))
    explain_innvestigate(image,
                         label,
                         explainer,
                         save_name=explainer_type,
                         save_dir=save_dir)

    keras.backend.clear_session()
Example #2
def experiment(dl_params,
               model_params,
               label=None,
               num_protos=10,
               save_dir=""):

    keras.backend.clear_session()

    #   create data
    print("Loading data...")
    dataloader = Dataloader(dl_params, rseed=0)
    X_train, y_train = dataloader.get_dataset("train")
    X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    #   convert to np.array
    X_train = np.stack(X_train, axis=0)
    #X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    #y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    #X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    #image = expand_dims(X_test[0], axis=0)
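    #   wrap a single test image and its label in a leading batch dimension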
    image = np.array([X_test[70]])
    image_label = np.array([label])
    print(image.shape)

    print(matplotlib.get_backend())

    print(image.shape)
    print(image_label.shape)

    #   single image
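    #   explain_protodash (a project-local wrapper) takes the query set first
    #   and the candidate pool second; the returned indices appear to point
    #   into the pool (here, X_train)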
    proto_indices, weights = explain_protodash((image, image_label),
                                               (X_train, y_train),
                                               label=label,
                                               num_protos=num_protos,
                                               save_dir=save_dir)
    """
    #   multiple images
    proto_indices, weights = explain_protodash((X_train, y_train), (X_train, y_train),
                                               label=label, num_protos=num_protos, 
                                               save_dir=save_dir)
    """
    print("Prototype Indices: ", proto_indices)
    print("Weights: ", weights)

    keras.backend.clear_session()
Example #3
def experiment(dl_params, model_params, train_params, train_model=False):

    #   create data
    print("Loading data...", flush=True)
    dataloader = Dataloader(dl_params, rseed=0)
    X_train, y_train = dataloader.get_dataset("train")
    X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  #   save some memory

    #   convert to np.array
    X_train = np.stack(X_train, axis=0)
    X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    #   convert labels to 1-hot vector
    binarizer = LabelBinarizer()
    #   fit on the training labels only, then reuse the same mapping for the
    #   other splits
    y_train = binarizer.fit_transform(y_train)
    y_valid = binarizer.transform(y_valid)
    y_test = binarizer.transform(y_test)
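    #   note: with exactly two classes, sklearn's LabelBinarizer returns a
    #   single 0/1 column, e.g. fit_transform(["a", "b", "a"]) -> [[0], [1], [0]]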

    print("Building classifier...")
    #   need to add our own "top" FC to make classes=2
    clf = DenseNet(model_params)

    if train_model is True:
        print("Training classifier...")
        clf.model = train(train_params, clf.model, X_train, y_train, X_valid,
                          y_valid)
    elif train_model is False:
        clf.model = load_model(model_params['load_location'])
    else:
        pass  #   any other value (e.g., None): keep the untrained model
    del X_train, X_valid, y_train, y_valid  #   save memory

    print("Testing classifier...")
    y_pred = clf.model.predict(X_test)

    test_report = create_test_report(train_params, y_test, y_pred)
    print(test_report)

    keras.backend.clear_session()
    print("Experiment completed.")
    print("Session ended.")
Example #4
def experiment(dl_params, model_params, save_dir=""):

    keras.backend.clear_session()

    #   create data
    print("Loading data...")
    dataloader = Dataloader(dl_params, rseed=0)
    #X_train, y_train = dataloader.get_dataset("train")
    #X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    #   convert to np.array
    #X_train = np.stack(X_train, axis=0)
    #X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    #y_train = np.asarray(y_train)
    #y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    #X_train = X_train.astype("float") / 255.0
    #X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    #image = expand_dims(X_test[0], axis=0)
    image = X_test[100]
    print(image.shape)

    print(matplotlib.get_backend())

    print("Building classifier...")
    #clf = DenseNet(model_params)
    model = load_model(model_params['load_location'])

    print("Predicting image...")
    label = model.predict(np.array([image]))

    print("The inputted image is predicted to be ", label)

    print("Running LIME...")
    explain_lime(image, label, model, save_dir=save_dir)

    keras.backend.clear_session()
Example #5
def experiment(params, dl_params):
    """
    Runs main pipeline for experiment
    """

    #   Load dataset
    print("Loading data...")
    #((X_train, y_train), (X_test, y_test)) = tf.keras.datasets.cifar10.load_data()

    dataloader = Dataloader(dl_params, rseed=0)
    X_train, y_train = dataloader.get_dataset("train")
    X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  #   save some memory

    #   convert to np.array
    X_train = np.stack(X_train, axis=0)
    X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    #   convert labels to 1-hot vector
    binarizer = LabelBinarizer()
    #   fit on the training labels only, then reuse the same mapping for the
    #   other splits
    y_train = binarizer.fit_transform(y_train)
    y_valid = binarizer.transform(y_valid)
    y_test = binarizer.transform(y_test)

    print(y_train)

    print("Building classifier...")
    clf = CNN(params)

    print("Training classifier...")
    clf.model = train(params, clf.model, X_train, y_train, X_valid, y_valid)
    del X_train, y_train  #   save memory

    print("Saving classifier...")
    if 'report_dir' in params and params['report_dir'] != "":
        save_dir = params['report_dir']
        if save_dir[-1] != "/":
            save_dir += "/"
    else:
        save_dir = ""
    keras.backend.set_learning_phase(0)  #   set to inference
    clf.model.save(save_dir + "inference.h5")

    print("Testing classifier...")
    y_pred = clf.model.predict(X_test)
    test_report = create_test_report(params, y_test, y_pred)
    print(test_report)

    keras.backend.clear_session()
    print("Experiment completed.")
    print("Session ended.")
Example #6
def experiment(dl_params, model_params, train_params, train_model=False):

    keras.backend.clear_session()

    #   create a session that logs device placement (useful for confirming
    #   the GPU is actually used)
    config = tf.ConfigProto(log_device_placement=True)
    sess = tf.Session(config=config)
    keras.backend.set_session(sess)
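    #   note: ConfigProto/Session are TensorFlow 1.x APIs; under TF 2.x these
    #   would be tf.compat.v1.ConfigProto and tf.compat.v1.Session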

    #   save parameters
    print("Saving parameters...")
    save_dir = train_params['report_dir']
    if os.path.exists(save_dir) is False:
        os.makedirs(save_dir)
    params_file = open(save_dir + "model_params.txt", "w+")
    params_file.write(str(model_params))
    params_file.close()

    params_file = open(save_dir + "train_params.txt", "w+")
    params_file.write(str(train_params))
    params_file.close()

    params_file = open(save_dir + "dl_params.txt", "w+")
    params_file.write(str(dl_params))
    params_file.close()

    #   create data
    print("Loading data...", flush=True)
    dataloader = Dataloader(dl_params, rseed=0)
    X_train, y_train = dataloader.get_dataset_images_and_labels("train")
    X_valid, y_valid = dataloader.get_dataset_images_and_labels("valid")
    X_test, y_test = dataloader.get_dataset_images_and_labels("test")

    #dataloader.print_dataset_files(save_dir=save_dir)
    del dataloader  #   save some memory

    #   convert to np.array
    X_train = np.stack(X_train, axis=0)
    X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    #   convert labels to 1-hot vector
    binarizer = LabelBinarizer()
    #   fit on the training labels only, then reuse the same mapping for the
    #   other splits
    y_train = binarizer.fit_transform(y_train)
    y_valid = binarizer.transform(y_valid)
    y_test = binarizer.transform(y_test)

    #   convert grayscale to RGB (LIME requires 3-channel images)
    if X_train.shape[-1] == 1:  #   if grayscale
        print("Converting from grayscale to RGB...")
        X_train = gray2rgb(X_train.squeeze(axis=-1))
        X_valid = gray2rgb(X_valid.squeeze(axis=-1))
        X_test = gray2rgb(X_test.squeeze(axis=-1))

    print("Building classifier...")
    #   need to add our own "top" FC to make classes=2
    clf = CNN(model_params)

    if train_model is True or model_params['load_location'] == "":
        print("Training classifier...")
        clf.model = train(train_params, clf.model, X_train, y_train, X_valid,
                          y_valid)
    elif train_model is False:
        clf.model = load_model(model_params['load_location'])
    else:
        pass  #   any other value (e.g., None): keep the untrained model
    del X_train, X_valid, y_train, y_valid  #   save memory

    print("Testing classifier...")
    y_pred = clf.model.predict(X_test)

    test_report = create_test_report(train_params, y_test, y_pred)
    print(test_report)

    sess.close()
    keras.backend.clear_session()
    print("Experiment completed.")
    print("Session ended.")
Example #7
def experiment(dl_params, model_params, save_dir):
    """
    Experimental pipeline.
    
    """

    #   load dataset
    print("Loading dataset...")
    dataloader = Dataloader(dl_params, rseed=0)
    X_train, y_train = dataloader.get_dataset("train")
    X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  #   save some memory

    #   convert to np.array
    X_train = np.stack(X_train, axis=0)
    X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    #   run Protodash
    print("Running Protodash...")
    print(type(X_train), X_train.shape)
    print(type(X_test), X_test.shape)

    proto_indices, weights = explain_protodash((X_train, y_train),
                                               (X_test, y_test),
                                               label=None,
                                               num_protos=3,
                                               save_dir=save_dir)

    #   get and order samples to explain (most important first)
    samples_to_explain = [
        idx for _, idx in sorted(zip(weights, proto_indices))
    ]
    samples_to_explain.reverse()
    print("Samples: ", samples_to_explain)

    #   load model into classifier
    print("Loading pre-existing classifier...")
    #   add this line to prevent some Keras serializer error
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = load_model(model_params['load_location'])

    #   run LIME
    print("Creating LIME explanations...")
    for idx in samples_to_explain:
        #   get image, label corresponding to idx
        image = X_test[idx, :, :, :]
        label = y_test[idx]
        explain_lime(image,
                     label,
                     model,
                     save_name="lime_" + str(idx),
                     save_dir=save_dir)

    #   run heatmap
    print("Creating heatmap explanations...")
    #   select analyzer
    explainer_type = "deep_taylor"
    if model_params['output_dim'] > 2:
        model_wo_sm = iutils.keras.graph.model_wo_softmax(
            model)  #   remove softmax
    else:
        model_wo_sm = model
    analyzer = innvestigate.create_analyzer(explainer_type, model_wo_sm)

    for idx in samples_to_explain:
        #   get image, label corresponding to idx
        image = X_test[idx, :, :, :]
        label = y_test[idx]
        explain_innvestigate(image,
                             label,
                             analyzer,
                             save_name="heatmap_" + str(idx),
                             save_dir=save_dir)

    print("Experiment completed.")
Example #8
def experiment(dl_params, model_params, explain_params, save_dir):
    """
    Experimental pipeline.
    """

    #   load dataset
    print("Loading dataset...")
    dataloader = Dataloader(dl_params, rseed=0)
    #   outputs are not batched; each is a list of all samples
    X_train, y_train = dataloader.get_dataset_images_and_labels("train")
    X_valid, y_valid = dataloader.get_dataset_images_and_labels("valid")
    X_test, y_test = dataloader.get_dataset_images_and_labels("test")

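    #   also fetch the test filenames so each prediction can be traced back
    #   to its source image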
    X_test_filenames, y_test_labels = dataloader.get_dataset_names_and_labels(
        "test")

    del dataloader  #   save some memory

    #   convert to np.array
    X_train = np.stack(X_train, axis=0)
    X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    #   normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    print("X_train.shape: ", X_train.shape)

    #   convert grayscale to RGB (LIME requires 3-channel images)
    if X_train.shape[-1] == 1:  #   if grayscale
        print("Converting from grayscale to RGB...")
        X_train = gray2rgb(X_train.squeeze(axis=-1))
        X_valid = gray2rgb(X_valid.squeeze(axis=-1))
        X_test = gray2rgb(X_test.squeeze(axis=-1))

    #   get prototypes for each class over the entire training dataset
    folder = save_dir + "class_0/"
    if os.path.exists(folder) is False:
        os.makedirs(folder)
    proto_indices, weights, _ = explain_protodash((X_train, y_train),
                                                  (X_train, y_train),
                                                  label=0,
                                                  num_protos=5,
                                                  filename="proto",
                                                  save_dir=folder)
    folder = save_dir + "class_1/"
    if os.path.exists(folder) is False:
        os.makedirs(folder)
    proto_indices, weights, _ = explain_protodash((X_train, y_train),
                                                  (X_train, y_train),
                                                  label=1,
                                                  num_protos=5,
                                                  filename="proto",
                                                  save_dir=folder)

    #   load model into classifier
    print("Loading pre-existing classifier...")
    #   add this line to prevent some Keras serializer error
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = load_model(model_params['load_location'])

    #   run prediction
    model.summary()
    y_pred = model.predict(X_test)

    print("Ypred length: ", len(y_pred))
    print("X_test: ", len(X_test))

    #   sort into true positives, true negatives, false positives, false negatives
    tp, tn, fp, fn = separate_to_positives_negatives(X_test_filenames, y_test,
                                                     y_pred)
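    #   judging by the zip(*...) unpacking below, each bucket holds
    #   (filename, label, prediction) tuples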

    print("TP: ", len(tp))
    print("TN: ", len(tn))
    print("FP: ", len(fp))
    print("FN: ", len(fn))

    #   create files listing prediction and label for each accuracy category
    data, labels, preds = zip(*tp)
    _, ordered_tp = order_indices_by_accuracy(np.asarray(labels),
                                              np.asarray(preds), data)
    data, labels, preds, errors, idx = zip(*ordered_tp)
    print_iterables_file([data, labels, preds, errors, idx],
                         save_name="tp",
                         save_dir=save_dir)
    data, labels, preds = zip(*tn)
    ordered_tn, _ = order_indices_by_accuracy(np.asarray(labels),
                                              np.asarray(preds), data)
    data, labels, preds, errors, idx = zip(*ordered_tn)
    print_iterables_file([data, labels, preds, errors, idx],
                         save_name="tn",
                         save_dir=save_dir)
    data, labels, preds = zip(*fp)
    ordered_fp, _ = order_indices_by_accuracy(np.asarray(labels),
                                              np.asarray(preds), data)
    data, labels, preds, errors, idx = zip(*ordered_fp)
    print_iterables_file([data, labels, preds, errors, idx],
                         save_name="fp",
                         save_dir=save_dir)
    data, labels, preds = zip(*fn)
    _, ordered_fn = order_indices_by_accuracy(np.asarray(labels),
                                              np.asarray(preds), data)
    data, labels, preds, errors, idx = zip(*ordered_fn)
    print_iterables_file([data, labels, preds, errors, idx],
                         save_name="fn",
                         save_dir=save_dir)

    print("orderd tp: ", len(ordered_tp))
    print("orderd tn: ", len(ordered_tn))
    print("orderd fp: ", len(ordered_fp))
    print("orderd fn: ", len(ordered_fn))

    #   get most accurate and least accurate for each class
    ordered_indices_0, ordered_indices_1 = order_indices_by_accuracy(
        y_test, y_pred)
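    #   the two return values appear to be per-class lists of
    #   (label, pred, error, index) tuples, ordered most-accurate first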

    #   get top indices
    top_indices_0 = [idx for _, _, _, idx in ordered_indices_0]
    top_indices_1 = [idx for _, _, _, idx in ordered_indices_1]
    top_diff_0 = [diff.item() for _, _, diff, _ in ordered_indices_0]
    top_diff_1 = [diff.item() for _, _, diff, _ in ordered_indices_1]

    num_indices = explain_params['num_images']
    image_names = [
        'top_indices_0', 'top_indices_1', 'worst_indices_0', 'worst_indices_1'
    ]  #   name of output files

    top_indices = top_indices_0[:num_indices] + top_indices_1[:num_indices]
    worst_indices = top_indices_0[-num_indices:] + top_indices_1[-num_indices:]

    samples_to_explain = top_indices + worst_indices
    print(samples_to_explain)

    #   create folder system
    foldernames = [
        save_dir + 'sample_' + str(idx) + "_class_" + str(y_test[idx]) +
        "_pred_" + str(round(y_pred[idx].item(), 3)) + "/"
        for idx in samples_to_explain
    ]
    for folder in foldernames:
        if os.path.exists(folder) is False:
            os.makedirs(folder)
    print(foldernames)

    #   plot original images
    original_image_files = []
    for i in range(0, len(samples_to_explain)):
        idx = samples_to_explain[i]
        folder = foldernames[i]
        image = np.array([X_test[idx, :, :, :]])
        label = np.array([y_test[idx]])
        filename = "original_" + str(idx)
        plot_numpy_images((image, label), filename=filename, save_dir=folder)
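        #   reconstruct the path plot_numpy_images presumably wrote; it
        #   appears to append the label to the saved filename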
        original_image_files.append(folder + filename + "_" + str(label[0]) +
                                    ".png")

    #   group images based on their classification values (e.g., top_indices_0, worst_indices_1)
    top_indices_folders_0 = foldernames[:num_indices]
    top_indices_folders_1 = foldernames[num_indices:2 * num_indices]
    worst_indices_folders_0 = foldernames[2 * num_indices:3 * num_indices]
    worst_indices_folders_1 = foldernames[3 * num_indices:]

    #   run LIME
    num_superpixels = 10
    imagenet = explain_params['imagenet']
    lime_image_files = []
    print("Creating LIME explanations...")
    for i in range(0, len(samples_to_explain)):
        print("Sample: ", i)
        idx = samples_to_explain[i]
        folder = foldernames[i]
        #   get image, label corresponding to idx
        image = X_test[idx, :, :, :]
        label = y_test[idx]

        image_file = explain_lime(image,
                                  label,
                                  model,
                                  num_superpixels=num_superpixels,
                                  save_name="lime_" + str(idx),
                                  save_dir=folder,
                                  imagenet=imagenet)
        lime_image_files.append(image_file)

    print("num_indices: ", num_indices)

    #   plot LIME explanations
    explanation_name = 'lime'
    combine_single_explain_images(lime_image_files,
                                  original_image_files,
                                  num_indices,
                                  explanation_name,
                                  image_names,
                                  explain_params,
                                  save_dir=save_dir)

    #   run heatmap
    print("Creating heatmap explanations...")
    #   select analyzer
    explainer_type = "deep_taylor"
    if model_params['output_dim'] > 2:
        model_wo_sm = iutils.keras.graph.model_wo_softmax(
            model)  #   remove softmax
    else:
        model_wo_sm = model
    analyzer = innvestigate.create_analyzer(explainer_type, model_wo_sm)

    heatmap_image_files = []
    for i in range(0, len(samples_to_explain)):
        idx = samples_to_explain[i]
        folder = foldernames[i]
        #   get image, label corresponding to idx
        image = X_test[idx, :, :, :]
        label = y_test[idx]
        image_file = explain_innvestigate(image,
                                          label,
                                          analyzer,
                                          save_name="heatmap_" + str(idx),
                                          save_dir=folder)
        heatmap_image_files.append(image_file)

    #   plot heatmap explanations
    explanation_name = 'heatmap'
    combine_single_explain_images(heatmap_image_files,
                                  original_image_files,
                                  num_indices,
                                  explanation_name,
                                  image_names,
                                  explain_params,
                                  save_dir=save_dir)

    print("Creating Protodash explanations...")
    proto_image_files = []
    for i in range(0, len(samples_to_explain)):
        print("Identifying prototypes for: ", idx)
        idx = samples_to_explain[i]
        folder = foldernames[i]
        image = np.array([
            X_test[idx, :, :, :]
        ])  #   convert to shape (1 * height * width * num_layers)
        image_label = np.array([
            y_pred[idx]
        ])  #   use the predicted label, not the ground truth label
        proto_indices, weights, image_file = explain_protodash(
            (image, image_label), (X_train, y_train),
            label=None,
            num_protos=3,
            mark_label=True,
            font_file=explain_params['font_file'],
            label_names=dl_params['labels'],
            filename="proto_" + str(idx),
            save_dir=folder)
        proto_image_files.append(image_file)

    #   plot prototype explanations
    explanation_name = "prototypes"

    print(proto_image_files)

    combine_multi_explain_images(proto_image_files,
                                 original_image_files,
                                 num_indices,
                                 explanation_name,
                                 image_names,
                                 explain_params,
                                 save_dir=save_dir)

    print("Experiment completed.")