# Assumed imports, reconstructed from usage. Dataloader, DenseNet, CNN, train,
# create_test_report, and the explain_* helpers are project-local modules.
import numpy as np
import matplotlib
import keras
from keras.models import load_model
from keras.utils import CustomObjectScope
from keras.initializers import glorot_uniform
from sklearn.preprocessing import LabelBinarizer
import innvestigate
import innvestigate.utils as iutils


def experiment(dl_params, model_params, label=None, num_protos=10, save_dir=""):
    keras.backend.clear_session()

    # create data
    print("Loading data...")
    dataloader = Dataloader(dl_params, rseed=0)
    X_train, y_train = dataloader.get_dataset("train")
    X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    # convert to np.array (the validation split is unused here)
    X_train = np.stack(X_train, axis=0)
    #X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    #y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    # normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    #X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    # pick a single test image to explain
    #image = expand_dims(X_test[0], axis=0)
    image = np.array([X_test[70]])
    image_label = np.array([label])
    print(image.shape)
    print(image_label.shape)
    print(matplotlib.get_backend())

    # single image
    proto_indices, weights = explain_protodash((image, image_label),
                                               (X_train, y_train),
                                               label=label,
                                               num_protos=num_protos,
                                               save_dir=save_dir)
    """
    # multiple images
    proto_indices, weights = explain_protodash((X_train, y_train),
                                               (X_train, y_train),
                                               label=label,
                                               num_protos=num_protos,
                                               save_dir=save_dir)
    """
    print("Prototype Indices: ", proto_indices)
    print("Weights: ", weights)

    keras.backend.clear_session()
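
# A minimal sketch of what explain_protodash might wrap, assuming it uses
# aix360's ProtodashExplainer. The flattening, the explain() argument order
# (target set vs. prototype pool should be checked against the aix360 docs),
# and the omission of the plotting/saving the real helper does with save_dir
# are all assumptions, not the project's actual implementation.
def explain_protodash_sketch(target, source, label=None, num_protos=10, save_dir=""):
    from aix360.algorithms.protodash import ProtodashExplainer

    X_target, _ = target
    X_source, y_source = source
    if label is not None:  # optionally restrict the prototype pool to one class
        X_source = X_source[y_source == label]

    # Protodash operates on 2-D feature matrices, so flatten the images
    flat_target = X_target.reshape(X_target.shape[0], -1)
    flat_source = X_source.reshape(X_source.shape[0], -1)

    explainer = ProtodashExplainer()
    weights, proto_indices, _ = explainer.explain(flat_target, flat_source,
                                                  m=num_protos)
    return proto_indices, weights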
def experiment(dl_params, model_params, train_params, train_model=False):
    # create data
    print("Loading data...", flush=True)
    dataloader = Dataloader(dl_params, rseed=0)
    X_train, y_train = dataloader.get_dataset("train")
    X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    # convert to np.array
    X_train = np.stack(X_train, axis=0)
    X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    # normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    # convert labels to 1-hot vectors; fit on train only so the same
    # class-to-column mapping is reused for valid/test
    binarizer = LabelBinarizer()
    y_train = binarizer.fit_transform(y_train)
    y_valid = binarizer.transform(y_valid)
    y_test = binarizer.transform(y_test)

    print("Building classifier...")
    # need to add our own "top" FC to make classes=2
    clf = DenseNet(model_params)

    if train_model is True:
        print("Training classifier...")
        clf.model = train(train_params, clf.model,
                          X_train, y_train, X_valid, y_valid)
    elif train_model is False:
        clf.model = load_model(model_params['load_location'])
    else:
        pass  # use untrained model

    del X_train, X_valid, y_train, y_valid  # save memory

    print("Testing classifier...")
    y_pred = clf.model.predict(X_test)
    test_report = create_test_report(train_params, y_test, y_pred)
    print(test_report)

    keras.backend.clear_session()
    print("Experiment completed.")
    print("Session ended.")
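
# Hypothetical invocation of the training/evaluation experiment above. Apart
# from 'load_location', which the code actually reads, every dict key shown is
# an illustrative placeholder, not the project's real config schema.
def run_training_example():
    dl_params = {"data_dir": "data/", "image_size": (224, 224)}  # assumed keys
    model_params = {"output_dim": 2,                             # assumed keys
                    "load_location": "reports/inference.h5"}
    train_params = {"epochs": 50, "batch_size": 32}              # assumed keys
    experiment(dl_params, model_params, train_params, train_model=True)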
def experiment(dl_params, model_params, explainer_type, save_dir=""):
    keras.backend.clear_session()

    # create data (only the test split is needed here)
    print("Loading data...")
    dataloader = Dataloader(dl_params, rseed=0)
    #X_train, y_train = dataloader.get_dataset("train")
    #X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    # convert to np.array
    #X_train = np.stack(X_train, axis=0)
    #X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    #y_train = np.asarray(y_train)
    #y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    # normalize to between 0 and 1
    #X_train = X_train.astype("float") / 255.0
    #X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    # pick a single test image to explain
    #image = expand_dims(X_test[0], axis=0)
    image = X_test[70]
    print(image.shape)
    print(matplotlib.get_backend())

    print("Building classifier...")
    # add this line to prevent some Keras serializer error
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = load_model(model_params['load_location'])

    print("Predicting image...")
    label = model.predict(np.array([image]))
    print("The input image is predicted to be ", label)

    print("Building explainer...")
    if model_params['output_dim'] > 2:
        model_wo_sm = iutils.keras.graph.model_wo_softmax(model)  # remove softmax
    else:
        model_wo_sm = model
    explainer = innvestigate.create_analyzer(explainer_type, model_wo_sm)
    print("Explainer type: ", type(explainer))

    explain_innvestigate(image, label, explainer,
                         save_name=explainer_type, save_dir=save_dir)

    keras.backend.clear_session()
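
# A minimal sketch of what explain_innvestigate might do, assuming it calls
# analyzer.analyze() and saves a heatmap with matplotlib. The channel-sum,
# the normalization, and the file naming are assumptions.
def explain_innvestigate_sketch(image, label, analyzer, save_name="heatmap", save_dir=""):
    import matplotlib.pyplot as plt

    # iNNvestigate analyzers expect a batch axis
    analysis = analyzer.analyze(np.array([image]))

    # collapse channels and normalize to [-1, 1] for display
    heatmap = analysis[0].sum(axis=-1)
    heatmap /= np.max(np.abs(heatmap)) + 1e-12

    plt.imshow(heatmap, cmap="seismic", clim=(-1, 1))
    plt.axis("off")
    plt.title("Predicted: " + str(label))
    plt.savefig(save_dir + save_name + ".png", bbox_inches="tight")
    plt.close()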
def experiment(dl_params, model_params, save_dir=""): keras.backend.clear_session() # create data print("Loading data...") dataloader = Dataloader(dl_params, rseed=0) #X_train, y_train = dataloader.get_dataset("train") #X_valid, y_valid = dataloader.get_dataset("valid") X_test, y_test = dataloader.get_dataset("test") del dataloader # save some memory # convert to np.array #X_train = np.stack(X_train, axis=0) #X_valid = np.stack(X_valid, axis=0) X_test = np.stack(X_test, axis=0) #y_train = np.asarray(y_train) #y_valid = np.asarray(y_valid) y_test = np.asarray(y_test) # normalize to between 0 and 1 #X_train = X_train.astype("float") / 255.0 #X_valid = X_valid.astype("float") / 255.0 X_test = X_test.astype("float") / 255.0 #image = expand_dims(X_test[0], axis=0) image = X_test[100] print(image.shape) print(matplotlib.get_backend()) print("Building classifier...") #clf = DenseNet(model_params) model = load_model(model_params['load_location']) print("Predicting image...") label = model.predict(np.array([ image, ])) print("The inputted image is predicted to be ", label) print("Running LIME...") explain_lime(image, label, model, save_dir=save_dir) keras.backend.clear_session()
def experiment(params, dl_params):
    """
    Runs main pipeline for experiment
    """
    # Load dataset
    print("Loading data...")
    #((X_train, y_train), (X_test, y_test)) = tf.keras.datasets.cifar10.load_data()
    dataloader = Dataloader(dl_params, rseed=0)
    X_train, y_train = dataloader.get_dataset("train")
    X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    # convert to np.array
    X_train = np.stack(X_train, axis=0)
    X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    # normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    # convert labels to 1-hot vectors; fit on train only so the same
    # class-to-column mapping is reused for valid/test
    binarizer = LabelBinarizer()
    y_train = binarizer.fit_transform(y_train)
    y_valid = binarizer.transform(y_valid)
    y_test = binarizer.transform(y_test)
    print(y_train)

    print("Building classifier...")
    clf = CNN(params)

    print("Training classifier...")
    clf.model = train(params, clf.model, X_train, y_train, X_valid, y_valid)
    del X_train, y_train  # save memory

    print("Saving classifier...")
    if 'report_dir' in params and params['report_dir'] != "":
        save_dir = params['report_dir']
        if save_dir[-1] != "/":
            save_dir += "/"
    else:
        save_dir = ""
    keras.backend.set_learning_phase(0)  # set to inference
    clf.model.save(save_dir + "inference.h5")

    print("Testing classifier...")
    y_pred = clf.model.predict(X_test)
    test_report = create_test_report(params, y_test, y_pred)
    print(test_report)

    keras.backend.clear_session()
    print("Experiment completed.")
    print("Session ended.")
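
# A minimal sketch of what create_test_report might compute, assuming it wraps
# sklearn's classification_report; the real helper presumably also writes the
# report under params['report_dir'].
def create_test_report_sketch(params, y_test, y_pred):
    from sklearn.metrics import classification_report

    # decode one-hot / probability vectors to class indices; with two classes
    # LabelBinarizer emits a single column, so threshold instead of argmax
    if y_test.ndim > 1 and y_test.shape[1] > 1:
        y_true_idx = y_test.argmax(axis=1)
    else:
        y_true_idx = (y_test.ravel() > 0.5).astype(int)
    if y_pred.ndim > 1 and y_pred.shape[1] > 1:
        y_pred_idx = y_pred.argmax(axis=1)
    else:
        y_pred_idx = (y_pred.ravel() > 0.5).astype(int)
    return classification_report(y_true_idx, y_pred_idx)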
def experiment(dl_params, model_params, save_dir):
    """
    Experimental pipeline.
    """
    # load dataset
    print("Loading dataset...")
    dataloader = Dataloader(dl_params, rseed=0)
    X_train, y_train = dataloader.get_dataset("train")
    X_valid, y_valid = dataloader.get_dataset("valid")
    X_test, y_test = dataloader.get_dataset("test")
    del dataloader  # save some memory

    # convert to np.array
    X_train = np.stack(X_train, axis=0)
    X_valid = np.stack(X_valid, axis=0)
    X_test = np.stack(X_test, axis=0)
    y_train = np.asarray(y_train)
    y_valid = np.asarray(y_valid)
    y_test = np.asarray(y_test)

    # normalize to between 0 and 1
    X_train = X_train.astype("float") / 255.0
    X_valid = X_valid.astype("float") / 255.0
    X_test = X_test.astype("float") / 255.0

    # run Protodash
    print("Running Protodash...")
    print(type(X_train), X_train.shape)
    print(type(X_test), X_test.shape)
    proto_indices, weights = explain_protodash((X_train, y_train),
                                               (X_test, y_test),
                                               label=None,
                                               num_protos=3,
                                               save_dir=save_dir)

    # get and order samples to explain (most important first)
    samples_to_explain = [idx for _, idx in sorted(zip(weights, proto_indices))]
    samples_to_explain.reverse()
    print("Samples: ", samples_to_explain)

    # load model into classifier
    print("Loading pre-existing classifier...")
    # add this line to prevent some Keras serializer error
    with CustomObjectScope({'GlorotUniform': glorot_uniform()}):
        model = load_model(model_params['load_location'])

    # run LIME
    print("Creating LIME explanations...")
    for idx in samples_to_explain:
        # get image, label corresponding to idx
        image = X_test[idx, :, :, :]
        label = y_test[idx]
        explain_lime(image, label, model,
                     save_name="lime_" + str(idx), save_dir=save_dir)

    # run heatmap
    print("Creating heatmap explanations...")
    # select analyzer
    explainer_type = "deep_taylor"
    if model_params['output_dim'] > 2:
        model_wo_sm = iutils.keras.graph.model_wo_softmax(model)  # remove softmax
    else:
        model_wo_sm = model
    analyzer = innvestigate.create_analyzer(explainer_type, model_wo_sm)
    for idx in samples_to_explain:
        # get image, label corresponding to idx
        image = X_test[idx, :, :, :]
        label = y_test[idx]
        explain_innvestigate(image, label, analyzer,
                             save_name="heatmap_" + str(idx), save_dir=save_dir)

    print("Experiment completed.")
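
# Hypothetical end-to-end run of the combined Protodash + LIME + heatmap
# pipeline above. 'load_location' and 'output_dim' are the only keys this
# pipeline actually reads; the dl_params keys are illustrative placeholders.
def run_pipeline_example():
    dl_params = {"data_dir": "data/", "image_size": (224, 224)}  # assumed keys
    model_params = {"output_dim": 2,
                    "load_location": "reports/inference.h5"}
    experiment(dl_params, model_params, save_dir="reports/")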