def evaluate_importance(model_data: ModelData, importance_type: ImportanceType,
                        importance_calculation: ImportanceCalculation):
    model_data.reload_model()

    importance_handler: ImportanceEvaluator = ImportanceEvaluator(model_data)
    importance_handler.setup(importance_type, importance_calculation)

    (x_train, y_train), (x_test, y_test), input_shape, num_classes = get_prepared_data(
        model_data.get_class_selection())
    importance_handler.set_train_and_test_data(x_train, y_train, x_test, y_test)
    importance_handler.create_evaluation_data(10)
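# Hedged usage sketch for evaluate_importance(). "default_all" is the model name
# used by the sample script at the end of this section; ImportanceType(0) (the
# default flag value used by ProcessedNetwork below) and
# ImportanceCalculation.BNN_EDGE (the evaluator's default) are the only concrete
# variants visible in this section, so any other combination is an assumption.
sample_model_data: ModelData = ModelData("default_all")
evaluate_importance(sample_model_data, ImportanceType(0), ImportanceCalculation.BNN_EDGE)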
def create_importance_data(model_data: ModelData, importance_type: ImportanceType):
    pn = ProcessedNetwork(model_data=model_data)
    # split_suffix is not part of this function's signature; it is assumed to be
    # defined at module scope.
    pn.generate_importance_data("mnist/mnist_train_split%s" % split_suffix,
                                "mnist/mnist_test_split%s" % split_suffix,
                                importance_type)
    model_data.store_model_data()
    model_data.save_data()
def calculate_performance_of_model(model_data: ModelData) -> ModelData:
    (x_train, y_train), (x_test, y_test), input_shape, num_classes = get_prepared_data()
    logging.info("Train examples: %i" % x_train.shape[0])
    logging.info("Test examples: %i" % x_test.shape[0])

    model_data.reload_model()
    model_data.model.compile(loss=keras.losses.categorical_crossentropy,
                             optimizer=keras.optimizers.Adam(0.001),
                             metrics=["accuracy"])
    model_data = evaluate_model(model_data, x_train, y_train, x_test, y_test)
    return model_data
def evaluate_model(model_data: ModelData, x_train: Any, y_train: Any, x_test: Any,
                   y_test: Any) -> ModelData:
    train_score = model_data.model.evaluate(x_train, y_train, verbose=0)
    test_score = model_data.model.evaluate(x_test, y_test, verbose=0)
    logging.info("Train loss: %f, Train accuracy: %f, Test loss: %f, Test accuracy: %f" % (
        train_score[0], train_score[1], test_score[0], test_score[1]))

    c_y_test = np.argmax(y_test, axis=1)
    # Sequential.predict_classes() has been removed from Keras; taking the argmax
    # over the predicted class probabilities is the equivalent replacement.
    prediction_test = np.argmax(model_data.model.predict(x_test), axis=1)
    c_report: Any = classification_report(c_y_test, prediction_test, output_dict=True)

    model_data.set_initial_performance(test_score[0], test_score[1], train_score[0],
                                       train_score[1], c_report)
    return model_data
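# For reference: classification_report(..., output_dict=True) from scikit-learn
# returns a nested dict keyed by class label plus aggregate entries, roughly
# (numbers illustrative, not measured):
#
# {
#     "0": {"precision": 0.98, "recall": 0.99, "f1-score": 0.98, "support": 980},
#     ...
#     "accuracy": 0.98,
#     "macro avg": {"precision": ..., "recall": ..., "f1-score": ..., "support": ...},
#     "weighted avg": {"precision": ..., "recall": ..., "f1-score": ..., "support": ...},
# }
#
# This is the structure handed to ModelData.set_initial_performance() above.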
def create(name: str, batch_size: int, epochs: int, layer_data: List[int],
           learning_rate: float = 0.001, regularized: bool = False,
           train_type: ModelTrainType = ModelTrainType.BALANCED,
           main_class: Optional[int] = None,
           other_class_percentage: Optional[float] = None,
           class_selection: Optional[List[int]] = None) -> ModelData:
    logging.info("Create MNIST neural network model with training type \"%s\"." % train_type.name)

    if train_type is not ModelTrainType.UNBALANCED:
        (x_train, y_train), (x_test, y_test), input_shape, num_classes = get_prepared_data(
            class_selection)
    else:
        (x_train, y_train), (x_test, y_test), input_shape, num_classes = get_unbalance_data(
            main_class, other_class_percentage, class_selection)

    logging.info("Train examples: %i" % x_train.shape[0])
    logging.info("Test examples: %i" % x_test.shape[0])

    if class_selection is not None:
        num_classes = len(class_selection)

    model: Model = build_mnist_model(layer_data, num_classes, input_shape,
                                     learning_rate, regularized)
    if train_type is not ModelTrainType.UNTRAINED:
        model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
                  validation_data=(x_test, y_test))

    model_description: str = generate_model_description(batch_size, epochs, model.layers,
                                                        learning_rate)
    model_layer_nodes: List[int] = [input_shape[0]]
    model_layer_nodes.extend(layer_data)
    model_layer_nodes.append(num_classes)

    model_data: ModelData = ModelData(name, model_description, model)
    model_data.set_parameter(batch_size, epochs, model_layer_nodes, learning_rate,
                             x_train.shape[0], x_test.shape[0])
    model_data = evaluate_model(model_data, x_train, y_train, x_test, y_test)
    model_data.set_class_selection(class_selection)
    model_data.save_model()
    model_data.store_model_data()
    return model_data
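# Hedged usage sketch for create(). All hyperparameter values below are
# illustrative placeholders, not values taken from the project.
if __name__ == "__main__":
    example_model: ModelData = create(
        name="mnist_example",   # hypothetical model name
        batch_size=128,
        epochs=10,
        layer_data=[128, 64],   # hidden layer widths
        learning_rate=0.001,
        train_type=ModelTrainType.BALANCED,
    )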
def __init__(self, model_data: ModelData):
    self.model_data: ModelData = model_data
    self.importance_type: ImportanceType = ImportanceType(
        model_data.get_importance_type())
    self.importance_calculation: ImportanceCalculation = ImportanceCalculation.BNN_EDGE
    # "List[int] or None" evaluates to plain List[int]; Optional is the correct
    # annotation for a value that may be None.
    self.relevant_classes: Optional[List[int]] = None
    self.x_train = None
    self.y_train = None
    self.x_test = None
    self.y_test = None
def __init__(self, model_data: ModelData, store_path: Optional[str] = None):
    self.model_data: ModelData = model_data
    self.name: str = "Undefined"
    self.num_classes: int = -1
    self.path: str = store_path if model_data is None else model_data.get_path()
    self.original_name: Optional[str] = None if model_data is None else model_data.name
    self.model_data.reload_model()

    self.architecture_data: List[int] = []
    self.node_importance_value: List[List[np.ndarray]] = []
    self.edge_importance_value: List[Optional[np.ndarray]] = []
    self.edge_importance_set: bool = False
    for i, layer in enumerate(model_data.model.layers):
        self.architecture_data.append(layer.output_shape[1])
        # "is" compares object identity, not value; use ordinary comparison
        # operators for integers.
        if i != 0:
            self.node_importance_value.append([])
            self.edge_importance_value.append(None)
        if i == len(model_data.model.layers) - 1:
            self.num_classes = layer.output_shape[1]
    self.importance_type: ImportanceType = ImportanceType(0)
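# Illustrative bookkeeping after the loop above, assuming a model whose layers
# are Dense(128), Dense(64), Dense(10) (layer sizes hypothetical):
#   architecture_data     -> [128, 64, 10]   output width of each layer
#   node_importance_value -> [[], []]        one empty slot per non-first layer
#   edge_importance_value -> [None, None]    filled later with edge importances
#   num_classes           -> 10              width of the final layer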
from data.mnist_data_handler import get_prepared_data
from data.model_data import ModelData
from evaluation.evaluator import ImportanceEvaluator
from utility.log_handling import setup_logger

setup_logger("sample_evaluation")

name: str = "default_all"
model_data: ModelData = ModelData(name)
model_data.reload_model()

importance_handler: ImportanceEvaluator = ImportanceEvaluator(model_data)
importance_handler.setup()

(x_train, y_train), (x_test, y_test), input_shape, num_classes = get_prepared_data(
    model_data.get_class_selection())
importance_handler.set_train_and_test_data(x_train, y_train, x_test, y_test)
importance_handler.create_evaluation_data(10)