Example #1
def main(*, graph_path: str, node_names_path: str, dataset_path: str,
         batch_size: int):
    """
    Evaluate a .pb graph

    :param graph_path: Path to the .pb graph
    :param node_names_path: Path to the .json file with input and output node
        names
    :param dataset_path: Path to the .h5 dataset file
    :param batch_size: Size of the batch used during inference.
    """
    graph = io.load_pb(graph_path)
    test_dict = io.extract_set(dataset_path, enums.Dataset.TEST)
    min_value, max_value = test_dict[enums.DataStats.MIN], \
                           test_dict[enums.DataStats.MAX]

    transformations = [
        transforms.MinMaxNormalize(min_=min_value, max_=max_value),
        transforms.SpectralTransform()
    ]

    test_dict = transforms.apply_transformations(test_dict, transformations)

    with open(node_names_path, 'r') as node_names_file:
        node_names = json.loads(node_names_file.read())

    input_node = graph.get_tensor_by_name(node_names[enums.NodeNames.INPUT] +
                                          ':0')
    output_node = graph.get_tensor_by_name(node_names[enums.NodeNames.OUTPUT] +
                                           ':0')

    with tf.Session(graph=graph) as session:
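        # `timeit` is assumed to be a project helper that wraps a callable
        # and returns both its result and the measured execution time.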
        predict = timeit(utils.predict_with_graph_in_batches)
        predictions, inference_time = predict(session, input_node, output_node,
                                              test_dict[enums.Dataset.DATA],
                                              batch_size)

    graph_metrics = get_model_metrics(test_dict[enums.Dataset.LABELS],
                                      predictions)
    graph_metrics['inference_time'] = [inference_time]
    conf_matrix = confusion_matrix(test_dict[enums.Dataset.LABELS],
                                   predictions)
    io.save_metrics(dest_path=os.path.dirname(graph_path),
                    file_name=enums.Experiment.INFERENCE_GRAPH_METRICS,
                    metrics=graph_metrics)
    io.save_confusion_matrix(conf_matrix, os.path.dirname(graph_path))
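
# Usage sketch (hypothetical, not part of the original source): the paths
# below are placeholders, and the project's helper modules (io, enums,
# transforms, utils) plus tf, json, and os are assumed to be importable.
main(graph_path='artifacts/model.pb',
     node_names_path='artifacts/node_names.json',
     dataset_path='artifacts/dataset.h5',
     batch_size=64)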
Example #2
def evaluate(*,
             y_pred: np.ndarray,
             data,
             dest_path: str,
             n_classes: int,
             model_path: str,
             voting: str = 'hard',
             train_set_predictions: np.ndarray = None,
             voting_model: str = None,
             voting_model_params: str = None):
    """
    Function for evaluating the trained model.

    :param y_pred: Predictions for the test set.
    :param model_path: Path to the model.
    :param data: Either path to the input data or the data dict.
    :param dest_path: Directory in which to store the calculated metrics.
    :param n_classes: Number of classes.
    :param voting: Method of ensemble voting. If 'hard', uses predicted class
        labels for majority rule voting. Else if 'soft', predicts the class
        label based on the argmax of the sums of the predicted probabilities.
        Else if 'booster', employs a new model that is trained on the
        ensemble's predictions on the training set.
    :param train_set_predictions: Predictions for the train set. Only used
        if 'voting' is set to 'booster'.
    :param voting_model: Type of model to use when the voting argument is
        set to 'booster'. This indicates that a new model is trained on the
        ensemble's predictions on the training set, to improve the quality
        of the classification or regression. Supported models are: SVR, SVC
        (support vector machine for regression and classification), RFR, RFC
        (random forest for regression and classification), DTR, DTC
        (decision tree for regression and classification).
    :param voting_model_params: Parameters of the voting model,
        should be specified in the same manner
        as the parameters for the noise injection.
    """

    ensemble = Ensemble(voting=voting)
    if voting == 'booster':
        train_set_predictions = np.array(train_set_predictions)
        ensemble.train_ensemble_predictor(
            train_set_predictions,
            data[enums.Dataset.TRAIN][enums.Dataset.LABELS], voting_model,
            voting_model_params)
    vote = timeit(ensemble.vote)
    y_pred, voting_time = vote(y_pred)

    y_true = data[enums.Dataset.TEST][enums.Dataset.LABELS]
    model_metrics = get_model_metrics(y_true, y_pred)
    model_metrics['inference_time'] = [voting_time]
    conf_matrix = confusion_matrix(y_true,
                                   y_pred,
                                   labels=list(range(n_classes)))
    io.save_metrics(dest_path=dest_path,
                    file_name=enums.Experiment.INFERENCE_METRICS,
                    metrics=model_metrics)
    io.save_confusion_matrix(conf_matrix, dest_path)
    if enums.Splits.GRIDS in model_path:
        if isinstance(data, str):
            train_dict = io.extract_set(data, enums.Dataset.TRAIN)
            labels_in_train = np.unique(train_dict[enums.Dataset.LABELS])
        else:
            train_labels = data[enums.Dataset.TRAIN][enums.Dataset.LABELS]
            if train_labels.ndim > 1:
                train_labels = np.argmax(train_labels, axis=-1)
            labels_in_train = np.unique(train_labels)
        fair_metrics = get_fair_model_metrics(conf_matrix, labels_in_train)
        io.save_metrics(dest_path=dest_path,
                        file_name=enums.Experiment.INFERENCE_FAIR_METRICS,
                        metrics=fair_metrics)
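
# Usage sketch (hypothetical values, not part of the original source):
# `dataset` stands for the project's data dict, and the stacked per-model
# prediction arrays are placeholders. With voting='booster', an auxiliary
# RFC is trained on the ensemble's train-set predictions.
evaluate(y_pred=test_set_predictions,
         data=dataset,
         dest_path='results/ensemble',
         n_classes=9,
         model_path='models/grids/model_0',
         voting='booster',
         train_set_predictions=train_set_predictions,
         voting_model='RFC',
         voting_model_params='{"n_estimators": 100}')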
Example #3
def evaluate(*,
             data,
             model_path: str,
             dest_path: str,
             n_classes: int,
             batch_size: int = 1024,
             use_ensemble: bool = False,
             ensemble_copies: int = 1,
             voting: str = 'hard',
             noise: ('post', multi(min=0)),
             noise_sets: ('spost', multi(min=0)),
             noise_params: str = None,
             seed: int = 0):
    """
    Function for evaluating the trained model.

    :param model_path: Path to the model.
    :param data: Either path to the input data or the data dict.
    :param dest_path: Directory in which to store the calculated metrics.
    :param n_classes: Number of classes.
    :param batch_size: Size of the batch for inference.
    :param use_ensemble: Use ensemble for prediction.
    :param ensemble_copies: Number of model copies for the ensemble.
    :param voting: Method of ensemble voting. If 'hard', uses predicted class
        labels for majority rule voting. Else if 'soft', predicts the class
        label based on the argmax of the sums of the predicted probabilities.
    :param noise: List containing names of the noise injection methods
        that are applied after the normalization transformations.
    :type noise: list[str]
    :param noise_sets: List of sets that are affected by the noise injection.
        For this module, the only valid element is "test".
    :type noise_sets: list[str]
    :param noise_params: JSON string containing the parameter settings of
        the noise injection methods. An example value for this parameter:
        '{"mean": 0, "std": 1, "pa": 0.1}'. This JSON should include all
        parameters for the noise injection functions specified in the noise
        argument. For an accurate description of each parameter, please
        refer to the ml_intuition/data/noise.py module.
    :param seed: Seed for RNG.
    """
    os.makedirs(dest_path, exist_ok=True)
    if isinstance(data, str):
        test_dict = io.extract_set(data, enums.Dataset.TEST)
    else:
        test_dict = copy(data[enums.Dataset.TEST])
    min_max_path = os.path.join(os.path.dirname(model_path), "min-max.csv")
    if os.path.exists(min_max_path):
        min_value, max_value = io.read_min_max(min_max_path)
    else:
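        # Assumes `data` is the data dict (not a path) here; a string
        # path would fail on the stats lookups below.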
        min_value, max_value = data[enums.DataStats.MIN], \
                               data[enums.DataStats.MAX]

    transformations = [transforms.SpectralTransform(),
                       transforms.OneHotEncode(n_classes=n_classes),
                       transforms.MinMaxNormalize(min_=min_value,
                                                  max_=max_value)]
    if enums.Dataset.TEST in noise_sets:
        transformations += get_noise_functions(noise, noise_params)

    test_dict = transforms.apply_transformations(test_dict, transformations)

    model = tf.keras.models.load_model(model_path, compile=True)
    if use_ensemble:
        model = Ensemble(model, voting=voting)

        if ensemble_copies is not None:
            noise_params = yaml.safe_load(noise_params)
            model.generate_models_with_noise(copies=ensemble_copies,
                                             mean=noise_params['mean'],
                                             seed=seed)
        if voting == 'classifier':
            if isinstance(data, str):
                train_dict = io.extract_set(data, enums.Dataset.TRAIN)
            else:
                train_dict = data[enums.Dataset.TRAIN]
            train_dict = transforms.apply_transformations(train_dict,
                                                          transformations)
            train_probabilities = model.predict_probabilities(
                train_dict[enums.Dataset.DATA])
            model.train_ensemble_predictor(
                train_probabilities, train_dict[enums.Dataset.LABELS])

    predict = timeit(model.predict)
    y_pred, inference_time = predict(test_dict[enums.Dataset.DATA],
                                     batch_size=batch_size)

    if voting == 'classifier':
        y_pred = np.argmax(y_pred, axis=-1)
    y_true = np.argmax(test_dict[enums.Dataset.LABELS], axis=-1)

    model_metrics = get_model_metrics(y_true, y_pred)
    model_metrics['inference_time'] = [inference_time]
    conf_matrix = confusion_matrix(y_true, y_pred)
    io.save_metrics(dest_path=dest_path,
                    file_name=enums.Experiment.INFERENCE_METRICS,
                    metrics=model_metrics)
    io.save_confusion_matrix(conf_matrix, dest_path)
    if enums.Splits.GRIDS in model_path:
        if isinstance(data, str):
            train_dict = io.extract_set(data, enums.Dataset.TRAIN)
            labels_in_train = np.unique(train_dict[enums.Dataset.LABELS])
        else:
            train_labels = data[enums.Dataset.TRAIN][enums.Dataset.LABELS]
            if train_labels.ndim > 1:
                train_labels = np.argmax(train_labels, axis=-1)
            labels_in_train = np.unique(train_labels)
        fair_metrics = get_fair_model_metrics(conf_matrix, labels_in_train)
        io.save_metrics(dest_path=dest_path,
                        file_name=enums.Experiment.INFERENCE_FAIR_METRICS,
                        metrics=fair_metrics)
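
# Usage sketch (hypothetical values, not part of the original source):
# evaluates a saved Keras model as a five-copy noisy ensemble with
# Gaussian noise injected into the test set; the noise name and its
# parameters are illustrative (see ml_intuition/data/noise.py).
evaluate(data='datasets/dataset.h5',
         model_path='models/grids/model_0',
         dest_path='results/model_0',
         n_classes=9,
         use_ensemble=True,
         ensemble_copies=5,
         voting='hard',
         noise=['gaussian'],
         noise_sets=['test'],
         noise_params='{"mean": 0, "std": 1}',
         seed=0)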
Example #4
def evaluate(*,
             data,
             model_path: str,
             dest_path: str,
             n_classes: int,
             batch_size: int = 1024,
             noise: ('post', multi(min=0)),
             noise_sets: ('spost', multi(min=0)),
             noise_params: str = None):
    """
    Function for evaluating the trained model.

    :param model_path: Path to the model.
    :param data: Either path to the input data or the data dict.
    :param dest_path: Directory in which to store the calculated metrics.
    :param n_classes: Number of classes.
    :param batch_size: Size of the batch for inference.
    :param noise: List containing names of the noise injection methods
        that are applied after the normalization transformations.
    :param noise_sets: List of sets that are affected by the noise injection.
        For this module, the only valid element is "test".
    :param noise_params: JSON string containing the parameter settings of
        the noise injection methods. An example value for this parameter:
        '{"mean": 0, "std": 1, "pa": 0.1}'. This JSON should include all
        parameters for the noise injection functions specified in the noise
        argument. For an accurate description of each parameter, please
        refer to the ml_intuition/data/noise.py module.
    """
    if isinstance(data, str):
        test_dict = io.extract_set(data, enums.Dataset.TEST)
    else:
        test_dict = data[enums.Dataset.TEST]
    min_max_path = os.path.join(os.path.dirname(model_path), "min-max.csv")
    if os.path.exists(min_max_path):
        min_value, max_value = io.read_min_max(min_max_path)
    else:
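        # Assumes `data` is the data dict (not a path) here; a string
        # path would fail on the stats lookups below.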
        min_value, max_value = data[enums.DataStats.MIN], \
                               data[enums.DataStats.MAX]

    transformations = [transforms.SpectralTransform(),
                       transforms.OneHotEncode(n_classes=n_classes),
                       transforms.MinMaxNormalize(min_=min_value, max_=max_value)]
    if enums.Dataset.TEST in noise_sets:
        transformations += get_noise_functions(noise, noise_params)

    test_dict = transforms.apply_transformations(test_dict, transformations)

    model = tf.keras.models.load_model(model_path, compile=True)

    predict = timeit(model.predict)
    y_pred, inference_time = predict(test_dict[enums.Dataset.DATA],
                                     batch_size=batch_size)

    y_pred = np.argmax(y_pred, axis=-1)
    y_true = np.argmax(test_dict[enums.Dataset.LABELS], axis=-1)

    model_metrics = get_model_metrics(y_true, y_pred)
    model_metrics['inference_time'] = [inference_time]
    conf_matrix = confusion_matrix(y_true, y_pred)
    io.save_metrics(dest_path=dest_path,
                    file_name=enums.Experiment.INFERENCE_METRICS,
                    metrics=model_metrics)
    io.save_confusion_matrix(conf_matrix, dest_path)
    if enums.Splits.GRIDS in model_path:
        if isinstance(data, str):
            train_dict = io.extract_set(data, enums.Dataset.TRAIN)
            labels_in_train = np.unique(train_dict[enums.Dataset.LABELS])
        else:
            train_labels = data[enums.Dataset.TRAIN][enums.Dataset.LABELS]
            if train_labels.ndim > 1:
                train_labels = np.argmax(train_labels, axis=-1)
            labels_in_train = np.unique(train_labels)
        fair_metrics = get_fair_model_metrics(conf_matrix, labels_in_train)
        io.save_metrics(dest_path=dest_path,
                        file_name=enums.Experiment.INFERENCE_FAIR_METRICS,
                        metrics=fair_metrics)
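
# Usage sketch (hypothetical values, not part of the original source):
# a plain single-model evaluation with no noise injection, so `noise`
# and `noise_sets` are left empty.
evaluate(data='datasets/dataset.h5',
         model_path='models/model_0',
         dest_path='results/model_0',
         n_classes=9,
         batch_size=512,
         noise=[],
         noise_sets=[])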