from collections import Counter

import numpy
from sklearn.metrics import mean_absolute_error

# get_labels_to_categories_map() is provided by the project's own utilities
# (its import path is not shown in this snippet).


def macro_mae(y_test, y_pred, classes):
    """Macro-averaged MAE: compute the mean absolute error per gold class and
    then average, so rare classes weigh as much as frequent ones."""
    # map category ids back to the (integer) ordinal class labels
    cat_to_class_mapping = {v: int(k) for k, v in
                            get_labels_to_categories_map(classes).items()}
    _y_test = [cat_to_class_mapping[y] for y in y_test]
    _y_pred = [cat_to_class_mapping[y] for y in y_pred]

    # quick sanity check of the predicted label distribution
    print(Counter(_y_pred))

    per_class_mae = {}
    for c in set(_y_test):
        class_pairs = [(t, p) for t, p in zip(_y_test, _y_pred) if t == c]
        yt = [pair[0] for pair in class_pairs]
        yp = [pair[1] for pair in class_pairs]
        per_class_mae[c] = mean_absolute_error(yt, yp)

    # pprint.pprint(sorted(per_class_mae.items(), key=lambda x: x[1], reverse=True))

    return numpy.mean(list(per_class_mae.values()))
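
# Hedged usage sketch (not from the original source): y_test / y_pred hold
# category indices (e.g. the argmax of a softmax output), and `classes` is the
# label set handed to get_labels_to_categories_map(). Assuming that helper maps
# the ordinal labels '-2'..'2' to indices 0..4, a call would look like:
#
#   y_true_cats = [1, 2, 4, 3]          # hypothetical gold category ids
#   y_pred_cats = [2, 2, 3, 3]          # hypothetical predicted category ids
#   score = macro_mae(y_true_cats, y_pred_cats,
#                     classes=['-2', '-1', '0', '1', '2'])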
Example #2
                      rnn_cells=64,
                      attention="context",
                      clipnorm=.1,
                      classes=len(classes))

print(nn_model.summary())

############################################################################
# CALLBACKS
############################################################################

# define metrics and class weights
if TASK == "BD":
    cat_to_class_mapping = {
        v: k
        for k, v in get_labels_to_categories_map(classes).items()
    }
    metrics = {
        "accuracy": (lambda y_test, y_pred: accuracy_score(y_test, y_pred)),
        "recall":
        (lambda y_test, y_pred: recall_score(y_test, y_pred, average='macro')),
        "precision": (lambda y_test, y_pred: precision_score(
            y_test, y_pred, average='macro'))
    }
else:
    cat_to_class_mapping = {
        v: int(k)
        for k, v in get_labels_to_categories_map(classes).items()
    }

    def macro_mae(y_test, y_pred):
        # mirrors the standalone macro_mae() above: per-class MAE, then mean
        _y_test = [cat_to_class_mapping[y] for y in y_test]
        _y_pred = [cat_to_class_mapping[y] for y in y_pred]
        per_class = []
        for c in set(_y_test):
            pairs = [(t, p) for t, p in zip(_y_test, _y_pred) if t == c]
            per_class.append(mean_absolute_error([t for t, _ in pairs],
                                                 [p for _, p in pairs]))
        return numpy.mean(per_class)

    metrics = {
        "macro_mae": macro_mae,
        "f1_weighted": (lambda y_test, y_pred:
                        f1_score(y_test, y_pred, average='weighted',
                                 labels=[class_to_cat_mapping['positive'],
                                         class_to_cat_mapping['neutral'],
                                         class_to_cat_mapping['negative']])),
        "M_recall": (
            lambda y_test, y_pred: recall_score(y_test, y_pred,
                                                average='macro')),
        "M_precision": (
            lambda y_test, y_pred: precision_score(y_test, y_pred,
                                                   average='macro')),
        "accuracy": (
            lambda y_test, y_pred: accuracy_score(y_test, y_pred))
    }

classes = ['positive', 'negative', 'neutral']
class_to_cat_mapping = get_labels_to_categories_map(
    classes)  # {'negative': 0, 'neutral': 1, 'positive': 2}
cat_to_class_mapping = {v: k for k, v in
                        get_labels_to_categories_map(classes).items()}  # {0: 'negative', 1: 'neutral', 2: 'positive'}

_datasets = {}
_datasets["1-train"] = training,
_datasets["2-val"] = validation if not FINAL else testing
if not FINAL:
    _datasets["3-test"] = testing

metrics_callback = MetricsCallback(datasets=_datasets, metrics=metrics)
plotting = PlottingCallback(grid_ranges=(0.7, 1), height=5,
                            plot_name="model_{}_{}_{}".format(COPRPUSNAME, PREPROCESS_TYP, MODEL_FILE_NUMBER))  # benchmarks={"SE17": 0.681},
tensorboard = TensorBoard(log_dir='./logs/{}'.format(COPRPUSNAME))

_callbacks = []
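
# Hedged sketch (not from the original source): the snippet stops right after
# creating the empty _callbacks list. A typical continuation would register the
# callbacks built above and pass them to Keras; epochs/batch_size below are
# placeholders, not values taken from the original code.
#
#   _callbacks.append(metrics_callback)
#   _callbacks.append(plotting)
#   _callbacks.append(tensorboard)
#   history = nn_model.fit(training[0], training[1],
#                          validation_data=_datasets["2-val"],
#                          epochs=50, batch_size=128,
#                          callbacks=_callbacks)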
Example #4
############################################################################
# CALLBACKS
############################################################################
metrics = {
    "f1_pn": (lambda y_test, y_pred:
              f1_score(y_test, y_pred, average='macro',
                       labels=[class_to_cat_mapping['positive'],
                               class_to_cat_mapping['negative']])),
    "M_recall": (
        lambda y_test, y_pred: recall_score(y_test, y_pred, average='macro')),
    "M_precision": (
        lambda y_test, y_pred: precision_score(y_test, y_pred,
                                               average='macro'))
}

classes = ['positive', 'negative', 'neutral']
class_to_cat_mapping = get_labels_to_categories_map(classes)
cat_to_class_mapping = {v: k for k, v in
                        get_labels_to_categories_map(classes).items()}

_datasets = {}
_datasets["1-train"] = training,
_datasets["2-val"] = validation if not FINAL else testing
if not FINAL:
    _datasets["3-test"] = testing

metrics_callback = MetricsCallback(datasets=_datasets, metrics=metrics)
weights = WeightsCallback(parameters=["W"], stats=["raster", "mean", "std"])
# plotting = PlottingCallback(grid_ranges=(0.5, 0.75), height=5, benchmarks={"SE17": 0.67, "goal": 0.71})
plotting = PlottingCallback(grid_ranges=(0.5, 0.75), height=5,
                            benchmarks={"SE17": 0.681})