def calc_loss(self, output, label):
    '''
    Calculates the loss and the loss gradient with respect to the input of
    the output layer.
    '''
    if self.loss_fun == 'cross_entropy' and self.act_funs[-1] == 'softmax':
        loss = fun.cross_entropy(output, label)
        loss_grad = fun.cross_entropy_prime(output, label)
        return loss, loss_grad
    # Fail loudly instead of returning undefined names for other settings.
    raise NotImplementedError(
        'only cross-entropy loss with a softmax output layer is supported')
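The fun module used above is project-local and not shown. Below is a minimal sketch of what the two calls plausibly compute, assuming batched one-hot labels and softmax outputs; only the names and signatures come from the call sites, the bodies are an assumption. The key identity is that for a softmax output layer, the cross-entropy gradient with respect to the layer's pre-activation input collapses to output - label.

import numpy as np

def cross_entropy(output, label, eps=1e-12):
    # Mean cross-entropy between one-hot labels and softmax outputs.
    return -np.sum(label * np.log(output + eps)) / output.shape[0]

def cross_entropy_prime(output, label):
    # Combined softmax + cross-entropy gradient w.r.t. the layer's
    # pre-activation input; the softmax Jacobian cancels to this form.
    return (output - label) / output.shape[0]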
Example #2
def load_data(linear, dataset):
    # train_files and test_files are module-level mappings from dataset name
    # to CSV path; dl and functions are project-local modules.
    train_dataset = functions.create_io_arrays(
        dl.load_from_csv(train_files[dataset], 2), linear)
    test_dataset = functions.create_io_arrays(
        dl.load_from_csv(test_files[dataset], 2), linear)

    # One weight per input feature, initialised to zero.
    weights = [0] * len(train_dataset[0][0])

    train_error = []
    test_error = []

    # Runs 1000 epochs of full-batch training, recording the cross-entropy
    # on the training and test sets after each epoch.
    for _ in range(1000):
        weights = functions.train_function(weights, train_dataset[0],
                                           train_dataset[1], 0.1)
        train_error.append(
            functions.cross_entropy(weights, train_dataset[0],
                                    train_dataset[1]))
        test_error.append(
            functions.cross_entropy(weights, test_dataset[0], test_dataset[1]))
    return [train_dataset, test_dataset, weights, train_error, test_error]
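functions.train_function and functions.cross_entropy are not shown either. A plausible minimal sketch, assuming a logistic model over the (inputs, targets) arrays built above; the names and signatures mirror the call sites, the bodies are an assumption.

import math

def sigmoid(z):
    return 1.0 / (1.0 + math.exp(-z))

def cross_entropy(weights, inputs, targets):
    # Mean binary cross-entropy of the logistic model p = sigmoid(w . x).
    total = 0.0
    for x, t in zip(inputs, targets):
        p = sigmoid(sum(w * xi for w, xi in zip(weights, x)))
        total -= t * math.log(p) + (1 - t) * math.log(1 - p)
    return total / len(inputs)

def train_function(weights, inputs, targets, lr):
    # One full-batch gradient-descent step; d(loss)/dw_j = (p - t) * x_j.
    grads = [0.0] * len(weights)
    for x, t in zip(inputs, targets):
        p = sigmoid(sum(w * xi for w, xi in zip(weights, x)))
        for j, xi in enumerate(x):
            grads[j] += (p - t) * xi
    return [w - lr * g / len(inputs) for w, g in zip(weights, grads)]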
Example #3
from sklearn.base import clone
from sklearn.model_selection import StratifiedKFold
from betacal import BetaCalibration


def cv_confidence_intervals(base_classifier,
                            x_train,
                            y_train,
                            x_test,
                            y_test,
                            cv=2,
                            score_type=None):
    # calibrate, cross_entropy, brier_score and beta_test are project-local
    # helpers assumed to be in scope.
    folds = StratifiedKFold(n_splits=cv, shuffle=True).split(x_train, y_train)
    intervals = None
    # Only the first fold is used: fit on the train split, calibrate on cali.
    for i, (train, cali) in enumerate(folds):
        if i == 0:
            x_t = x_train[train]
            y_t = y_train[train]
            x_c = x_train[cali]
            y_c = y_train[cali]
            classifier = clone(base_classifier)
            classifier.fit(x_t, y_t)
            ccv = calibrate(classifier,
                            x_c,
                            y_c,
                            method=None,
                            score_type=score_type)

            scores = ccv.predict_proba(x_c)[:, 1]
            scores_test = ccv.predict_proba(x_test)[:, 1]
            ll_before = cross_entropy(scores_test, y_test)
            brier_before = brier_score(scores_test, y_test)

            calibrator = BetaCalibration(parameters="abm").fit(scores, y_c)

            ll_after = cross_entropy(calibrator.predict(scores_test), y_test)
            brier_after = brier_score(calibrator.predict(scores_test), y_test)

            original_map = calibrator.calibrator_.map_
            intervals = beta_test(original_map,
                                  test_type="adev",
                                  scores=scores)
            intervals["ll_diff"] = ll_after - ll_before
            intervals["bs_diff"] = brier_after - brier_before
    return intervals
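cross_entropy and brier_score are used as scalar metrics on predicted probabilities in this and the following examples. A minimal sketch under the standard definitions; the bodies are assumptions matching the call sites.

import numpy as np

def cross_entropy(probas, y, eps=1e-12):
    # Mean negative log-likelihood of binary labels under probas = P(y=1).
    p = np.clip(probas, eps, 1 - eps)
    return -np.mean(y * np.log(p) + (1 - y) * np.log(1 - p))

def brier_score(probas, y):
    # Mean squared error between predicted probabilities and binary labels.
    return np.mean((probas - y) ** 2)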
Example #4
import numpy as np
from sklearn.base import clone
from sklearn.model_selection import StratifiedKFold


def cv_calibration(base_classifier,
                   methods,
                   x_train,
                   y_train,
                   x_test,
                   y_test,
                   cv=3,
                   score_type=None,
                   verbose=False):
    # calibrate, cross_entropy and brier_score are project-local helpers.
    folds = StratifiedKFold(n_splits=cv, shuffle=True).split(x_train, y_train)
    mean_probas = {method: np.zeros(len(y_test)) for method in methods}
    classifiers = {method: [] for method in methods}
    for i, (train, cali) in enumerate(folds):
        if i < cv:
            x_t = x_train[train]
            y_t = y_train[train]
            x_c = x_train[cali]
            y_c = y_train[cali]
            classifier = clone(base_classifier)
            classifier.fit(x_t, y_t)
            for method in methods:
                if verbose:
                    print("Calibrating with " +
                          ('none' if method is None else method))
                ccv = calibrate(classifier,
                                x_c,
                                y_c,
                                method=method,
                                score_type=score_type)
                mean_probas[method] += ccv.predict_proba(x_test)[:, 1] / cv
                classifiers[method].append(ccv)
    losses = {
        method: cross_entropy(mean_probas[method], y_test)
        for method in methods
    }
    accs = {
        method: np.mean((mean_probas[method] >= 0.5) == y_test)
        for method in methods
    }
    briers = {
        method: brier_score(mean_probas[method], y_test)
        for method in methods
    }
    return accs, losses, briers, mean_probas, classifiers
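The calibrate helper is project-local and not shown. A minimal sketch of the idea, assuming it pairs the fitted classifier with a calibrator trained on held-out scores; Platt scaling via sklearn's LogisticRegression stands in here for whatever the method argument selects, and the method/score_type parameters are dropped for brevity.

import numpy as np
from sklearn.linear_model import LogisticRegression

class CalibratedModel:
    # Wraps a fitted binary classifier and a score calibrator.
    def __init__(self, classifier, calibrator):
        self.classifier = classifier
        self.calibrator = calibrator

    def predict_proba(self, x):
        scores = self.classifier.predict_proba(x)[:, 1].reshape(-1, 1)
        p1 = self.calibrator.predict_proba(scores)[:, 1]
        return np.column_stack([1.0 - p1, p1])

def calibrate(classifier, x_c, y_c):
    # Fit a sigmoid (Platt) calibrator on the held-out calibration scores.
    scores = classifier.predict_proba(x_c)[:, 1].reshape(-1, 1)
    return CalibratedModel(classifier, LogisticRegression().fit(scores, y_c))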
Example #5
def forward(self, X, target):
    self.input = X
    self.target = target
    self.softmax_out = softmax(self.input)
    self.output = cross_entropy(self.softmax_out, self.target)
    return self.output
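softmax comes from the snippet's surrounding module; below is a minimal numerically stable version under the usual definition (the body is an assumption). cross_entropy would match the sketch after the first example above.

import numpy as np

def softmax(x):
    # Subtract the row max before exponentiating for numerical stability.
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)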
Example #6
def forward(self, x, t):
    self.t = t
    self.y = fun.softmax(x)
    self.loss = fun.cross_entropy(self.y, self.t)

    return self.loss
Example #7
import numpy as np
from sklearn.base import clone
from sklearn.model_selection import StratifiedKFold


def cv_calibration(base_classifier,
                   methods,
                   x_train,
                   y_train,
                   x_test,
                   y_test,
                   cv=3,
                   score_type=None,
                   model_type='map-only',
                   verbose=False):
    # calibrate, beta_test, cross_entropy, brier_score and _DummyCalibration
    # are project-local helpers assumed to be in scope.
    folds = StratifiedKFold(n_splits=cv, shuffle=True).split(x_train, y_train)
    mean_probas = {method: np.zeros(len(y_test)) for method in methods}
    classifiers = {method: [] for method in methods}
    main_classifier = clone(base_classifier)
    rejected_count = 0
    if model_type == 'map-only':
        main_classifier.fit(x_train, y_train)
    for i, (train, cali) in enumerate(folds):
        if i < cv:
            x_t = x_train[train]
            y_t = y_train[train]
            x_c = x_train[cali]
            y_c = y_train[cali]
            classifier = clone(base_classifier)
            classifier.fit(x_t, y_t)
            for method in methods:
                if verbose:
                    print("Calibrating with " +
                          ('none' if method is None else method))
                ccv = calibrate(classifier,
                                x_c,
                                y_c,
                                method=method,
                                score_type=score_type)
                if method in ["beta_test_strict", "beta_test_relaxed"]:
                    test = beta_test(ccv.calibrator.calibrator_.map_,
                                     test_type="adev",
                                     scores=ccv._preproc(x_c))
                    if test["p-value"] < 0.05:
                        rejected_count += 1
                if model_type == 'map-only':
                    ccv.set_base_estimator(main_classifier,
                                           score_type=score_type)
                mean_probas[method] += ccv.predict_proba(x_test)[:, 1] / cv
                classifiers[method].append(ccv)
    if "beta_test_strict" in methods and rejected_count < cv:
        mean_probas["beta_test_strict"] = 0
        for classifier in classifiers["beta_test_strict"]:
            classifier.calibrator = _DummyCalibration()
            mean_probas["beta_test_strict"] += classifier.predict_proba(
                x_test)[:, 1] / cv
    if "beta_test_relaxed" in methods and rejected_count == 0:
        mean_probas["beta_test_relaxed"] = 0
        for classifier in classifiers["beta_test_relaxed"]:
            classifier.calibrator = _DummyCalibration()
            mean_probas["beta_test_relaxed"] += classifier.predict_proba(
                x_test)[:, 1] / cv
    losses = {
        method: cross_entropy(mean_probas[method], y_test)
        for method in methods
    }
    accs = {
        method: np.mean((mean_probas[method] >= 0.5) == y_test)
        for method in methods
    }
    briers = {
        method: brier_score(mean_probas[method], y_test)
        for method in methods
    }
    return accs, losses, briers, mean_probas, classifiers
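_DummyCalibration is another project-local piece; from its use above it is presumably an identity calibrator that passes scores through unchanged. A minimal sketch under that assumption:

class _DummyCalibration:
    # Identity calibrator: leaves the raw scores unchanged.
    def fit(self, scores, y):
        return self

    def predict(self, scores):
        return scores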