Esempio n. 1
0
    def on_epoch_end(self, epoch, logs=None):
        """
        Performs the following operations at the end of each training epoch:
        checkpoints the model whenever validation LAUC or validation loss
        improves, optionally logs the epoch's metrics, and records them in
        the training history.

        :param epoch: Current epoch
        :type epoch: ``int``
        :param logs: Current logs
        :type logs: ``dict``
        """
        logs = {} if logs is None else logs
        y_true, y_pred = self._compute_y()

        epoch_lauc = Metrics.compute_lauc(y_true, y_pred, self.fct)
        epoch_log_loss = logs["val_loss"]

        # Checkpoint whenever either tracked metric improves.
        if epoch_lauc > self.lauc:
            self.model.save(self.best_lauc_model_fpath)
            self.lauc = epoch_lauc
            self.best_lauc_epoch = epoch

        if epoch_log_loss < self.log_loss:
            self.model.save(self.best_log_loss_model_fpath)
            self.log_loss = epoch_log_loss
            self.best_log_loss_epoch = epoch

        if self.verbose >= 1:
            self.logger.info("Epoch {}".format(epoch))
            self.logger.info("")
            for label, value in (
                ("Train Loss", logs["loss"]),
                ("Train Acc", logs["accuracy"]),
                ("Val Loss", logs["val_loss"]),
                ("Val Acc", logs["val_accuracy"]),
                ("Val LAUC", epoch_lauc),
                ("Time (sec)", time.time() - self.epoch_time_start),
            ):
                self.logger.info("{0:10}: {1:.6f}".format(label, value))
            for label, value, best_epoch in (
                ("Best LAUC", self.lauc, self.best_lauc_epoch),
                ("Best LOSS", self.log_loss, self.best_log_loss_epoch),
            ):
                self.logger.info(
                    "{0:10}: {1:.6f} at Epoch: {2}".format(label, value, best_epoch))

        # Append this epoch's numbers to the running history table.
        for column, value in (
            ("Epoch", epoch),
            ("Train Loss", logs["loss"]),
            ("Train Acc", logs["accuracy"]),
            ("Val Loss", logs["val_loss"]),
            ("Val Acc", logs["val_accuracy"]),
            ("Val LAUC", epoch_lauc),
        ):
            self.training_history[column].append(value)
Esempio n. 2
0
    def compute_metrics(self, target, predictions, fct_vals=None):
        """
        Compute classification metrics and store them in ``self.metrics``.

        In the multi-class case metrics are computed one-vs-rest per class
        (lists indexed by class); in the binary case they are scalars.
        The resulting dict holds ``accuracy``, ``auc``, ``precision``,
        ``recall``, ``f1_score`` and ``laucs`` (a mapping from each value
        in ``fct_vals`` to its limited AUC).

        :param target: Array of target values
        :type target: ``np.ndarray``
        :param predictions: Array of predicted values
        :type predictions: ``np.ndarray``
        :param fct_vals: Fractions of true positives to be included in auc calculation
        :type fct_vals: ``list``
        :returns: An instance of FraudDetection mlp
        :rtype: ``core_scripts.nn.keras_tools.MlpHelper``
        """
        if fct_vals is None:
            fct_vals = [
                0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0
            ]

        if self.multi_class_flag:
            # Accuracy is computed on the argmax before one-hot encoding the
            # targets; every other metric is computed one-vs-rest per class.
            accuracy = accuracy_score(target, np.argmax(predictions, axis=1))
            target = to_categorical(target)

            auc, precision, recall, f1, laucs = [], [], [], [], []
            for i in range(self.num_classes):
                class_target = target[:, i]
                class_preds = predictions[:, i]
                # Round once; precision/recall/f1 all need the hard labels.
                class_labels = np.rint(class_preds)
                auc.append(roc_auc_score(class_target, class_preds))
                precision.append(precision_score(class_target, class_labels))
                recall.append(recall_score(class_target, class_labels))
                f1.append(f1_score(class_target, class_labels))
                laucs.append({
                    fct: Metrics.compute_lauc(class_target, class_preds, fct)
                    for fct in fct_vals
                })
        else:
            pred_labels = np.rint(predictions)
            accuracy = accuracy_score(target, pred_labels)
            auc = roc_auc_score(target, predictions)
            precision = precision_score(target, pred_labels)
            recall = recall_score(target, pred_labels)
            f1 = f1_score(target, pred_labels)
            laucs = {
                fct: Metrics.compute_lauc(target, predictions, fct)
                for fct in fct_vals
            }

        self.metrics = {
            "accuracy": accuracy,
            "auc": auc,
            "precision": precision,
            "recall": recall,
            "f1_score": f1,
            "laucs": laucs
        }

        return self