Example No. 1
    def _calculate_metrics(self, run_states):
        loss_sum = run_examples = 0
        run_step = run_time_used = 0
        all_labels = []
        all_infers = []

        for run_state in run_states:
            run_examples += run_state.run_examples
            run_step += run_state.run_step
            loss_sum += np.mean(
                run_state.run_results[-1]) * run_state.run_examples

            # Pairwise outputs: the model scores a left and a right candidate;
            # predict 1 when the left candidate scores higher, otherwise 0.
            left_scores, right_scores, labels = run_state.run_results[:-1]
            for index in range(left_scores.shape[0]):
                if left_scores[index] > right_scores[index]:
                    prediction = 1
                else:
                    prediction = 0
                all_infers.append(prediction)
                all_labels.append(int(labels[index][0]))

        run_time_used = time.time() - run_states[0].run_time_begin
        avg_loss = loss_sum / run_examples
        run_speed = run_step / run_time_used

        # The first key will be used as main metrics to update the best model
        scores = OrderedDict()
        precision, recall, f1 = calculate_f1_np(all_infers, all_labels)
        acc = simple_accuracy(all_infers, all_labels)
        for metric in self.metrics_choices:
            if metric == "precision":
                scores["precision"] = precision
            elif metric == "recall":
                scores["recall"] = recall
            elif metric == "f1":
                scores["f1"] = f1
            elif metric == "acc":
                scores["acc"] = acc
            else:
                raise ValueError(
                    "Unknown metric: %s! The chosen metric must be acc, f1, precision or recall."
                    % metric)

        return scores, avg_loss, run_speed
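
All four examples call calculate_f1_np and simple_accuracy without showing them. The sketch below is only an assumption about what such helpers might look like for binary labels, inferred from how they are called above (predictions first, labels second, as plain lists or 1-D NumPy arrays); it is not the actual implementation.

import numpy as np

def simple_accuracy(preds, labels):
    # Fraction of positions where prediction and label agree.
    preds, labels = np.asarray(preds), np.asarray(labels)
    return float((preds == labels).mean())

def calculate_f1_np(preds, labels):
    # Binary precision / recall / F1 with the positive class encoded as 1.
    preds, labels = np.asarray(preds), np.asarray(labels)
    tp = int(np.sum((preds == 1) & (labels == 1)))
    fp = int(np.sum((preds == 1) & (labels == 0)))
    fn = int(np.sum((preds == 0) & (labels == 1)))
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1
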
Example No. 2
    def _calculate_metrics(self, run_states):
        loss_sum = run_examples = 0
        run_step = run_time_used = 0
        all_labels = []
        all_infers = []

        for run_state in run_states:
            run_examples += run_state.run_examples
            run_step += run_state.run_step
            loss_sum += np.mean(
                run_state.run_results[-1]) * run_state.run_examples

            scores, labels = run_state.run_results[:-1]

            predictions = np.argmax(scores, axis=1)

            all_infers += [int(pre) for pre in predictions]
            true_labels = [int(label[0]) for label in labels]
            all_labels += true_labels

        run_time_used = time.time() - run_states[0].run_time_begin
        avg_loss = loss_sum / run_examples
        run_speed = run_step / run_time_used

        # The first key will be used as main metrics to update the best model
        scores = OrderedDict()
        precision, recall, f1 = calculate_f1_np(all_infers, all_labels)
        acc = simple_accuracy(all_infers, all_labels)
        for metric in self.metrics_choices:
            if metric == "precision":
                scores["precision"] = precision
            elif metric == "recall":
                scores["recall"] = recall
            elif metric == "f1":
                scores["f1"] = f1
            elif metric == "acc":
                scores["acc"] = acc
            else:
                raise ValueError(
                    "Unknown metric: %s! The chosen metric must be acc, f1, precision or recall."
                    % metric)

        return scores, avg_loss, run_speed
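
Example No. 2 differs from No. 1 in how predictions are derived: instead of comparing a left and a right score, it takes the argmax over per-class scores. A small standalone illustration with made-up values:

import numpy as np

scores = np.array([[0.1, 0.7, 0.2],   # highest score in column 1
                   [0.8, 0.1, 0.1]])  # highest score in column 0
labels = np.array([[1], [2]])

predictions = np.argmax(scores, axis=1)            # array([1, 0])
all_infers = [int(pre) for pre in predictions]     # [1, 0]
all_labels = [int(label[0]) for label in labels]   # [1, 2]
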
Example No. 3
    def _calculate_metrics(self, run_states):
        loss_sum = acc_sum = run_examples = 0
        run_step = run_time_used = 0
        all_labels = np.array([])
        all_infers = np.array([])

        for run_state in run_states:
            run_examples += run_state.run_examples
            run_step += run_state.run_step
            loss_sum += np.mean(
                run_state.run_results[-1]) * run_state.run_examples
            acc_sum += np.mean(
                run_state.run_results[2]) * run_state.run_examples
            # run_results[0] holds the ground-truth labels, run_results[1] the
            # inferred labels; both are flattened and accumulated across batches.
            np_labels = run_state.run_results[0]
            np_infers = run_state.run_results[1]
            all_labels = np.hstack((all_labels, np_labels.reshape([-1])))
            all_infers = np.hstack((all_infers, np_infers.reshape([-1])))

        run_time_used = time.time() - run_states[0].run_time_begin
        avg_loss = loss_sum / run_examples
        run_speed = run_step / run_time_used

        # The first key will be used as main metrics to update the best model
        scores = OrderedDict()
        precision, recall, f1 = calculate_f1_np(all_infers, all_labels)
        matthews = matthews_corrcoef(all_infers, all_labels)
        for metric in self.metrics_choices:
            if metric == "precision":
                scores["precision"] = precision
            elif metric == "recall":
                scores["recall"] = recall
            elif metric == "f1":
                scores["f1"] = f1
            elif metric == "acc":
                scores["acc"] = acc_sum / run_examples
            elif metric == "matthews":
                scores["matthews"] = matthews
            else:
                raise ValueError(
                    "Unknown metric: %s! The chosen metric must be acc, f1, precision, recall or matthews."
                    % metric)

        return scores, avg_loss, run_speed
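
Example No. 3 additionally reports the Matthews correlation coefficient. The matthews_corrcoef function is presumably sklearn.metrics.matthews_corrcoef (an assumption; the import is not shown above). Since MCC is symmetric in its two arguments, passing (all_infers, all_labels) rather than (y_true, y_pred) yields the same value:

import numpy as np
from sklearn.metrics import matthews_corrcoef

all_labels = np.array([1., 0., 1., 1., 0.])
all_infers = np.array([1., 0., 0., 1., 0.])
# Symmetric: both calls print the same coefficient.
print(matthews_corrcoef(all_infers, all_labels))
print(matthews_corrcoef(all_labels, all_infers))
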
Example No. 4
    def _calculate_metrics(self, run_states):
        loss_sum = acc_sum = run_examples = 0
        run_step = run_time_used = 0
        all_labels = np.array([])
        all_infers = np.array([])

        for run_state in run_states:
            run_examples += run_state.run_examples
            run_step += run_state.run_step
            loss_sum += np.mean(
                run_state.run_results[-1]) * run_state.run_examples
            acc_sum += np.mean(
                run_state.run_results[2]) * run_state.run_examples
            np_labels = run_state.run_results[0]
            np_infers = run_state.run_results[1]
            all_labels = np.hstack((all_labels, np_labels.reshape([-1])))
            all_infers = np.hstack((all_infers, np_infers.reshape([-1])))

        run_time_used = time.time() - run_states[0].run_time_begin
        avg_loss = loss_sum / run_examples
        run_speed = run_step / run_time_used

        # The first key will be used as main metrics to update the best model
        scores = OrderedDict()

        for metric in self.metrics_choices:
            if metric == "acc":
                avg_acc = acc_sum / run_examples
                scores["acc"] = avg_acc
            elif metric == "f1":
                # calculate_f1_np returns (precision, recall, f1) in the other
                # examples, so keep only the f1 component here.
                _, _, f1 = calculate_f1_np(all_infers, all_labels)
                scores["f1"] = f1
            elif metric == "matthews":
                matthews = matthews_corrcoef(all_infers, all_labels)
                scores["matthews"] = matthews
            else:
                raise ValueError("Not Support Metric: \"%s\"" % metric)

        return scores, avg_loss, run_speed
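
In Examples No. 3 and No. 4, accuracy is not recomputed from all_infers and all_labels; each batch's mean accuracy (run_results[2]) is weighted by its example count, which equals the global per-example accuracy even when batch sizes differ. A small check with made-up numbers:

import numpy as np

batch_accs = [0.50, 1.00]   # per-batch mean accuracy
batch_sizes = [2, 6]        # examples per batch

acc_sum = sum(acc * n for acc, n in zip(batch_accs, batch_sizes))
run_examples = sum(batch_sizes)
print(acc_sum / run_examples)   # 0.875

# Same as the global mean over all 8 examples (1 = correct, 0 = wrong).
per_example = np.array([1, 0, 1, 1, 1, 1, 1, 1])
print(per_example.mean())       # 0.875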