Example #1
# Imports assumed from context (the original snippet does not show them):
import numpy as np
from sklearn.metrics import confusion_matrix
from tensorflow.keras.metrics import AUC, CategoricalAccuracy
from tensorflow.keras.utils import to_categorical


def get_metrics(y, y_pred):
    # Multiclass confusion matrix: rows are true labels, columns are predictions.
    cnf_matrix = confusion_matrix(y, y_pred)

    # Per-class counts derived from the confusion matrix.
    false_positive = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix).astype(float)
    false_negative = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix).astype(float)
    true_positive = np.diag(cnf_matrix).astype(float)
    true_negative = (
        cnf_matrix.sum() -
        (false_positive + false_negative + true_positive)).astype(float)

    # One-hot encode so the Keras metrics can consume the labels.
    y = to_categorical(y, num_classes=5)
    y_pred = to_categorical(y_pred, num_classes=5)

    auc = AUC()
    _ = auc.update_state(y, y_pred)
    acc = CategoricalAccuracy()
    _ = acc.update_state(y, y_pred)

    # 'accuracy' and 'auc' are scalars; 'sensitivity' and 'specificity'
    # are per-class arrays.
    return {
        'accuracy': acc.result().numpy(),
        'auc': auc.result().numpy(),
        'sensitivity': true_positive / (true_positive + false_negative),
        'specificity': true_negative / (true_negative + false_positive)
    }
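A minimal usage sketch (not part of the original snippet), assuming integer class labels in 0..4 so that num_classes=5 and the 5x5 confusion matrix line up; the label values below are purely illustrative:

# Hypothetical call to get_metrics; labels are illustrative only.
import numpy as np

y_true = np.array([0, 1, 2, 3, 4, 0, 1, 2])
y_hat = np.array([0, 1, 2, 3, 3, 0, 1, 4])

metrics = get_metrics(y_true, y_hat)
print(metrics['accuracy'])     # scalar, fraction of exact matches
print(metrics['sensitivity'])  # per-class array of length 5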
Example #2
File: test_metrics.py  Project: NEGU93/cvnn
# Imports assumed from context (the test file's own import block is not shown);
# the ComplexCategoricalAccuracy path follows the cvnn project layout.
import numpy as np
import tensorflow as tf
from tensorflow.keras.metrics import CategoricalAccuracy
from cvnn.metrics import ComplexCategoricalAccuracy


def test_with_tf():
    classes = 3
    y_true = tf.cast(tf.random.uniform(shape=(34, 54, 12), maxval=classes),
                     dtype=tf.int32)
    y_pred = tf.cast(tf.random.uniform(shape=y_true.shape, maxval=classes),
                     dtype=tf.int32)
    y_pred_one_hot = tf.one_hot(y_pred, depth=classes)
    y_true_one_hot = tf.one_hot(y_true, depth=classes)
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true_one_hot, y_pred_one_hot)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true_one_hot, y_pred_one_hot)
    assert own_metric.result().numpy() == tf_metric.result().numpy()
    y_true = np.array([
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 0., 0.],  # shows tf does not ignore unlabeled [0. 0. 0. 0.] rows
        [0., 0., 1., 0.],  # 3
        [0., 0., 1., 0.],  # 3
        [0., 0., 0., 0.],  # 3
        [0., 0., 1., 0.]  # 3
    ])
    y_pred = np.array([
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [1., 0., 0., 0.],  # 1
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.],  # 4
        [0., 0., 0., 1.]  # 4
    ])
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true, y_pred)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true, y_pred,
                            ignore_unlabeled=False)  # to make it as tf
    assert own_metric.result().numpy() == tf_metric.result().numpy()
    y_true = np.array([[1., 0.], [1., 0.], [1., 0.], [1., 0.], [1., 0.],
                       [1., 0.], [1., 0.], [1., 0.], [1., 0.], [0., 1.]])
    y_pred = np.array([[1., 0.], [1., 0.], [1., 0.], [1., 0.], [1., 0.],
                       [1., 0.], [1., 0.], [1., 0.], [1., 0.], [1., 0.]])
    tf_metric = CategoricalAccuracy()
    tf_metric.update_state(y_true, y_pred)
    own_metric = ComplexCategoricalAccuracy()
    own_metric.update_state(y_true, y_pred,
                            ignore_unlabeled=False)  # to make it as tf
    assert own_metric.result().numpy() == tf_metric.result().numpy()
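For reference, a minimal NumPy sketch (not part of the test file) of the comparison CategoricalAccuracy performs: take the argmax of each row of the targets and predictions and average the matches; the test expects ComplexCategoricalAccuracy to agree with this on real-valued inputs:

# Illustrative only: manual argmax-match accuracy.
import numpy as np

y_true = np.array([[1., 0.], [0., 1.], [0., 1.]])
y_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
manual_acc = np.mean(np.argmax(y_true, axis=-1) == np.argmax(y_pred, axis=-1))
print(manual_acc)  # 0.666..., i.e. 2 of 3 rows match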
Example #3
# Imports assumed from context (not shown alongside the original class):
import json
import os
from datetime import datetime as dt

import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy


class ExperimentClassify:
    def __init__(self, model, optimizer, exptConfig):

        self.now = dt.now().strftime('%Y-%m-%d--%H-%M-%S')
        self.exptConfig = exptConfig
        self.model = model
        self.optimizer = optimizer
        self.loss = CategoricalCrossentropy(
            from_logits=exptConfig['LossParams']['fromLogits'])

        # ------------ metrics ----------------------
        self.catAccTest = CategoricalAccuracy()
        self.catAccTrain = CategoricalAccuracy()

        self.exptFolder = os.path.join(
            exptConfig['OtherParams']['exptBaseFolder'], self.now,
            exptConfig['ModelParams']['name'])
        self.modelFolder = os.path.join(self.exptFolder, 'model')
        self.chkptFolder = os.path.join(self.exptFolder, 'checkpoints')

        os.makedirs(self.modelFolder, exist_ok=True)
        os.makedirs(self.chkptFolder, exist_ok=True)

        self.stepNumber = 0
        self.evalNumber = 0
        self.epoch = 0

        # All the logs go here ...
        # ------------------------
        self.createMetaData()

        self.logDir = os.path.join(self.exptFolder, 'logs')
        self.scalarWriter = tf.summary.create_file_writer(
            os.path.join(self.logDir, 'scalars', 'metrics'))
        self.graphWriter = tf.summary.create_file_writer(
            os.path.join(self.logDir, 'graph'))

        return

    def step(self, x, y):

        # Record the forward pass on the tape, then compute and apply the
        # gradients outside the tape context.
        with tf.GradientTape() as tape:
            yHat = self.model.call(x)
            loss = self.loss(y, yHat)

        grads = tape.gradient(loss, self.model.trainable_weights)
        self.optimizer.apply_gradients(
            zip(grads, self.model.trainable_weights))

        self.catAccTrain.update_state(y, yHat)

        with self.scalarWriter.as_default():
            tf.summary.scalar('training loss', data=loss, step=self.stepNumber)
            tf.summary.scalar('training accuracy',
                              data=self.catAccTrain.result().numpy(),
                              step=self.stepNumber)

        self.stepNumber += 1

        return loss.numpy()

    def eval(self, x, y):

        yHat = self.model.predict(x)
        self.catAccTest.update_state(y, yHat)

        with self.scalarWriter.as_default():
            tf.summary.scalar('testing accuracy',
                              data=self.catAccTest.result().numpy(),
                              step=self.evalNumber)

        self.evalNumber += 1

        return self.catAccTest.result().numpy()

    def createMetaData(self):

        if not os.path.exists(self.exptFolder):
            os.makedirs(self.exptFolder)

        with open(os.path.join(self.exptFolder, 'config.json'), 'w') as fOut:
            json.dump(self.exptConfig, fOut)

        return

    def createModelSummary(self, x):
        tf.summary.trace_on(graph=True)
        self.model.predict(x)
        with self.graphWriter.as_default():
            tf.summary.trace_export('name', step=0)
        tf.summary.trace_off()

    def saveModel(self):

        try:
            self.model.save(self.modelFolder)
        except Exception as e:
            print(f'Unable to save the model: {e}')

        return

    def checkPoint(self):
        try:
            epoch = self.epoch
            step = self.stepNumber
            self.model.save_weights(
                os.path.join(self.chkptFolder, f'{epoch:07d}-{step:07d}'))
        except Exception as e:
            print(f'Unable to checkpoint: {self.stepNumber}: {e}')
        return
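A hypothetical driver for ExperimentClassify (not from the original source): the model, optimizer, and exptConfig below are stand-ins chosen to exercise only the config keys the class actually reads (LossParams.fromLogits, ModelParams.name, OtherParams.exptBaseFolder):

# Hypothetical usage; all names and values are illustrative only.
import tensorflow as tf

exptConfig = {
    'LossParams': {'fromLogits': False},
    'ModelParams': {'name': 'demo-mlp'},
    'OtherParams': {'exptBaseFolder': './experiments'},
}
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax'),
])
optimizer = tf.keras.optimizers.Adam(1e-3)

expt = ExperimentClassify(model, optimizer, exptConfig)
x = tf.random.normal((16, 8))
y = tf.one_hot(tf.random.uniform((16,), maxval=3, dtype=tf.int32), depth=3)
train_loss = expt.step(x, y)   # one optimizer step, logged to TensorBoard
test_acc = expt.eval(x, y)     # running categorical accuracy on held-out data
expt.checkPoint()
expt.saveModel()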