All snippets below assume these common imports (CohenKappa comes from TensorFlow Addons):

import numpy as np
import tensorflow as tf
from tensorflow_addons.metrics import CohenKappa

Example #1
def test_with_sparse_labels():
    y_true = np.array([4, 4, 3, 4], dtype=np.int32)
    y_pred = np.array([4, 4, 1, 2], dtype=np.int32)

    obj = CohenKappa(num_classes=5, sparse_labels=True)

    obj.update_state(y_true, y_pred)
    np.testing.assert_allclose(0.19999999, obj.result())
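
The expected value is exactly 0.2 and can be verified by hand: observed agreement p_o = 2/4 = 0.5, chance agreement p_e = (3 * 2) / 4^2 = 0.375, so kappa = (0.5 - 0.375) / (1 - 0.375) = 0.2. A minimal NumPy sketch of that arithmetic:

import numpy as np

y_true = np.array([4, 4, 3, 4])
y_pred = np.array([4, 4, 1, 2])
n = len(y_true)

# Observed agreement: fraction of exact matches.
p_o = np.mean(y_true == y_pred)                            # 0.5

# Chance agreement from the marginal label counts.
p_e = np.sum(np.bincount(y_true, minlength=5)
             * np.bincount(y_pred, minlength=5)) / n**2    # 0.375

print((p_o - p_e) / (1 - p_e))                             # 0.2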
Example #2
def test_cohen_kappa_single_batch():
    # Test for issue #1962
    obj = CohenKappa(5, regression=True, sparse_labels=True)

    # Test single batch update
    obj.update_state(tf.ones(1), tf.zeros(1))

    np.testing.assert_allclose(0, obj.result().numpy())
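
CohenKappa is a streaming metric: update_state accumulates a confusion matrix across calls, which is why a lone single-sample batch like the one above has to work on its own. Splitting the data from Example #1 into two batches gives the same result as one combined update; a quick sketch:

obj = CohenKappa(num_classes=5, sparse_labels=True)
obj.update_state(np.array([4, 4]), np.array([4, 4]))
obj.update_state(np.array([3, 4]), np.array([1, 2]))
print(obj.result().numpy())  # ~0.2, matching Example #1

obj.reset_states()  # clears the accumulated confusion matrix (reset_state in newer Keras)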
Example #3
def initialize_vars():
    kp_obj1 = CohenKappa(num_classes=5, sparse_labels=True)
    kp_obj2 = CohenKappa(num_classes=5, sparse_labels=True, weightage="linear")
    kp_obj3 = CohenKappa(num_classes=5,
                         sparse_labels=True,
                         weightage="quadratic")

    return kp_obj1, kp_obj2, kp_obj3
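
The three objects differ only in the disagreement penalty: unweighted kappa treats every mismatch equally, while "linear" and "quadratic" weightage penalize a prediction by how far it lands from the true ordinal class. Assuming scikit-learn is available, its cohen_kappa_score gives a reference to compare against (its weights argument mirrors weightage):

from sklearn.metrics import cohen_kappa_score

y_true = [4, 4, 3, 4, 2, 1]
y_pred = [4, 4, 1, 2, 2, 3]

print(cohen_kappa_score(y_true, y_pred))                      # unweighted
print(cohen_kappa_score(y_true, y_pred, weights="linear"))
print(cohen_kappa_score(y_true, y_pred, weights="quadratic"))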
Example #4
    def initialize_vars(self):
        kp_obj1 = CohenKappa(num_classes=5)
        kp_obj2 = CohenKappa(num_classes=5, weightage="linear")
        kp_obj3 = CohenKappa(num_classes=5, weightage="quadratic")

        self.evaluate(tf.compat.v1.variables_initializer(kp_obj1.variables))
        self.evaluate(tf.compat.v1.variables_initializer(kp_obj2.variables))
        self.evaluate(tf.compat.v1.variables_initializer(kp_obj3.variables))
        return kp_obj1, kp_obj2, kp_obj3
Example #5
def test_with_ohe_labels():
    y_true = np.array([4, 4, 3, 4], dtype=np.int32)
    y_true = tf.keras.utils.to_categorical(y_true, num_classes=5)
    y_pred = np.array([4, 4, 1, 2], dtype=np.int32)

    obj = CohenKappa(num_classes=5, sparse_labels=False)

    obj.update_state(y_true, y_pred)
    np.testing.assert_allclose(0.19999999, obj.result().numpy())
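
Both encodings describe the same labels, which is why this test and Example #1 assert the same value; a short sketch making the equivalence explicit:

sparse = CohenKappa(num_classes=5, sparse_labels=True)
dense = CohenKappa(num_classes=5, sparse_labels=False)

y_true = np.array([4, 4, 3, 4], dtype=np.int32)
y_pred = np.array([4, 4, 1, 2], dtype=np.int32)

sparse.update_state(y_true, y_pred)
dense.update_state(tf.keras.utils.to_categorical(y_true, num_classes=5), y_pred)

assert np.isclose(sparse.result().numpy(), dense.result().numpy())  # both ~0.2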
Example #6
    def test_large_values(self):
        y_true = [1] * 10000 + [0] * 20000 + [1] * 20000
        y_pred = [0] * 20000 + [1] * 30000

        obj = CohenKappa(num_classes=2)
        self.evaluate(tf.compat.v1.variables_initializer(obj.variables))

        self.evaluate(obj.update_state(y_true, y_pred))
        self.assertAllClose(0.166666666, obj.result())
Example #7
    def test_with_sparse_labels(self):
        y_true = np.array([4, 4, 3, 4], dtype=np.int32)
        y_pred = np.array([4, 4, 1, 2], dtype=np.int32)

        obj = CohenKappa(num_classes=5, sparse_labels=True)
        self.evaluate(tf.compat.v1.variables_initializer(obj.variables))

        self.evaluate(obj.update_state(y_true, y_pred))
        self.assertAllClose(0.19999999, obj.result())
Example #8
    def test_config(self):
        kp_obj = CohenKappa(name="cohen_kappa", num_classes=5)
        self.assertEqual(kp_obj.name, "cohen_kappa")
        self.assertEqual(kp_obj.dtype, tf.float32)
        self.assertEqual(kp_obj.num_classes, 5)

        # Check save and restore config
        kp_obj2 = CohenKappa.from_config(kp_obj.get_config())
        self.assertEqual(kp_obj2.name, "cohen_kappa")
        self.assertEqual(kp_obj2.dtype, tf.float32)
        self.assertEqual(kp_obj2.num_classes, 5)
Example #9
def test_config():
    kp_obj = CohenKappa(name="cohen_kappa", num_classes=5)
    assert kp_obj.name == "cohen_kappa"
    assert kp_obj.dtype == tf.float32
    assert kp_obj.num_classes == 5

    # Check save and restore config
    kp_obj2 = CohenKappa.from_config(kp_obj.get_config())
    assert kp_obj2.name == "cohen_kappa"
    assert kp_obj2.dtype == tf.float32
    assert kp_obj2.num_classes == 5
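
The same get_config/from_config round trip is what lets a saved model compiled with this metric be reloaded; tf.keras only needs the class registered through custom_objects. A minimal sketch (file name hypothetical):

inputs = tf.keras.layers.Input(shape=(10,))
outputs = tf.keras.layers.Dense(5, activation="softmax")(inputs)
model = tf.keras.models.Model(inputs, outputs)
model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy",
              metrics=[CohenKappa(num_classes=5, sparse_labels=True)])

model.save("model_with_kappa.h5")  # hypothetical path
restored = tf.keras.models.load_model(
    "model_with_kappa.h5", custom_objects={"CohenKappa": CohenKappa})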
Example #10
def test_large_values():
    y_true = [1] * 10000 + [0] * 20000 + [1] * 20000
    y_pred = [0] * 20000 + [1] * 30000

    y_true = tf.convert_to_tensor(y_true)
    y_pred = tf.convert_to_tensor(y_pred)

    obj = CohenKappa(num_classes=2)

    obj.update_state(y_true, y_pred)
    np.testing.assert_allclose(0.166666666, obj.result(), rtol=1e-6, atol=1e-6)
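
The expected value is exactly 1/6: p_o = 30000/50000 = 0.6, p_e = (20000^2 + 30000^2) / 50000^2 = 0.52, and (0.6 - 0.52) / (1 - 0.52) = 0.1666... . Assuming scikit-learn is available, the figure can be cross-checked against its reference implementation:

from sklearn.metrics import cohen_kappa_score

y_true = [1] * 10000 + [0] * 20000 + [1] * 20000
y_pred = [0] * 20000 + [1] * 30000
print(cohen_kappa_score(y_true, y_pred))  # 0.1666...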
Example #11
def test_cohen_kappa_serialization():
    actuals = np.array([4, 4, 3, 3, 2, 2, 1, 1], dtype=np.int32)
    preds = np.array([1, 2, 4, 1, 3, 3, 4, 4], dtype=np.int32)
    weights = np.array([1, 1, 2, 5, 10, 2, 3, 3], dtype=np.int32)

    ck = CohenKappa(num_classes=5, sparse_labels=True, weightage="quadratic")
    check_metric_serialization(ck, actuals, preds, weights)
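
The weights array holds per-sample weights; passed as sample_weight, the standard third argument of a Keras metric's update_state, each pair contributes its weight rather than 1 to the confusion matrix. A direct-usage sketch:

ck = CohenKappa(num_classes=5, sparse_labels=True, weightage="quadratic")
ck.update_state(actuals, preds, sample_weight=weights)
print(ck.result().numpy())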
Example #12
def test_keras_binary_classification_model():
    kp = CohenKappa(num_classes=2)
    inputs = tf.keras.layers.Input(shape=(10, ))
    outputs = tf.keras.layers.Dense(1, activation="sigmoid")(inputs)
    model = tf.keras.models.Model(inputs, outputs)
    model.compile(optimizer="sgd", loss="binary_crossentropy", metrics=[kp])

    x = np.random.rand(1000, 10).astype(np.float32)
    y = np.random.randint(2, size=(1000, 1)).astype(np.float32)

    model.fit(x, y, epochs=1, verbose=0, batch_size=32)
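
Once compiled this way, the metric is reported by evaluate alongside the loss:

loss, kappa = model.evaluate(x, y, verbose=0)
print(f"binary Cohen's kappa: {kappa:.3f}")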
Example #13
    def test_keras_multiclass_reg_model(self):
        kp = CohenKappa(num_classes=5, regression=True, sparse_labels=True)
        inputs = tf.keras.layers.Input(shape=(10, ))
        outputs = tf.keras.layers.Dense(1)(inputs)
        model = tf.keras.models.Model(inputs, outputs)
        model.compile(optimizer="sgd", loss="mse", metrics=[kp])

        x = np.random.rand(1000, 10).astype(np.float32)
        y = np.random.randint(5, size=(1000, )).astype(np.float32)

        model.fit(x, y, epochs=1, verbose=0, batch_size=32)
Example #14
def test_keras_multiclass_classification_model():
    kp = CohenKappa(num_classes=5)
    inputs = tf.keras.layers.Input(shape=(10, ))
    outputs = tf.keras.layers.Dense(5, activation="softmax")(inputs)
    model = tf.keras.models.Model(inputs, outputs)
    model.compile(optimizer="sgd",
                  loss="categorical_crossentropy",
                  metrics=[kp])

    x = np.random.rand(1000, 10).astype(np.float32)
    y = np.random.randint(5, size=(1000, )).astype(np.float32)
    y = tf.keras.utils.to_categorical(y, num_classes=5)

    model.fit(x, y, epochs=1, verbose=0, batch_size=32)
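
If the integer targets are kept sparse rather than one-hot encoded (the pattern used by the METRICS list in Example #15 below), the same model compiles with sparse_labels=True and the sparse loss; a minimal variant:

kp = CohenKappa(num_classes=5, sparse_labels=True)
model.compile(optimizer="sgd",
              loss="sparse_categorical_crossentropy",
              metrics=[kp])

y_sparse = np.random.randint(5, size=(1000,)).astype(np.float32)
model.fit(x, y_sparse, epochs=1, verbose=0, batch_size=32)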
Example #15
def create_early_stopping():
    # Reconstructed header: the excerpt was truncated before this call.
    es = EarlyStopping(monitor='val_loss',
                       restore_best_weights=True)
    return es


# In[12]:

# Constant
EPOCH = 50
BATCH_SIZE = 2048
VERBOSE = 0

# In[13]:

METRICS = [
    SparseCategoricalAccuracy(name='accuracy'),
    CohenKappa(name='kappa', num_classes=5, sparse_labels=True),
    F1Score(name='f1_micro', num_classes=5, average="micro", threshold=0.5),
]


def create_mlp():
    MLP = Sequential([
        Dense(
            10,
            activation='relu',
            input_dim=X_train.shape[1],
        ),
        Dropout(0.5),
        Dense(5, activation='softmax')
    ])
    MLP.compile(optimizer='adam',
                loss='sparse_categorical_crossentropy',  # reconstructed; the excerpt was truncated here
                metrics=METRICS)
    return MLP
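
A hedged sketch of the training call that would follow; X_train and y_train are assumed from earlier notebook cells, create_early_stopping is the name given to the reconstructed helper above, and validation_split is an assumption:

model = create_mlp()
history = model.fit(X_train, y_train,
                    epochs=EPOCH,
                    batch_size=BATCH_SIZE,
                    verbose=VERBOSE,
                    validation_split=0.2,  # assumption
                    callbacks=[create_early_stopping()])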
Example #16
    def __init__(self, **kwargs):
        """
            Input:
                translation_spec - dict with keys 'f_X', 'f_Y'.
                                   Values are passed as kwargs to the
                                   respective ImageTranslationNetwork's
                cycle_lambda=2 - float, loss weight
                cross_lambda=1 - float, loss weight
                l2_lambda=1e-3 - float, loss weight
                learning_rate=1e-5 - float, initial learning rate for
                                     ExponentialDecay
                clipnorm=None - gradient norm clip value, passed to
                                tf.clip_by_global_norm if not None
                logdir=None - path to log directory. If provided, tensorboard
                              logging of training and evaluation is set up at
                              'logdir/'
        """
        learning_rate = kwargs.get("learning_rate", 1e-5)
        lr_all = ExponentialDecay(
            learning_rate, decay_steps=10000, decay_rate=0.96, staircase=True
        )
        self._optimizer_all = tf.keras.optimizers.Adam(lr_all)
        lr_k = ExponentialDecay(
            learning_rate, decay_steps=10000, decay_rate=0.9, staircase=True
        )
        self._optimizer_k = tf.keras.optimizers.Adam(lr_k)
        self.clipnorm = kwargs.get("clipnorm", None)

        # To keep a history for a specific training_metrics,
        # add `self.metrics_history[name] = []` in subclass __init__
        self.train_metrics = {}
        self.difference_img_metrics = {"AUC": tf.keras.metrics.AUC()}
        self.change_map_metrics = {
            "ACC": tf.keras.metrics.Accuracy(),
            "cohens kappa": CohenKappa(num_classes=2),
            # 'F1': tfa.metrics.F1Score(num_classes=2, average=None)
        }
        # If the metric dictionaries share keys, the history dict below will not work.
        assert not set(self.difference_img_metrics) & set(self.change_map_metrics)
        self.metrics_history = {
            **{key: [] for key in self.change_map_metrics.keys()},
            **{key: [] for key in self.difference_img_metrics.keys()},
        }

        self.timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
        self.channels = {"x": kwargs.get("channel_x"), "y": kwargs.get("channel_y")}

        # Flag used in image_to_tensorboard decorator
        self._save_images = tf.Variable(False, trainable=False)

        logdir = kwargs.get("logdir", None)
        if logdir is not None:
            self.log_path = logdir
            self.tb_writer = tf.summary.create_file_writer(self.log_path)
            self._image_dir = tf.constant(os.path.join(self.log_path, "images"))
        else:
            self.tb_writer = tf.summary.create_noop_writer()

        self.evaluation_frequency = tf.constant(
            kwargs.get("evaluation_frequency", 1), dtype=tf.int64
        )
        self.epoch = tf.Variable(0, dtype=tf.int64)
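
A sketch of how such metric dictionaries are typically consumed during evaluation; the method name and arguments here are hypothetical, not part of the original class:

    def _log_change_map_metrics(self, y_true, y_pred):
        # Hypothetical helper: update each streaming metric on the current
        # predictions, record the result, then reset for the next round.
        for name, metric in self.change_map_metrics.items():
            metric.update_state(y_true, y_pred)
            self.metrics_history[name].append(metric.result().numpy())
            metric.reset_states()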
Example #17
dataset_train, dataset_val = load_datasets(DATA_PATH,
                                           batch_size=BATCH_SZ,
                                           buffer_size=1000,
                                           val_size=128)
input_shape = [512, 512, 6]
model = Nest_Net2(input_shape)
print('Dataset spec')
print(dataset_train.element_spec)

precision = Precision()
recall = Recall()
#accuracy = BinaryAccuracy()
#f1_score = F1Score(num_classes=2, threshold=0.5)
#f1_score = F1_score
kappa = CohenKappa(num_classes=2)
auc = AUC(num_thresholds=20)
iou = MeanIoU(num_classes=2)
# use LR?

#model.compile(optimizer='adam',
#                       loss=weighted_bce_dice_loss, metrics=['accuracy', recall, precision, iou])
#model.compile(optimizer='adam', loss=BinaryCrossentropy(), metrics=[accuracy, recall, precision])
model_checkpoint_callback = ModelCheckpoint(filepath=CHECKPOINT_PATH,
                                            monitor='val_loss')
early_stopping = EarlyStopping(patience=10)
now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
training_dir = os.path.join(MODEL_PATH, f'training_{MODEL_NAME}_{now}')
os.mkdir(training_dir)
csv_file = os.path.join(training_dir, 'training_log.csv')
csv_logger = CSVLogger(csv_file)
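
The excerpt stops before the final compile; a hedged sketch of how the metrics and callbacks above would plug together, with the loss choice assumed from the commented-out compile calls:

model.compile(optimizer='adam',
              loss=BinaryCrossentropy(),  # assumption, per the commented-out lines above
              metrics=[precision, recall, kappa, auc, iou])

model.fit(dataset_train,
          validation_data=dataset_val,
          epochs=100,  # hypothetical
          callbacks=[model_checkpoint_callback, early_stopping, csv_logger])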