Example 1
def train_model(model: Model, base_model: Model,
                train_gen: classifier_sequence.ClassifierSequence,
                val_gen: classifier_sequence.ClassifierSequence) -> None:
    """Trains the model on the given data sets."""
    # Phase 1: freeze the pretrained backbone so only the new head is trained.
    for layer in base_model.layers:
        layer.trainable = False

    opt = optimizers.SGD(learning_rate=0.00001, momentum=0.8, clipnorm=1)
    # opt = optimizers.Adam(learning_rate=0.000001)

    model.compile(optimizer=opt,
                  loss='binary_crossentropy',
                  metrics=[
                      'accuracy',
                      metrics.Recall(),
                      metrics.Precision(),
                      metrics.FalsePositives(),
                      metrics.FalseNegatives()
                  ])

    log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = callbacks.TensorBoard(log_dir=log_dir,
                                                 histogram_freq=1)

    model.fit(train_gen,
              validation_data=val_gen,
              epochs=60,
              callbacks=[tensorboard_callback])

    # Phase 2: unfreeze the backbone and fine-tune end to end. The model must
    # be recompiled for the change in `trainable` to take effect.
    for layer in base_model.layers:
        layer.trainable = True

    opt = optimizers.SGD(learning_rate=0.00001, momentum=0.8, clipnorm=1)

    model.compile(optimizer=opt,
                  loss='binary_crossentropy',
                  metrics=[
                      'accuracy',
                      metrics.Recall(),
                      metrics.Precision(),
                      metrics.FalsePositives(),
                      metrics.FalseNegatives()
                  ])

    model.fit(train_gen,
              validation_data=val_gen,
              epochs=120,
              initial_epoch=60,
              callbacks=[tensorboard_callback])
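
The function above expects a (model, base_model) pair in which base_model is a pretrained backbone embedded inside model. A minimal sketch of constructing such a pair, assuming a MobileNetV2 backbone and a binary head (the input shape and layer choices are illustrative, not from the original project):

from tensorflow.keras import Model, layers, applications

def build_models(input_shape=(224, 224, 3)):
    # Pretrained backbone without its classification head.
    base_model = applications.MobileNetV2(input_shape=input_shape,
                                          include_top=False,
                                          weights='imagenet')
    inputs = layers.Input(shape=input_shape)
    x = base_model(inputs)
    x = layers.GlobalAveragePooling2D()(x)
    outputs = layers.Dense(1, activation='sigmoid')(x)
    return Model(inputs, outputs), base_model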
Example 2
def test_eval(model, test_df, y_test):
    test_data = BertPreprocessing(
        test_df[["sentence1", "sentence2"]].values.astype("str"),
        y_test,
        batch_size=config.batch_size,
        shuffle=False,
    )

    y_pred = model.predict(test_data)

    size = y_pred.shape[0]
    y_test = y_test[:size, :]

    accuracy = metrics.CategoricalAccuracy()
    accuracy.update_state(y_test, y_pred)

    precision = metrics.Precision()
    precision.update_state(y_test, y_pred)

    recall = metrics.Recall()
    recall.update_state(y_test, y_pred)

    f1 = tfa.metrics.F1Score(num_classes=3, average="macro")
    f1.update_state(y_test, y_pred)

    auc = metrics.AUC()
    auc.update_state(y_test, y_pred)

	print(f"""
	Accuracy: {accuracy.result().numpy()}
	Precision: {precision.result().numpy()}
	Recall: {recall.result().numpy()}
	F1 score: {f1.result().numpy()}
	AUC: {auc.result().numpy()}
	""")
Example 3
    def confusion_matrix_other_metric(self):
        return [
            metrics.Accuracy(name='acc'),
            metrics.Precision(name='precision'),
            metrics.Recall(name='recall'),
            metrics.AUC(name='auc'),
        ]
Example 4
def evaluate(dataset,
             model,
             loss,
             metrics=[metrics.Recall(), metrics.Precision()]):
    """
    Evaluate keras model.

    Args:
    
        dataset: Tensorflow Dataset object for evaluate the model.

        model: Keras model.

        loss: Loss function.

        metrics: List of tensorflow mertics. Default contains Recall and Precision.

    Returns:
        Dict with keys 'loss_value', 'metric_name_1', 'metric_name_2' and etc. 
    """

    for x, y in dataset:
        loss_value = __calculate_loss(model, x, y, loss=loss,
                                      training=False)['loss']
        # Compute predictions once per batch instead of once per metric.
        y_pred = predict_classes(model, x, training=False)
        for metric in metrics:
            metric.update_state(y_true=y, y_pred=y_pred)
    result = {
        'loss_value': loss_value,
    }
    for metric in metrics:
        result[__get_name(metric)['name']] = metric.result()
    return result
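
A hedged usage sketch for the helper above, assuming a batched tf.data.Dataset of (features, labels) pairs and a standard Keras loss (the shapes and dataset contents are illustrative only):

import tensorflow as tf
from tensorflow.keras import losses

# Toy binary-classification dataset; shapes are illustrative.
ds = tf.data.Dataset.from_tensor_slices(
    (tf.random.normal((64, 10)),
     tf.random.uniform((64, 1), maxval=2, dtype=tf.int32))).batch(16)

results = evaluate(ds, model, loss=losses.BinaryCrossentropy())
print(results)  # {'loss_value': ..., plus one entry per metric}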
Example 5
    def __init__(self, chip_size):
        self.chip_size = chip_size
        self.metrics = [
            metrics.Precision(top_k=1, name='precision'),
            metrics.Recall(top_k=1, name='recall'),
            CustomMeanIOU(num_classes=6, name='mIOU'),
        ]
Example 6
def main(args):
    # Enable GPU memory growth to avoid out-of-memory errors on an RTX 2070.
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

    input_size = (None, None)
    # Load model from JSON file if file path was provided...
    if os.path.exists(args.model):
        try:
            with open(args.model, 'r') as f:
                json = f.read()
            model = model_from_json(json)
            args.model = os.path.splitext(os.path.split(args.model)[-1])[0]
        except JSONDecodeError:
            raise ValueError(
                "JSON decode error found. File path %s exists but could not be decoded; verify if JSON encoding was "
                "performed properly." % args.model)
    # ...Otherwise, create model from this project by using a proper key name
    else:
        model = models_dict[args.model]((input_size[0], input_size[1], 1))
    try:
        # Model name should match with the name of a model from
        # https://www.tensorflow.org/api_docs/python/tf/keras/applications/
        # This assumes you used a model with RGB inputs as the first part of your model,
        # therefore your input data should be preprocessed with the corresponding
        # 'preprocess_input' function
        m = importlib.import_module('tensorflow.keras.applications.%s' %
                                    model.name)
        rgb_preprocessor = getattr(m, "preprocess_input")
    except ModuleNotFoundError:
        rgb_preprocessor = None

    # Load trained weights
    model.load_weights(args.pretrained_weights)

    # Model is compiled to provide the desired metrics
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss=custom_losses.bce_dsc_loss(3.0),
                  metrics=[
                      custom_losses.dice_coef,
                      keras_metrics.Precision(),
                      keras_metrics.Recall()
                  ])

    # Here we find the paths to all images from the selected datasets
    paths = data.create_image_paths(args.dataset_names, args.dataset_paths)

    for path in paths[0, :]:

        [im, gt, pred] = data.test_image_from_path(model, path, None,
                                                   rgb_preprocessor)

        name = os.path.split(path)[-1]
        name, extension = os.path.splitext(name)
        extension = ".png"
        if not os.path.exists(args.save_to) and path != '':
            os.makedirs(args.save_to)
        cv2.imwrite(os.path.join(args.save_to, name + extension),
                    255 * np.where(pred > 0.5, 1.0, 0.0))
Example 7
    def eva_metric(self, y_label, y_class):
        recall = metrics.Recall()
        recall.update_state(y_label, y_class)
        print('Recall result: ', recall.result().numpy())
        pre = metrics.Precision()
        pre.update_state(y_label, y_class)
        print('Precision result: ', pre.result().numpy())
Example 8
    def build_model(self,
                    embedding_size=EMBEDDING_SIZE,
                    input_length=MAX_DOCUMENT_LENGTH,
                    link_embedding_size=LINK_EMBEDDING_SIZE,
                    link_input_length=LINK_INPUT_LENGTH):
        he_inputs = keras.Input(shape=(input_length, ), name="hebrew")
        en_inputs = keras.Input(shape=(input_length, ), name="english")
        link_inputs = keras.Input(shape=(link_input_length, ), name="links")

        assert getattr(
            self, 'he_embed_model', None
        ) is not None, "CnnClfEnsemble.load_all_embedding_models() needs to be called before calling build_model()"
        he_embed = self.he_embed_model(he_inputs)
        en_embed = self.en_embed_model(en_inputs)
        link_embed = self.link_embed_model(link_inputs)

        self.model_head = self.get_model_head()
        outputs = self.model_head([he_embed, en_embed, link_embed])

        self.model = keras.Model(inputs=[he_inputs, en_inputs, link_inputs],
                                 outputs=outputs)
        self.model.compile(
            optimizer=optimizers.Adam(),  # learning_rate=0.001
            loss=losses.CategoricalCrossentropy(from_logits=False),
            metrics=[
                metrics.CategoricalAccuracy(),
                metrics.Recall(class_id=0),
                metrics.Precision(class_id=0)
            ])
Example 9
def evaluate(dataset,
             model,
             loss,
             metrics=[metrics.Recall(), metrics.Precision()]):
    """
    Evaluate keras model.

    Args:
    
        dataset: Tensorflow Dataset object for evaluate the model.

        model: Keras model.

        loss: Loss function.

        metrics: List of tensorflow mertics. Default contains Recall and Precision.
    """

    _data = api.evaluate(dataset=dataset,
                         model=model,
                         loss=loss,
                         metrics=metrics)
    for name, val in _data.items():
        print('{}: {:.6f}'.format(name, val), end=', ')
    print()
Example 10
    def build(self):
        model = tf.keras.Sequential()
        model.add(layers.Flatten(input_shape=(42, 4)))

        for i in range(self.layers):
            if self.reg:
                model.add(
                    layers.Dense(self.sizes[i],
                                 activation='elu',
                                 kernel_regularizer=regularizers.l2(
                                     self.reg[i])))
            else:
                model.add(layers.Dense(self.sizes[i], activation='elu'))

            if self.dropout:
                model.add(layers.Dropout(self.dropout[i]))

        model.add(layers.Dense(1, activation='sigmoid'))

        model.compile(optimizer=optimizers.Adam(learning_rate=self.lr),
                      loss=losses.BinaryCrossentropy(),
                      metrics=[
                          'binary_accuracy',
                          metrics.TruePositives(name='tp'),
                          metrics.FalseNegatives(name='fn'),
                          metrics.TrueNegatives(name='tn'),
                          metrics.FalsePositives(name='fp'),
                          metrics.Recall(name='recall'),
                          metrics.Precision(name='precision')
                      ])

        return model
Example 11
    def __init__(self, vocab_size, embedding_size, input_length,
                 n_punct_classes) -> None:
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.input_length = input_length

        inputs = keras.Input((self.input_length, ))
        model_head = self.get_model_head()(inputs)
        has_punct_out = self.get_mlp_model(2, 'has_p')(model_head)
        punct_out = self.get_mlp_model(n_punct_classes, 'p')(model_head)
        # punct_mask = self.get_punct_mask(n_punct_classes, 'p')({'has_p': has_punct_out, 'p_inter': punct_out})
        start_quote_out = self.get_mlp_model(2, 'sq')(model_head)
        end_quote_out = self.get_mlp_model(2, 'eq')(model_head)
        dash_out = self.get_mlp_model(2, 'd')(model_head)
        self.model = keras.Model(inputs=inputs,
                                 outputs={
                                     'has_p': has_punct_out,
                                     'p': punct_out,
                                     'sq': start_quote_out,
                                     'eq': end_quote_out,
                                     'd': dash_out,
                                 })
        my_losses = {
            "has_p": losses.BinaryCrossentropy(from_logits=False),
            "p": losses.CategoricalCrossentropy(from_logits=False),
            "sq": losses.BinaryCrossentropy(from_logits=False),
            "eq": losses.BinaryCrossentropy(from_logits=False),
            "d": losses.BinaryCrossentropy(from_logits=False),
        }
        my_metrics = {
            "has_p": [metrics.Recall(class_id=1), metrics.Precision(class_id=1)],
            "p": [metrics.Recall(class_id=1), metrics.Precision(class_id=1)],
            "sq": [metrics.Recall(class_id=1), metrics.Precision(class_id=1)],
            "eq": [metrics.Recall(class_id=1), metrics.Precision(class_id=1)],
            "d": [metrics.Recall(class_id=1), metrics.Precision(class_id=1)],
        }
        self.model.compile(
            optimizer=optimizers.Adam(),  # learning_rate=0.001
            loss=my_losses,
            metrics=my_metrics)
        self.model.summary()
Example 12
def evaluate(x_test, y_test):
    model = keras.models.load_model(WEIGHTS_PATH)
    model.compile(loss='categorical_crossentropy',
                  metrics=[metrics.CategoricalAccuracy(), metrics.Precision(), metrics.Recall()])
    loss, accuracy, precision, recall = model.evaluate(x_test, y_test, verbose=1)
    f1_score = 2 * (precision * recall) / (precision + recall)
    print('loss:%.4f accuracy:%.4f precision:%.4f recall:%.4f f1_score:%.4f'
          % (loss, accuracy, precision, recall, f1_score))
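
Since the positional outputs of evaluate are easy to misorder, return_dict=True (available since TF 2.2) yields the same numbers keyed by metric name; a minimal sketch assuming the same compiled model:

results = model.evaluate(x_test, y_test, verbose=1, return_dict=True)
precision = results['precision']  # default name of the first metrics.Precision()
recall = results['recall']        # default name of the first metrics.Recall()
f1_score = 2 * (precision * recall) / (precision + recall)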
Example 13
    def getCompiledModel(self, optimizer=None, loss=None, runEagerly=False):
        self.model.compile(
            optimizer='adam' if optimizer is None else optimizer,
            loss='categorical_crossentropy' if loss is None else loss,
            metrics=['accuracy',
                     metrics.Recall(),
                     metrics.Precision()],
            run_eagerly=runEagerly)
Example 14
def train(model, x_train, y_train):
    model.compile(loss='binary_crossentropy',
                  optimizer=keras.optimizers.Adam(learning_rate=LEARNING_RATE),
                  metrics=[metrics.BinaryAccuracy(), metrics.Precision(name='precision'), metrics.Recall()])
    # Naming the metric explicitly guarantees that 'precision' appears in the
    # training logs under the key the checkpoint monitors.
    checkpoint = ModelCheckpoint(NN_ATTACK_WEIGHTS_PATH, monitor='precision', verbose=1, save_best_only=True,
                                 mode='max')
    model.fit(x_train, y_train,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              callbacks=[checkpoint])
Example 15
def test_metric_direction_inference():
    # Test min metrics.
    assert metrics_tracking.infer_metric_direction("MAE") == "min"
    assert (
        metrics_tracking.infer_metric_direction(metrics.binary_crossentropy) == "min"
    )
    assert metrics_tracking.infer_metric_direction(metrics.FalsePositives()) == "min"

    # All losses in keras.losses are considered as 'min'.
    assert metrics_tracking.infer_metric_direction("squared_hinge") == "min"
    assert metrics_tracking.infer_metric_direction(losses.hinge) == "min"
    assert (
        metrics_tracking.infer_metric_direction(losses.CategoricalCrossentropy())
        == "min"
    )

    # Test max metrics.
    assert metrics_tracking.infer_metric_direction("binary_accuracy") == "max"
    assert (
        metrics_tracking.infer_metric_direction(metrics.categorical_accuracy)
        == "max"
    )
    assert metrics_tracking.infer_metric_direction(metrics.Precision()) == "max"

    # Test unknown metrics.
    assert metrics_tracking.infer_metric_direction("my_metric") is None

    def my_metric_fn(x, y):
        return x

    assert metrics_tracking.infer_metric_direction(my_metric_fn) is None

    class MyMetric(metrics.Metric):
        def update_state(self, x, y):
            return 1

        def result(self):
            return 1

    assert metrics_tracking.infer_metric_direction(MyMetric()) is None

    # Test special cases.
    assert metrics_tracking.infer_metric_direction("loss") == "min"
    assert metrics_tracking.infer_metric_direction("acc") == "max"
    assert metrics_tracking.infer_metric_direction("val_acc") == "max"
    assert metrics_tracking.infer_metric_direction("crossentropy") == "min"
    assert metrics_tracking.infer_metric_direction("ce") == "min"
    assert metrics_tracking.infer_metric_direction("weighted_acc") == "max"
    assert metrics_tracking.infer_metric_direction("val_weighted_ce") == "min"
    assert (
        metrics_tracking.infer_metric_direction("weighted_binary_accuracy") == "max"
    )
Example 16
def build_simple_model(dataset='Fashion Mnist',
                       opt='sgd',
                       hidden=None,
                       funcs=None,
                       loss=None,
                       metrics_list=None):
    model = models.Sequential()
    if dataset == 'CIFAR-10':
        model.add(layers.Flatten(input_shape=[32, 32, 3]))
    elif dataset == 'Fashion Mnist':
        model.add(layers.Flatten(input_shape=[28, 28]))
    for i in hidden.keys():
        model.add(layers.Dense(hidden[i], activation=funcs[i].lower()))
    model.add(layers.Dense(10, activation="softmax"))

    loss_dict = {
        'Categorical Crossentropy': 'categorical_crossentropy',
        'Binary Crossentropy': 'binary_crossentropy',
        'Categorical Hinge': 'categorical_hinge',
        'Huber loss': 'huber_loss'
    }
    metrics_dict = {
        'auc': metrics.AUC(),
        'recall': metrics.Recall(),
        'accuracy': (metrics.CategoricalAccuracy()
                     if loss and loss.startswith('Categorical')
                     else metrics.Accuracy()),
        'precision': metrics.Precision(),
        'categorical Hinge': metrics.CategoricalHinge(),
        'squared Hinge': metrics.SquaredHinge(),
        'Kullback-Leibler divergence': metrics.KLDivergence(),
        'mean absolute error': metrics.MeanAbsoluteError(),
        'mean squared error': metrics.MeanSquaredError()
    }
    if metrics_list is not None and len(metrics_list) > 0:
        metrics_list = [metrics_dict.get(m, m) for m in metrics_list]
    else:
        metrics_list = ['accuracy']

    loss_f = loss_dict.get(loss)

    model.compile(loss=loss_f, optimizer=opt, metrics=metrics_list)
    return model
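
A hedged usage sketch for the builder above; the hidden-layer spec and metric names follow the dict conventions the function expects, but the specific values are illustrative:

model = build_simple_model(
    dataset='Fashion Mnist',
    opt='sgd',
    hidden={0: 128, 1: 64},        # layer index -> units
    funcs={0: 'ReLU', 1: 'ReLU'},  # lowercased before being passed to Dense
    loss='Categorical Crossentropy',
    metrics_list=['precision', 'recall'],
)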
Example 17
def inference(tfrecords_path, weights_path, wts_root):
    """ Inference function to reproduce original model scores. This
    script can be run as a standalone using python inference.py.
    For more information try: `python inference.py -h`

    Parameters
    ----------
    tfrecords_path: str
        The path to the directory containing preprocessed tfrecords.
    weights_path: str
        The path to the combined model weights. A copy of the
        weights can be found here:
        https://gin.g-node.org/shashankbansal56/nondefaced-detector-reproducibility/src/master/pretrained_weights/combined
    wts_root: str
        The path to the root directory of all the model weights.
        A copy of the weights can be found here:
        https://gin.g-node.org/shashankbansal56/nondefaced-detector-reproducibility/src/master/pretrained_weights
    """

    model = CombinedClassifier(input_shape=(128, 128),
                               dropout=0.4,
                               wts_root=wts_root,
                               trainable=False)

    model.load_weights(os.path.abspath(weights_path))
    model.trainable = False

    dataset_test = get_dataset(
        file_pattern=os.path.join(tfrecords_path, "data-test_*"),
        n_classes=2,
        batch_size=16,
        volume_shape=(128, 128, 128),
        plane="combined",
        mode="test",
    )

    METRICS = [
        metrics.BinaryAccuracy(name="accuracy"),
        metrics.Precision(name="precision"),
        metrics.Recall(name="recall"),
        metrics.AUC(name="auc"),
    ]

    model.compile(
        loss=tf.keras.losses.binary_crossentropy,
        optimizer=Adam(learning_rate=1e-3),
        metrics=METRICS,
    )

    model.evaluate(dataset_test)
Example 18
def main(args):
    input_size = (None, None)
    # Load model from JSON file if file path was provided...
    if os.path.exists(args.model):
        try:
            with open(args.model, 'r') as f:
                json = f.read()
            model = model_from_json(json)
            args.model = os.path.splitext(os.path.split(args.model)[-1])[0]
        except JSONDecodeError:
            raise ValueError(
                "JSON decode error found. File path %s exists but could not be decoded; verify if JSON encoding was "
                "performed properly." % args.model)
    # ...Otherwise, create model from this project by using a proper key name
    else:
        model = models_dict[args.model]((input_size[0], input_size[1], 1))
    try:
        # Model name should match with the name of a model from
        # https://www.tensorflow.org/api_docs/python/tf/keras/applications/
        # This assumes you used a model with RGB inputs as the first part of your model,
        # therefore your input data should be preprocessed with the corresponding
        # 'preprocess_input' function
        m = importlib.import_module('tensorflow.keras.applications.%s' % model.name)
        rgb_preprocessor = getattr(m, "preprocess_input")
    except ModuleNotFoundError:
        rgb_preprocessor = None

    # Load trained weights
    model.load_weights(args.pretrained_weights)

    # Model is compiled to provide the desired metrics
    model.compile(optimizer=Adam(learning_rate=1e-4), loss=custom_losses.bce_dsc_loss(3.0),
                  metrics=[custom_losses.dice_coef, keras_metrics.Precision(), keras_metrics.Recall()])

    # Here we find the paths to all images from the selected datasets
    paths = data.create_image_paths(args.dataset_names, args.dataset_paths)

    print("Evaluating the model...")
    data.save_results_on_paths(model, paths, args.save_to)
    metrics = model.evaluate(x=data.validation_image_generator(paths, batch_size=1, rgb_preprocessor=rgb_preprocessor),
                             steps=paths.shape[1])
    result_string = "Dataset: %s\nModel: %s\n" % ("/".join(args.dataset_names), args.model)
    for idx, metric in enumerate(model.metrics_names):
        result_string += "{}: {:.4f}\n".format(metric, metrics[idx])
    for attribute in args.__dict__.keys():
        result_string += "\n--%s: %s" % (attribute, str(args.__getattribute__(attribute)))
    with open(os.path.join(args.save_to, "results.txt"), "w") as f:
        f.write(result_string.strip())
Example 20
def load_badword_model() -> Model:
    """
    학습된 모델을 불러옵니다. 불러온 모델은 compile 작업을 마친 상태입니다.
    
    return: 사전학습된 tf.keras.Model 객체가 compile된 상태로 반환됩니다.
    """
    model = load_model(get_path('model.h5'))
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=[
                      metrics.BinaryAccuracy(name="acc"),
                      metrics.Recall(name="recall"),
                      metrics.Precision(name="prec"),
                  ])

    return model
Example 21
    def __get_metric(self, metric):
        if metric == "auc":
            return m.AUC()
        elif metric == "accuracy":
            return m.Accuracy()
        elif metric == "binary_accuracy":
            return m.BinaryAccuracy()
        elif metric == "categorical_accuracy":
            return m.CategoricalAccuracy()
        elif metric == "binary_crossentropy":
            return m.BinaryCrossentropy()
        elif metric == "categorical_crossentropy":
            return m.CategoricalCrossentropy()
        elif metric == "sparse_categorical_crossentropy":
            return m.SparseCategoricalCrossentropy()
        elif metric == "kl_divergence":
            return m.KLDivergence()
        elif metric == "poisson":
            return m.Poisson()
        elif metric == "mse":
            return m.MeanSquaredError()
        elif metric == "rmse":
            return m.RootMeanSquaredError()
        elif metric == "mae":
            return m.MeanAbsoluteError()
        elif metric == "mean_absolute_percentage_error":
            return m.MeanAbsolutePercentageError()
        elif metric == "mean_squared_logarithm_error":
            return m.MeanSquaredLogarithmicError()
        elif metric == "cosine_similarity":
            return m.CosineSimilarity()
        elif metric == "log_cosh_error":
            return m.LogCoshError()
        elif metric == "precision":
            return m.Precision()
        elif metric == "recall":
            return m.Recall()
        elif metric == "true_positive":
            return m.TruePositives()
        elif metric == "true_negative":
            return m.TrueNegatives()
        elif metric == "false_positive":
            return m.FalsePositives()
        elif metric == "false_negative":
            return m.FalseNegatives()
        else:
            raise ValueError("unknown metric: %s" % metric)
Example 22
def train(model, x_train, y_train):
    """
    Train the target model and save the weight of the model
    :param model: the model that will be trained
    :param x_train: the image as numpy format
    :param y_train: the label for x_train
    :param weights_path: path to save the model file
    :return: None
    """
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(learning_rate=5e-5),
                  metrics=[metrics.CategoricalAccuracy(), metrics.Precision(), metrics.Recall()])
    model.fit(x_train,
              y_train,
              batch_size=BATCH_SIZE,
              epochs=EPOCHS)
    model.save(WEIGHTS_PATH)
Example 23
    def __init__(self, n_features, n_classes):
        print("##################### Init NN #####################")
        self.N_FEATURES = n_features
        self.N_CLASSES = n_classes
        self.METRICS = [
            'accuracy',
            tkm.TruePositives(name='tp'),
            tkm.FalsePositives(name='fp'),
            tkm.TrueNegatives(name='tn'),
            tkm.FalseNegatives(name='fn'),
            #tkm.BinaryAccuracy(name='accuracy'),
            tkm.Precision(name='precision'),
            tkm.Recall(name='recall'),
            tkm.AUC(name='auc')
        ]

        self.DATE = datetime.now().strftime("%d-%m_%H%M%S")
        create_dir(self.DATE)
Example 24
def load_simple_model(model_path='',
                      weights_path='',
                      opt='sgd',
                      loss=None,
                      metrics_list=None):
    model = models.load_model(model_path)
    model.load_weights(weights_path)
    loss_dict = {
        'Categorical Crossentropy': 'categorical_crossentropy',
        'Binary Crossentropy': 'binary_crossentropy',
        'Categorical Hinge': 'categorical_hinge',
        'Huber loss': 'huber_loss'
    }
    metrics_dict = {
        'auc': metrics.AUC(),
        'recall': metrics.Recall(),
        'accuracy': (metrics.CategoricalAccuracy()
                     if loss and loss.startswith('Categorical')
                     else metrics.Accuracy()),
        'precision': metrics.Precision(),
        'categorical Hinge': metrics.CategoricalHinge(),
        'squared Hinge': metrics.SquaredHinge(),
        'Kullback-Leibler divergence': metrics.KLDivergence(),
        'mean absolute error': metrics.MeanAbsoluteError(),
        'mean squared error': metrics.MeanSquaredError()
    }
    if metrics_list is not None and len(metrics_list) > 0:
        metrics_list = [metrics_dict.get(m, m) for m in metrics_list]
    else:
        metrics_list = ['accuracy']

    loss_f = loss_dict.get(loss)

    model.compile(loss=loss_f, optimizer=opt, metrics=metrics_list)
    return model
Example 25
    def __init__(self, vocab_size, embedding_size, input_length) -> None:
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.input_length = input_length

        inputs = keras.Input((self.input_length,))
        model_head = self.get_model_head()(inputs)
        has_diff_out = self.get_mlp_model(2, 'has_diff')(model_head)
        self.model = keras.Model(inputs=inputs, outputs={
            'has_diff': has_diff_out,
        })
        my_losses = {
            "has_diff": losses.BinaryCrossentropy(from_logits=False),
        }
        my_metrics = {
            "has_diff": [metrics.Recall(), metrics.Precision()],
        }
        self.model.compile(optimizer=optimizers.Adam(),  # learning_rate=0.001
                           loss=my_losses,
                           metrics=my_metrics)
        self.model.summary()
Example 26
    def experiment(self, under=False, ratio=3, plot=False):
        METRICS = [
            metrics.TruePositives(name='tp'),
            metrics.FalsePositives(name='fp'),
            metrics.TrueNegatives(name='tn'),
            metrics.FalseNegatives(name='fn'),
            metrics.BinaryAccuracy(name='accuracy'),
            metrics.Precision(name='precision'),
            metrics.Recall(name='recall'),
            metrics.AUC(name='auc')
        ]

        data = DataLoader()
        model = LeNet(data.X, METRICS)
        augmenter = Augmenter(data.X, data.Y)

        if under:
            data.X, data.Y = augmenter.undersample(ratio=ratio)

        if self.augmentation.type == 1 or self.augmentation.type == 2:
            data.X, data.Y = augmenter.duplicate(noise=self.augmentation.noise,
                                                 sigma=self.augmentation.sigma)
        elif self.augmentation.type == 3:
            data.X, data.Y = augmenter.SMOTE()

        #data.normalize()
        #print(len(data.X))
        #print(len(data.valX))

        data.summarize(test=False)
        his = model.fit(data.X, data.Y, data.valX, data.valY)
        RES, fpr, tpr = model.predict(data.testX, data.testY)
        #self.model_summary(RES)
        if plot:
            self.plot(his)
            self.ROC(fpr, tpr)
        return RES
Example 27
    def compile(self, model, train_generator, valid_generator):
        """:arg
        This function contain model compile and model fit process, input a model and output history and trained model

        """
        start_time = time()
        print("*" * 40, "Start {} Processing".format(model._name), "*" * 40)

        # we use several metrics to evaluate our binary classification result
        METRICS = [
              metrics.TruePositives(name='tp'),
              metrics.FalsePositives(name='fp'),
              metrics.TrueNegatives(name='tn'),
              metrics.FalseNegatives(name='fn'),
              metrics.BinaryAccuracy(name='binary_accuracy'),
              #metrics.CategoricalAccuracy(name='accuracy'),
              metrics.Precision(name='precision'),
              metrics.Recall(name='recall'),
              metrics.AUC(name='auc'),
              # F1Score(num_classes = int(y_train.shape[1]), name='F1')
        ]

        # define an optimizer
        opt_rms = optimizers.RMSprop(learning_rate=1e-4, decay=1e-5)
        # define compile parameters; METRICS defined above is passed here so
        # the listed metrics are actually tracked during training
        model.compile(loss='binary_crossentropy', optimizer=opt_rms, metrics=METRICS)
        # start to fit
        history = model.fit(
            train_generator,
            steps_per_epoch=20,
            epochs=5,
            validation_data=valid_generator,
            validation_steps=20
        )

        return history
Example 28
                   name='DenseCell-2'),

        # final output layer
        Dense(1, activation='sigmoid', name='output'),
    ],
    name='Binary-Classifier')

NAME = 'SSBML-Transfer-Model'

OPTIMIZER = 'adam'

LOSS = Focal()

METRICS = [
    metrics.BinaryAccuracy(name='accuracy'),
    metrics.Precision(),
    metrics.Recall(),

    # this is an ugly hack but it is necessary as
    # keras does not simply have a "specificity" metric
    metrics.SpecificityAtSensitivity(
        sensitivity=.01,  # this doesn't matter
        num_thresholds=1,  # so we only get score at threshold = .5
        name='specificity')
]


def remove_head(base_model, trainable=False):
    ''' 
    Returns a copy of the base model with the head removed.
Example 29
    BATCH_SIZE = int(args.batch_size)
    DROPOUT = float(args.dropout)
    IMGSIZE = (int(args.imgsize[0]), int(args.imgsize[1]))
    LOGDIR = args.logdir
    DATA = args.data
    BACKBONE = args.backbone
    NAME = args.model

    # --- define model metrics ---
    METRICS = [
        metrics.TruePositives(name="True_Positives"),
        metrics.FalsePositives(name="False_Positives"),
        metrics.TrueNegatives(name="True_Negatives"),
        metrics.FalseNegatives(name="False_Negatives"),
        metrics.BinaryAccuracy(name="Binary_Accuracy"),
        metrics.Precision(name="Precision"),
        metrics.Recall(name="Recall"),
        metrics.AUC(name="AUC")
    ]

    # --- tensorflow callbacks ---
    date = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    # os.path.join uses the platform's separator, so no OS check is needed.
    LOGDIR = os.path.join(LOGDIR, NAME, date)
    if not os.path.isdir(LOGDIR):
        os.makedirs(LOGDIR, exist_ok=True)

    tensorboard = callbacks.TensorBoard(log_dir=LOGDIR,
                                        histogram_freq=1,
Example 30
    def train_model(self, themes_weight: List[float],
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        model = tf.keras.Sequential([
            # 1
            # keras.layers.Embedding(input_dim=voc_size, output_dim=firstLayoutOutputDim),
            # keras.layers.Dropout(0.2),
            # keras.layers.Conv1D(200,3,input_shape=(ARTICLE_MAX_WORD_COUNT,firstLayoutOutputDim), activation=tf.nn.relu),
            # keras.layers.GlobalAveragePooling1D(),
            # keras.layers.Dense(250, activation=tf.nn.relu),
            # keras.layers.Dense(theme_count, activation=tf.nn.softmax)

            # 2
            # keras.layers.Embedding(input_dim=voc_size, output_dim=firstLayoutOutputDim),
            # keras.layers.LSTM(ltsmOutputDim, dropout=0.2, recurrent_dropout=0.2, activation='tanh'),
            # keras.layers.Dense(theme_count, activation=tf.nn.softmax)

            # 3
            # keras.layers.Embedding(input_dim=self.voc_size, output_dim=embedding_output_dim),
            # keras.layers.Bidirectional(keras.layers.LSTM(intermediate_dim, return_sequences=True)),
            # # keras.layers.Dropout(0.1),
            # keras.layers.Bidirectional(keras.layers.LSTM(last_dim, dropout=0.05, recurrent_dropout=0.05)),
            # keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.softmax)

            # 4
            # keras.layers.Embedding(input_dim=self.voc_size, input_length=self.article_length, output_dim=embedding_output_dim),
            # keras.layers.Bidirectional(keras.layers.LSTM(intermediate_dim, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
            # keras.layers.Dropout(0.2),
            # keras.layers.Bidirectional(keras.layers.LSTM(last_dim * 2, recurrent_dropout=0.2)), #was last_dim * 2
            # keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid)

            # 5
            #keras.layers.Embedding(input_dim=self.voc_size, input_length=self.article_length, output_dim=embedding_output_dim),
            # keras.layers.Conv1D(filters=64, kernel_size=5, input_shape=(self.voc_size, embedding_output_dim), activation="relu"),
            # keras.layers.MaxPool1D(4),
            #keras.layers.Bidirectional(keras.layers.LSTM(intermediate_dim, recurrent_dropout=0.1)),
            #keras.layers.Dense(last_dim, activation=tf.nn.relu),
            #keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid)

            #6
            keras.layers.Embedding(input_dim=voc_size,
                                   input_length=article_length,
                                   output_dim=128,
                                   mask_zero=True),
            keras.layers.Bidirectional(
                keras.layers.LSTM(128, recurrent_dropout=0.2, dropout=0.2)),
            #keras.layers.Dropout(0.2),
            #keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid, use_bias=True,bias_initializer=tf.keras.initializers.Constant(-1.22818328))
            keras.layers.Dense(theme_count,
                               activation=tf.nn.sigmoid,
                               kernel_regularizer=regularizers.l2(0.1),
                               activity_regularizer=regularizers.l1(0.05))

            # 7
            # keras.layers.Embedding(input_dim=self.voc_size, input_length=self.article_length,
            #                        output_dim=embedding_output_dim),
            # keras.layers.GlobalAvgPool1D(),
            # keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid)
        ])

        model.summary()

        model.compile(
            optimizer=tf.keras.optimizers.Adam(clipnorm=1, clipvalue=0.5),
            #loss=WeightedBinaryCrossEntropy(themes_weight, from_logits=True),
            loss=keras.losses.BinaryCrossentropy(from_logits=True),
            metrics=[
                metrics.AUC(),
                metrics.BinaryAccuracy(),
                metrics.TruePositives(),
                metrics.TrueNegatives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.Recall(),
                metrics.Precision()
            ],
            run_eagerly=self.run_eagerly)

        keras.utils.plot_model(model, 'Model1.png', show_shapes=True)

        cb_list = [ManualInterrupter, keras_callback]

        model.fit(dataset.trainData,
                  epochs=10,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=cb_list,
                  class_weight={
                      0: 1,
                      1: themes_weight[0]
                  })

        model.save("output/" + self.get_model_name() + ".h5")
        model.save_weights("output/" + self.get_model_name() + "_weight.h5")

        self.__model__ = model
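
The class_weight={0: 1, 1: themes_weight[0]} mapping upweights the positive class during training. A common way to derive such a weight, sketched here under the assumption that label counts are available (the names and counts are illustrative, not from the original project):

# Upweight positives by the negative/positive ratio, a standard
# heuristic for imbalanced binary targets.
def positive_class_weight(n_negative, n_positive):
    return n_negative / max(n_positive, 1)

themes_weight = [positive_class_weight(n_negative=9000, n_positive=1000)]  # -> 9.0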