Example #1
def train_model(model: Model, base_model: Model,
                train_gen: classifier_sequence.ClassifierSequence,
                val_gen: classifier_sequence.ClassifierSequence) -> None:
    """Trains the model on the given data sets."""
    for layer in base_model.layers:
        layer.trainable = False

    opt = optimizers.SGD(learning_rate=0.00001, momentum=0.8, clipnorm=1)
    # opt = optimizers.Adam(learning_rate=0.000001)

    model.compile(optimizer=opt,
                  loss='binary_crossentropy',
                  metrics=[
                      'accuracy',
                      metrics.Recall(),
                      metrics.Precision(),
                      metrics.FalsePositives(),
                      metrics.FalseNegatives()
                  ])

    log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = callbacks.TensorBoard(log_dir=log_dir,
                                                 histogram_freq=1)

    model.fit(train_gen,
              validation_data=val_gen,
              epochs=60,
              callbacks=[tensorboard_callback])

    for layer in base_model.layers:
        layer.trainable = True

    opt = optimizers.SGD(learning_rate=0.00001, momentum=0.8, clipnorm=1)

    model.compile(optimizer=opt,
                  loss='binary_crossentropy',
                  metrics=[
                      'accuracy',
                      metrics.Recall(),
                      metrics.Precision(),
                      metrics.FalsePositives(),
                      metrics.FalseNegatives()
                  ])

    model.fit(train_gen,
              validation_data=val_gen,
              epochs=120,
              initial_epoch=60,
              callbacks=[tensorboard_callback])
Example #2
File: ff_models.py  Project: KorfLab/genDL
    def build(self):
        model = tf.keras.Sequential()
        model.add(layers.Flatten(input_shape=(42, 4)))

        for i in range(self.layers):
            if self.reg:
                model.add(
                    layers.Dense(self.sizes[i],
                                 activation='elu',
                                 kernel_regularizer=regularizers.l2(
                                     self.reg[i])))
            else:
                model.add(layers.Dense(self.sizes[i], activation='elu'))

            if self.dropout:
                model.add(layers.Dropout(self.dropout[i]))

        model.add(layers.Dense(1, activation='sigmoid'))

        model.compile(optimizer=optimizers.Adam(learning_rate=self.lr),
                      loss=losses.BinaryCrossentropy(),
                      metrics=[
                          'binary_accuracy',
                          metrics.TruePositives(name='tp'),
                          metrics.FalseNegatives(name='fn'),
                          metrics.TrueNegatives(name='tn'),
                          metrics.FalsePositives(name='fp'),
                          metrics.Recall(name='recall'),
                          metrics.Precision(name='precision')
                      ])

        return model
Example #3
def eval_use_model(model_name, path_model_file, test_file, class_num):
    """
    evaluating model by using entire model (weights, architecture, optimizers, etc.)

    Arguments:\n
    model_name --> String, Resnet50/Resnet18/VGG16/VGG19
    path_model_file --> String, path which store .hdf5 of model's weight\n
    test_file --> String, path to which store .h5 file of test dataset
    class_num --> Int, number of class/label\n

    Returns:\n
    none
    """
    # Load the full saved model (weights, architecture, optimizer state)
    new_model = load_model(path_model_file)
    new_model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=[
                          metrics.AUC(),
                          metrics.CategoricalAccuracy(),
                          metrics.TruePositives(),
                          metrics.TrueNegatives(),
                          metrics.FalsePositives(),
                          metrics.FalseNegatives()
                      ])

    # retrieve X_test, Y_test
    X_test, Y_test = retrieve_test_dataset(test_file, int(class_num))

    for i in range(4):
        hasil = new_model.evaluate(X_test, Y_test)
        print(new_model.metrics_names)
        print(hasil)
Example #4
 def call(self, y_true_local, y_pred_local):
     # Metric objects are stateful: update them with the batch, then read the
     # scalar counts via result(). Fresh instances per call avoid sharing
     # accumulated state between invocations.
     fp = metrics.FalsePositives()
     fn = metrics.FalseNegatives()
     tp = metrics.TruePositives()
     tn = metrics.TrueNegatives()
     for metric in (fp, fn, tp, tn):
         metric.update_state(y_true_local, y_pred_local)
     tp, tn, fp, fn = (x.result() for x in (tp, tn, fp, fn))
     return 0.5 * tn / (tn + fn) + tp / (tp + fp)
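A minimal usage sketch of the stateful-metric pattern above, assuming `metrics` is the `tensorflow.keras.metrics` module; the tensors are illustrative only:

import tensorflow as tf
from tensorflow.keras import metrics

# Illustrative labels and probabilities (not from the original project)
y_true = tf.constant([0., 1., 1., 0.])
y_pred = tf.constant([0.1, 0.9, 0.4, 0.6])

fn = metrics.FalseNegatives()   # thresholds predictions at 0.5 by default
fn.update_state(y_true, y_pred)
print(fn.result().numpy())      # 1.0 -- the 0.4 prediction on a true positive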
Example #5
def Simple(input_data):
    # Simple
    model = models.Sequential()
    model.add(
        layers.LSTM(256,
                    activation='tanh',
                    recurrent_activation='sigmoid',
                    recurrent_dropout=0,
                    unroll=False,
                    use_bias=True,
                    return_sequences=False,
                    time_major=False))
    model.add(layers.Dense(number_of_outputs, activation='sigmoid'))

    loss_fn = losses.BinaryCrossentropy()
    opt = optimizers.Adam(learning_rate=.005)

    myMetric = metrics.CategoricalCrossentropy()

    model.compile(optimizer=opt,
                  loss=loss_fn,
                  metrics=[myMetric, metrics.FalseNegatives()])

    inputPath = None
    outputPath = rootDir + getUniqueName("Simple")

    if inputPath is not None:
        print("Loading Model From A file")
        print(inputPath)
        model.load_weights(inputPath)

    class_weights = {}

    class_weights[0] = .15
    non_zero_class_weight = (1 - class_weights[0]) / (number_of_outputs - 1)
    for i in range(1, number_of_outputs):
        class_weights[i] = 1

    # cp_callback = callbacks.ModelCheckpoint(filepath=outputPath,
    #                                         save_weights_only=True,
    #                                         verbose=1)

    cp_callback = callbacks.ModelCheckpoint(filepath=outputPath,
                                            save_weights_only=True,
                                            save_best_only=True,
                                            verbose=1)

    print(outputPath)

    model.fit(
        input_data,
        epochs=50000,
        callbacks=[cp_callback],
        class_weight=class_weights,
    )
Example #6
def TwoWide(input_data):
    model = models.Sequential()
    model.add(
        layers.LSTM(415,
                    activation='tanh',
                    recurrent_activation='sigmoid',
                    recurrent_dropout=0,
                    unroll=False,
                    use_bias=True,
                    return_sequences=False,
                    time_major=False))
    model.add(layers.Dense(65 * 3, activation='relu'))
    model.add(layers.Dense(number_of_outputs, activation='sigmoid'))

    loss_fn = losses.BinaryCrossentropy()
    opt = optimizers.Adam(learning_rate=.0001)

    myMetric = metrics.CategoricalCrossentropy()

    model.compile(optimizer=opt,
                  loss=loss_fn,
                  metrics=[myMetric, metrics.FalseNegatives()])

    inputPath = rootDir + "Models/TwoWide2020-09-30 05:08:13.693069-27-0.0133-112-0.0074"
    outputPath = inputPath + "-{epoch:02d}-{loss:.4f}"

    if inputPath is not None:
        print("Loading Model From A file")
        print(inputPath)
        model.load_weights(inputPath)

    print("Saving Model To\n", outputPath)

    class_weights = {}

    class_weights[0] = .15
    non_zero_class_weight = (1 - class_weights[0]) / (number_of_outputs - 1)
    for i in range(1, number_of_outputs):
        class_weights[i] = 1.15

    cp_callback = callbacks.ModelCheckpoint(filepath=outputPath,
                                            save_weights_only=True,
                                            monitor='loss',
                                            verbose=1)

    print(outputPath)

    model.fit(input_data,
              epochs=50000,
              callbacks=[cp_callback],
              class_weight=class_weights,
              initial_epoch=113)
Example #7
 def confusion_matrix(self, y_label, y_class):
     tn = metrics.TrueNegatives()
     tn.update_state(y_label, y_class)
     print('TrueNegatives result: ', tn.result().numpy())
     tp = metrics.TruePositives()
     tp.update_state(y_label, y_class)
     print('TruePositives result: ', tp.result().numpy())
     fn = metrics.FalseNegatives()
     fn.update_state(y_label, y_class)
     print('FalseNegatives result: ', fn.result().numpy())
     fp = metrics.FalsePositives()
     fp.update_state(y_label, y_class)
     print('FalsePositives result: ', fp.result().numpy())
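One caveat worth noting for the helper above: these metric objects are stateful and accumulate across `update_state` calls, so reusing one between evaluations requires a reset. A short sketch (`reset_state` is spelled `reset_states` on older TF 2.x releases):

from tensorflow.keras import metrics

tp = metrics.TruePositives()
tp.update_state([0, 1, 1], [0, 1, 0])
print(tp.result().numpy())  # 1.0

tp.reset_state()            # without this, counts keep accumulating
tp.update_state([1, 1], [1, 1])
print(tp.result().numpy())  # 2.0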
Example #8
 def __get_metric(self, metric):
     if metric == "auc":
         return m.AUC()
     elif metric == "accuracy":
         return m.Accuracy()
     elif metric == "binary_accuracy":
         return m.BinaryAccuracy()
     elif metric == "categorical_accuracy":
         return m.CategoricalAccuracy()
     elif metric == "binary_crossentropy":
         return m.BinaryCrossentropy()
     elif metric == "categorical_crossentropy":
         return m.CategoricalCrossentropy()
     elif metric == "sparse_categorical_crossentropy":
         return m.SparseCategoricalCrossentropy()
     elif metric == "kl_divergence":
         return m.KLDivergence()
     elif metric == "poisson":
         return m.Poission()
     elif metric == "mse":
         return m.MeanSquaredError()
     elif metric == "rmse":
         return m.RootMeanSquaredError()
     elif metric == "mae":
         return m.MeanAbsoluteError()
     elif metric == "mean_absolute_percentage_error":
         return m.MeanAbsolutePercentageError()
     elif metric == "mean_squared_logarithm_error":
         return m.MeanSquaredLogarithmError()
     elif metric == "cosine_similarity":
         return m.CosineSimilarity()
     elif metric == "log_cosh_error":
         return m.LogCoshError()
     elif metric == "precision":
         return m.Precision()
     elif metric == "recall":
         return m.Recall()
     elif metric == "true_positive":
         return m.TruePositives()
     elif metric == "true_negative":
         return m.TrueNegatives()
     elif metric == "false_positive":
         return m.FalsePositives()
     elif metric == "false_negative":
         return m.FalseNegatives()
     else:
         raise Exception("specified metric not defined")
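The elif chain works, but a dictionary of metric factories is a more compact equivalent. A sketch under the same `m` alias for `tensorflow.keras.metrics` (`_METRIC_FACTORIES` and the trimmed entry set are illustrative):

 _METRIC_FACTORIES = {
     "auc": m.AUC,
     "accuracy": m.Accuracy,
     "precision": m.Precision,
     "recall": m.Recall,
     "false_positive": m.FalsePositives,
     "false_negative": m.FalseNegatives,
     # ... remaining entries exactly as in the chain above
 }

 def __get_metric(self, metric):
     try:
         return _METRIC_FACTORIES[metric]()  # instantiate on lookup
     except KeyError:
         raise Exception("specified metric not defined")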
Example #9
def create_model_v2(data):
    name_len = len(data['training_data'][0][0])
    gender_len = len(data['training_data'][0][2])
    dob_len = len(data['training_data'][0][3])

    i_name_1 = keras.Input(name="name_1", shape=(name_len, 1))
    i_name_2 = keras.Input(name="name_2", shape=(name_len, 1))
    i_last_name_1 = keras.Input(name="last_name_1", shape=(name_len, 1))
    i_last_name_2 = keras.Input(name="last_name_2", shape=(name_len, 1))
    i_gender_1 = keras.Input(name="gender_1", shape=(gender_len, ))
    i_gender_2 = keras.Input(name="gender_2", shape=(gender_len, ))
    i_dob_1 = keras.Input(name="dob_1", shape=(dob_len, 1))
    i_dob_2 = keras.Input(name="dob_2", shape=(dob_len, 1))
    i_ratio_n = keras.Input(name="fuzz_n", shape=(1, ))
    i_ratio_ln = keras.Input(name="fuzz_ln", shape=(1, ))
    i_ratio_d = keras.Input(name="fuzz_d", shape=(1, ))

    l4_combined = layers.Concatenate()([
        create_name_branch(i_name_1, i_name_2),
        create_name_branch(i_last_name_1, i_last_name_2),
        create_gender_branch(i_gender_1, i_gender_2),
        create_dob_branch(i_dob_1, i_dob_2),
        create_ratio_branch(i_ratio_n, 10),
        create_ratio_branch(i_ratio_ln, 10),
        create_ratio_branch(i_ratio_d, 5)
    ])
    l5_brain = layers.Dense(20, activation='tanh')(l4_combined)

    model = keras.Model(inputs=[
        i_name_1, i_last_name_1, i_gender_1, i_dob_1, i_name_2, i_last_name_2,
        i_gender_2, i_dob_2, i_ratio_n, i_ratio_ln, i_ratio_d
    ],
                        outputs=[
                            layers.Dense(1, activation='sigmoid')(l5_brain)
                        ])

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=[
                      'accuracy',
                      metrics.FalsePositives(),
                      metrics.FalseNegatives()
                  ])

    return model
Example #10
    def __init__(self, n_features, n_classes):
        print("##################### Init NN #####################")
        self.N_FEATURES = n_features
        self.N_CLASSES = n_classes
        self.METRICS = [
            'accuracy',
            tkm.TruePositives(name='tp'),
            tkm.FalsePositives(name='fp'),
            tkm.TrueNegatives(name='tn'),
            tkm.FalseNegatives(name='fn'),
            #tkm.BinaryAccuracy(name='accuracy'),
            tkm.Precision(name='precision'),
            tkm.Recall(name='recall'),
            tkm.AUC(name='auc')
        ]

        self.DATE = datetime.now().strftime("%d-%m_%H%M%S")
        create_dir(self.DATE)
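The `name=` arguments are not cosmetic: Keras exposes each metric in `history.history` and to callbacks under that name, with a `val_` prefix on validation data. A sketch of the payoff (the model and data are assumed):

from tensorflow.keras.callbacks import EarlyStopping

# 'val_auc' is available because AUC was registered with name='auc'
early_stop = EarlyStopping(monitor='val_auc', mode='max', patience=5)
# model.fit(X, y, validation_split=0.2, callbacks=[early_stop])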
Example #11
    def experiment(self, under=False, ratio=3, plot=False):
        METRICS = [
            metrics.TruePositives(name='tp'),
            metrics.FalsePositives(name='fp'),
            metrics.TrueNegatives(name='tn'),
            metrics.FalseNegatives(name='fn'),
            metrics.BinaryAccuracy(name='accuracy'),
            metrics.Precision(name='precision'),
            metrics.Recall(name='recall'),
            metrics.AUC(name='auc')
        ]

        data = DataLoader()
        model = LeNet(data.X, METRICS)
        augmenter = Augmenter(data.X, data.Y)

        if under:
            data.X, data.Y = augmenter.undersample(ratio=ratio)

        if self.augmentation.type == 1 or self.augmentation.type == 2:
            data.X, data.Y = augmenter.duplicate(noise=self.augmentation.noise,
                                                 sigma=self.augmentation.sigma)
        elif self.augmentation.type == 3:
            data.X, data.Y = augmenter.SMOTE()

        #data.normalize()
        #print(len(data.X))
        #print(len(data.valX))

        data.summarize(test=False)
        his = model.fit(data.X, data.Y, data.valX, data.valY)
        RES, fpr, tpr = model.predict(data.testX, data.testY)
        #self.model_summary(RES)
        if plot:
            self.plot(his)
            self.ROC(fpr, tpr)
        return RES
Example #12
    def compile(self, model, train_generator, valid_generator):
        """:arg
        This function contain model compile and model fit process, input a model and output history and trained model

        """
        start_time = time()
        print("*" * 40, "Start {} Processing".format(model._name), "*" * 40)

        # we use a variety of metrics to evaluate our binary classification result
        METRICS = [
              metrics.TruePositives(name='tp'),
              metrics.FalsePositives(name='fp'),
              metrics.TrueNegatives(name='tn'),
              metrics.FalseNegatives(name='fn'),
              metrics.BinaryAccuracy(name='binary_accuracy'),
              #metrics.CategoricalAccuracy(name='accuracy'),
              metrics.Precision(name='precision'),
              metrics.Recall(name='recall'),
              metrics.AUC(name='auc'),
              # F1Score(num_classes = int(y_train.shape[1]), name='F1')
        ]

        # define an optimizer
        opt_rms = optimizers.RMSprop(learning_rate=1e-4, decay=1e-5)
        # define compile parameters, passing the METRICS list defined above
        model.compile(loss='binary_crossentropy', optimizer=opt_rms, metrics=METRICS)
        # start to fit
        history = model.fit(
            train_generator,
            steps_per_epoch=20,
            epochs=5,
            validation_data=valid_generator,
            validation_steps=20
        )

        return history
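The returned History object stores one list per metric per epoch, keyed by the metric names given above (validation entries get a `val_` prefix). A short usage sketch (the generators are assumed):

history = self.compile(model, train_generator, valid_generator)
print(history.history.keys())      # e.g. dict_keys(['loss', 'tp', ..., 'val_auc'])
print(history.history['val_auc'])  # one value per epoch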
Example #13
                       .map(VAL_image_augmentor)
        elif data_subset_mode == 'test':
            data = data.batch(batch_size, drop_remainder=True) \
                       .map(TEST_image_augmentor)

        if infinite:
            data = data.repeat()

        return data.prefetch(AUTOTUNE)

    METRICS = [
    #     per_class_accuracy,
        metrics.TruePositives(name='tp'),
        metrics.FalsePositives(name='fp'),
        metrics.TrueNegatives(name='tn'),
        metrics.FalseNegatives(name='fn'),
        metrics.CategoricalAccuracy(name='accuracy'),
        metrics.Precision(name='precision'),
        metrics.Recall(name='recall'),
        metrics.TopKCategoricalAccuracy(name='top_3_categorical_accuracy', k=3),
        metrics.TopKCategoricalAccuracy(name='top_5_categorical_accuracy', k=5)
    ]
    ###########################################################################
    ###########################################################################


    encoder = base_dataset.LabelEncoder(data.data.family)
    split_data = base_dataset.preprocess_data(data, encoder, data_config)
    for subset, subset_data in split_data.items():
        split_data[subset] = [list(i) for i in unzip(subset_data)]
Example #14
import argparse

import numpy as np
from tensorflow.keras import metrics as keras_metrics
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

from ProteinDataset import ProteinDataset, mask_generator

EPOCHS_DEFAULT = 100
PATIENCE_DEFAULT = 10
METRICS = [
    keras_metrics.TruePositives(name="tp"),
    keras_metrics.FalsePositives(name="fp"),
    keras_metrics.TrueNegatives(name="tn"),
    keras_metrics.FalseNegatives(name="fn"),
    keras_metrics.BinaryAccuracy(name="accuracy"),
    keras_metrics.Precision(name="precision"),
    keras_metrics.Recall(name="recall"),
    keras_metrics.AUC(name="auc"),
]

parser = argparse.ArgumentParser(
    description="Run a Logistic Regression pipeline.")
parser.add_argument("dataset_pkl", help="Dataset (in PKL format) to use.")
parser.add_argument(
    "--name",
    help="Configuration name (e.g. 'contacts' or 'topology'). "
    "This will be tagged during the MLFlow run.",
    default=None,
)
parser.add_argument("--epochs",
Example #15
def run_model(model,
              train_generator,
              validation_generator,
              min_lr,
              max_lr,
              model_path,
              tensorboard_path,
              trial_id,
              optimizer,
              hparams=None,
              step_factor=8,
              epochs=120,
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False)):
    checkpoint_path = os.path.join(model_path, 'cp-best.cpkt')
    checkpoint = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                    monitor='val_AUC',
                                                    mode='max',
                                                    verbose=1,
                                                    save_freq='epoch',
                                                    save_best_only=True,
                                                    save_weights_only=True)
    tensorboard = tf.keras.callbacks.TensorBoard(
        log_dir=tensorboard_path, profile_batch=0,
        write_graph=True)  # profile_batch='300,401',

    lrate = SGDRScheduler(min_lr=min_lr,
                          max_lr=max_lr,
                          steps_per_epoch=len(train_generator),
                          cycle_length=step_factor,
                          lr_decay=0.5,
                          mult_factor=1,
                          gentle_start_epochs=0,
                          gentle_fraction=1.0)
    add_lr = Add_Images_and_LR(log_dir=tensorboard_path, add_images=False)
    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_AUC',
                                                  patience=300,
                                                  verbose=True,
                                                  mode='max')
    callbacks = [tensorboard, lrate, add_lr]
    # if epochs < 9000:
    callbacks += [early_stop]
    if hparams is not None:
        hp_callback = Callback(tensorboard_path,
                               hparams=hparams,
                               trial_id='Trial_ID:{}'.format(trial_id))
        callbacks += [hp_callback]
    callbacks += [checkpoint]
    METRICS = [
        metrics.TruePositives(name='TruePositive'),
        metrics.FalsePositives(name='FalsePositive'),
        metrics.TrueNegatives(name='TrueNegative'),
        metrics.FalseNegatives(name='FalseNegative'),
        metrics.BinaryAccuracy(name='Accuracy'),
        metrics.Precision(name='Precision'),
        metrics.Recall(name='Recall'),
        metrics.AUC(name='AUC'),
    ]
    print('\n\n\n\nRunning {}\n\n\n\n'.format(tensorboard_path))
    model.compile(optimizer, loss=loss, metrics=METRICS)
    model.fit(train_generator.data_set,
              epochs=epochs,
              steps_per_epoch=len(train_generator),
              validation_data=validation_generator.data_set,
              validation_steps=len(validation_generator),
              validation_freq=10,
              callbacks=callbacks)
    model.save(os.path.join(model_path, 'final_model.h5'))
    tf.keras.backend.clear_session()
    return None
Example #16
    def train_model(self, themes_weight: ThemeWeights,
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):

        input = keras.layers.Input(shape=(dataset.article_length))

        outputs: List[keras.layers.Layer] = []

        for i in range(0, dataset.theme_count):
            print("")
            dense = keras.layers.Embedding(
                input_dim=voc_size, output_dim=self.embedding_size)(input)
            lstm = keras.layers.Bidirectional(
                keras.layers.LSTM(self.LTSM_output_size,
                                  recurrent_dropout=0.2,
                                  dropout=0.2))(dense)
            dropout = keras.layers.Dropout(0.2)(lstm)
            dense2 = keras.layers.Dense(units=self.dense2_output_size,
                                        activation=tf.nn.relu)(dropout)
            output = keras.layers.Dense(
                units=1,
                activation=tf.nn.sigmoid,
                name=str(i),
                kernel_regularizer=regularizers.l2(0.01),
                activity_regularizer=regularizers.l1(0.01))(dense2)
            outputs.append(output)

        if len(outputs) > 1:
            outputs = [keras.layers.concatenate(outputs)]
        # a single-element output list can be passed to keras.Model as-is

        model = keras.Model(inputs=[input], outputs=outputs)

        model.compile(
            optimizer=tf.keras.optimizers.Adam(clipnorm=1, clipvalue=0.5),
            #loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
            loss=WeightedBinaryCrossEntropy(
                weights=themes_weight.weight_list(), from_logits=True),
            # loss = {"0" : tf.keras.losses.BinaryCrossentropy(from_logits=True),
            #         "1" : tf.keras.losses.BinaryCrossentropy(from_logits=True)},
            metrics=[
                metrics.AUC(multi_label=True),
                metrics.BinaryAccuracy(),
                metrics.TruePositives(),
                metrics.TrueNegatives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.Recall(),
                metrics.Precision()
            ],
            run_eagerly=False)

        model.summary()

        keras.utils.plot_model(model,
                               self.__model_name__ + '.png',
                               show_shapes=True)

        callbacks = [ManualInterrupter(), keras_callback]

        # model.fit(self.dataset.trainData, epochs=15, steps_per_epoch=self.dataset.train_batch_count,
        #           validation_data=self.dataset.validationData, validation_steps=self.dataset.validation_batch_count,
        #           callbacks=callbacks, class_weight=self.theme_weight)

        # model.fit(self.dataset.trainData, epochs=10, steps_per_epoch=self.dataset.train_batch_count,
        #           validation_data=self.dataset.validationData, validation_steps=self.dataset.validation_batch_count,
        #           callbacks=callbacks, class_weight={ 0 : 1, 1 : 7.8, 2 : 4.3})

        model.fit(dataset.trainData,
                  epochs=40,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=callbacks)

        self.__model__ = model
Example #17
    def compile_fit(self,
                    model_input,
                    q_train_padded,
                    a_train_padded,
                    y_q_label_df,
                    y_a_label_df,
                    y_q_classify_list,
                    y_q_classify_dict,
                    y_a_classify_list,
                    y_a_classify_dict,
                    epoch_num=3):
        """
        This function is used to switch between numrical. The switch controled by hyperparameters self.TYPE
        When self.TYPE == 'num', input will be q_train_padded and y_q_label_df (others are same)
        Meanwhile, switch to ['MSE'] as loss and ['mse', 'mae'] as metrics

        When self.TYPE == 'classify', input will be q_train_padded and y_q_classify_list[0] etc.
        Meanwhile, swith to ['categorical_crossentropy'] as loss and ['accuracy'] as metrics

        """
        start_time = time()
        print("*" * 40, "Start {} Processing".format(model_input._name),
              "*" * 40)
        # loss_fun = 'categorical_crossentropy'
        # loss_fun = 'MSE' #MeanSquaredError
        # loss_fun = '

        METRICS = [
            metrics.TruePositives(name='tp'),
            metrics.FalsePositives(name='fp'),
            metrics.TrueNegatives(name='tn'),
            metrics.FalseNegatives(name='fn'),
            metrics.CategoricalAccuracy(name='accuracy'),
            metrics.Precision(name='precision'),
            metrics.Recall(name='recall'),
            metrics.AUC(name='auc'),
            # F1Score(num_classes = int(y_train.shape[1]), name='F1')
        ]

        loss_fun = None
        metrics_fun = None
        # Because the data input is large, we want processing to be automatic,
        # so self.PART selects the question or answer pipeline.
        if self.PART == 'q':
            print("Start processing question part")
            # start to decide compile parameters
            if self.TYPE == 'num':
                print("Start numerical output")
                # call split
                X_train, X_val, y_train, y_val = self.split_data(
                    q_train_padded, y_q_label_df, test_size=0.2)
                loss_fun = losses.MeanSquaredError()
                metrics_fun = ['mse', 'mae']
            elif self.TYPE == 'classify':
                print("Start classify output")
                X_train, X_val, y_train, y_val = self.split_data(
                    q_train_padded, y_q_classify_list[0], test_size=0.2)
                loss_fun = losses.CategoricalCrossentropy()
                metrics_fun = METRICS
            else:
                print("UNKNOW self.TYPE")

        elif self.PART == 'a':
            print("Start processing answer part")
            if self.TYPE == 'num':
                print("Start numerical output")
                # call split
                X_train, X_val, y_train, y_val = self.split_data(
                    a_train_padded, y_a_label_df, test_size=0.2)
                loss_fun = losses.MeanSquaredError()
                metrics_fun = ['mse', 'mae']
            elif self.TYPE == 'classify':
                print("Start classify output")
                X_train, X_val, y_train, y_val = self.split_data(
                    a_train_padded, y_a_classify_list[0], test_size=0.2)
                loss_fun = losses.CategoricalCrossentropy()
                metrics_fun = METRICS
            else:
                print("UNKNOW self.TYPE")

        learning_rate = 1e-3
        opt_adam = optimizers.Adam(learning_rate=learning_rate, decay=1e-5)
        model_input.compile(loss=loss_fun,
                            optimizer=opt_adam,
                            metrics=metrics_fun)
        # batch_size is limited by my GPU and its memory; after testing, 32 is
        # a reasonable value. If the vectors are bigger, decrease this value.

        history = model_input.fit(
            X_train,
            y_train,
            validation_data=(X_val, y_val),
            epochs=epoch_num,
            batch_size=16,
            verbose=1,
            callbacks=[PredictCallback(X_val, y_val, model_input)])
        # spearmanr_list = PredictCallback(X_val, y_val, model_input).spearmanr_list
        # dic = ['loss', 'accuracy', 'val_loss','val_accuracy']
        history_dict = [x for x in history.history]
        # model_input.predict(train_features[:10])

        cost_time = round((time() - start_time), 4)
        print("*" * 40,
              "End {} with {} seconds".format(model_input._name, cost_time),
              "*" * 40,
              end='\n\n')

        return history, model_input
Example #18
def train(
    csv_path,
    model_save_path,
    tfrecords_path,
    volume_shape=(128, 128, 128),
    image_size=(128, 128),
    dropout=0.2,
    batch_size=16,
    n_classes=2,
    n_epochs=15,
    mode="CV",
):
    """Train a model.

    Parameters
    ----------
    csv_path: str - Path
        Path to the csv file containing training volume paths, labels (X, Y).
    model_save_path: str - Path
        Path to where the save model and model weights.
    tfrecords_path: str - Path
        Path to preprocessed training tfrecords.
    volume_shape: tuple of size 3, optional, default=(128, 128, 128)
        The shape of the preprocessed volumes.
    image_size: tuple of size 2, optional, default=(128, 128)
        The shape of a 2D slice along each volume axis.
    dropout: float, optional, default=0.2
         Float between 0 and 1. Fraction of the input units to drop.
    batch_size: int, optional, default=16
        No. of training examples utilized in each iteration.
    n_classes: int, optional, default=2
        No. of unique classes to train the model on. Default assumption is a
        binary classifier.
    n_epochs: int, optional, default=15
        No. of complete passes through the training dataset.
    mode: str, optional, default="CV"
        One of "CV" or "full". Indicates the type of training to perform.

    Returns
    -------
    `tf.keras.callbacks.History`
        A History object that records several metrics such as training/validation loss/metrics
        at successive epochs.
    """

    train_csv_path = os.path.join(csv_path, "training.csv")
    train_paths = pd.read_csv(train_csv_path)["X"].values
    train_labels = pd.read_csv(train_csv_path)["Y"].values

    if mode == "CV":
        valid_csv_path = os.path.join(csv_path, "validation.csv")
        valid_paths = pd.read_csv(valid_csv_path)["X"].values
        # valid_labels = pd.read_csv(valid_csv_path)["Y"].values

    weights = class_weight.compute_class_weight(class_weight="balanced",
                                                classes=np.unique(train_labels),
                                                y=train_labels)
    weights = dict(enumerate(weights))

    planes = ["axial", "coronal", "sagittal", "combined"]

    global_batch_size = batch_size

    os.makedirs(model_save_path, exist_ok=True)
    cp_save_path = os.path.join(model_save_path, "weights")
    logdir_path = os.path.join(model_save_path, "tb_logs")
    metrics_path = os.path.join(model_save_path, "metrics")

    os.makedirs(metrics_path, exist_ok=True)

    for plane in planes:

        logdir = os.path.join(logdir_path, plane)
        os.makedirs(logdir, exist_ok=True)

        tbCallback = TensorBoard(log_dir=logdir)

        os.makedirs(os.path.join(cp_save_path, plane), exist_ok=True)

        model_checkpoint = ModelCheckpoint(
            os.path.join(cp_save_path, plane, "best-wts.h5"),
            monitor="val_loss",
            save_weights_only=True,
            mode="min",
        )

        if not plane == "combined":
            lr = 1e-3
            model = _model.Submodel(
                input_shape=image_size,
                dropout=dropout,
                name=plane,
                include_top=True,
                weights=None,
            )
        else:
            lr = 5e-4
            model = _model.CombinedClassifier(
                input_shape=image_size,
                dropout=dropout,
                trainable=True,
                wts_root=cp_save_path,
            )

        print("Submodel: ", plane)

        METRICS = [
            metrics.TruePositives(name="tp"),
            metrics.FalsePositives(name="fp"),
            metrics.TrueNegatives(name="tn"),
            metrics.FalseNegatives(name="fn"),
            metrics.BinaryAccuracy(name="accuracy"),
            metrics.Precision(name="precision"),
            metrics.Recall(name="recall"),
            metrics.AUC(name="auc"),
        ]

        model.compile(
            loss=tf.keras.losses.binary_crossentropy,
            optimizer=Adam(learning_rate=lr),
            metrics=METRICS,
        )

        dataset_train = get_dataset(
            file_pattern=os.path.join(tfrecords_path, "data-train_*"),
            n_classes=n_classes,
            batch_size=global_batch_size,
            volume_shape=volume_shape,
            plane=plane,
            shuffle_buffer_size=global_batch_size,
        )

        steps_per_epoch = math.ceil(len(train_paths) / batch_size)

        if mode == "CV":
            earlystopping = EarlyStopping(monitor="val_loss", patience=3)

            dataset_valid = get_dataset(
                file_pattern=os.path.join(tfrecords_path, "data-valid_*"),
                n_classes=n_classes,
                batch_size=global_batch_size,
                volume_shape=volume_shape,
                plane=plane,
                shuffle_buffer_size=global_batch_size,
            )

            validation_steps = math.ceil(len(valid_paths) / batch_size)

            history = model.fit(
                dataset_train,
                epochs=n_epochs,
                steps_per_epoch=steps_per_epoch,
                validation_data=dataset_valid,
                validation_steps=validation_steps,
                callbacks=[tbCallback, model_checkpoint, earlystopping],
                class_weight=weights,
            )


        else:
            earlystopping = EarlyStopping(monitor="loss", patience=3)
            print(model.summary())
            print("Steps/Epoch: ", steps_per_epoch)
            history = model.fit(
                dataset_train,
                epochs=n_epochs,
                steps_per_epoch=steps_per_epoch,
                callbacks=[tbCallback, model_checkpoint, earlystopping],
                class_weight=weights,
            )

        hist_df = pd.DataFrame(history.history)
        jsonfile = os.path.join(metrics_path, plane + ".json")

        with open(jsonfile, mode="w") as f:
            hist_df.to_json(f)

    return history
Example #19
def dag_2_cnn(dag, gpuID, input_shape=(256,256,1), target_shape=(256,256,1), pretrained_weights = None, compile=True):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpuID)
    nodes = list(dag.nodes())
    
    #breadth-first search starting at root
    bfs = nx.bfs_successors(dag, nodes[0])
    modules = dict()

    #root will always have this name
    assert(nodes[0] == 'input_0_0')

    with tf.device('/gpu:{}'.format(gpuID)):
        modules[nodes[0]] = Input(input_shape)
        
        for branch in bfs: #branch: tuple with (node, [list of successors to node])
            for successor in branch[1]:
                modules = traverse(dag, successor, modules)
                
        leaves = [x for x in dag.nodes() if dag.out_degree(x)==0 and dag.in_degree(x)>0]
        
        if len(leaves) == 1:
            output = modules[leaves[0]]
        else:
            raise NotImplementedError
        
        #NOTE: mean iou removed from metrics 21/07/2020
        model = Model(inputs=modules['input_0_0'], outputs=output)
        
        if compile:
            model.compile(optimizer=Adam(learning_rate=1e-4),
                          loss='binary_crossentropy',
                          metrics=['accuracy', metrics.Precision(), metrics.Recall(),
                                   metrics.TruePositives(), metrics.TrueNegatives(),
                                   metrics.FalsePositives(), metrics.FalseNegatives(),
                                   metrics.AUC()])
        
        
        if pretrained_weights:
            model.load_weights(pretrained_weights)

    return model
Example #20
## fine-tune transformer
from tensorflow.keras              import metrics
from tensorflow.keras.optimizers   import RMSprop, SGD

losses = {'clone': "binary_crossentropy",
          'partial': "binary_crossentropy",
          'fufi': "binary_crossentropy"
}

lossWeights = {'clone': weight_ccpf[1],
               'partial': weight_ccpf[2],
               'fufi': weight_ccpf[3]
}

metrics_use = {'clone': [metrics.TruePositives(),
                         metrics.FalseNegatives(),
                         metrics.FalsePositives(),
                         metrics.TrueNegatives()],
               'partial': [metrics.TruePositives(),
                           metrics.FalseNegatives(),
                           metrics.FalsePositives(),
                           metrics.TrueNegatives()],
               'fufi': [metrics.TruePositives(),
                        metrics.FalseNegatives(),
                        metrics.FalsePositives(),
                        metrics.TrueNegatives()]
}

just_trnsf.compile(optimizer=RMSprop(learning_rate=0.0001), loss=losses,
                   loss_weights=lossWeights, metrics=metrics_use)
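For a multi-output model like `just_trnsf`, the keys of `losses`, `lossWeights`, and `metrics_use` must match the model's output layer names ('clone', 'partial', 'fufi'). A self-contained sketch of the same pattern with illustrative layer names:

import tensorflow as tf
from tensorflow.keras import layers, metrics

inp = tf.keras.Input(shape=(8,))
clone = layers.Dense(1, activation='sigmoid', name='clone')(inp)
fufi = layers.Dense(1, activation='sigmoid', name='fufi')(inp)
model = tf.keras.Model(inp, [clone, fufi])

model.compile(optimizer='rmsprop',
              loss={'clone': 'binary_crossentropy',
                    'fufi': 'binary_crossentropy'},
              metrics={'clone': [metrics.TruePositives()],
                       'fufi': [metrics.FalseNegatives()]})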
                   
Example #21
    def train_model(self, themes_weight: List[float],
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        model = tf.keras.Sequential([
            # 1
            # keras.layers.Embedding(input_dim=voc_size, output_dim=firstLayoutOutputDim),
            # keras.layers.Dropout(0.2),
            # keras.layers.Conv1D(200,3,input_shape=(ARTICLE_MAX_WORD_COUNT,firstLayoutOutputDim), activation=tf.nn.relu),
            # keras.layers.GlobalAveragePooling1D(),
            # keras.layers.Dense(250, activation=tf.nn.relu),
            # keras.layers.Dense(theme_count, activation=tf.nn.softmax)

            # 2
            # keras.layers.Embedding(input_dim=voc_size, output_dim=firstLayoutOutputDim),
            # keras.layers.LSTM(ltsmOutputDim, dropout=0.2, recurrent_dropout=0.2, activation='tanh'),
            # keras.layers.Dense(theme_count, activation=tf.nn.softmax)

            # 3
            # keras.layers.Embedding(input_dim=self.voc_size, output_dim=embedding_output_dim),
            # keras.layers.Bidirectional(keras.layers.LSTM(intermediate_dim, return_sequences=True)),
            # # keras.layers.Dropout(0.1),
            # keras.layers.Bidirectional(keras.layers.LSTM(last_dim, dropout=0.05, recurrent_dropout=0.05)),
            # keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.softmax)

            # 4
            # keras.layers.Embedding(input_dim=self.voc_size, input_length=self.article_length, output_dim=embedding_output_dim),
            # keras.layers.Bidirectional(keras.layers.LSTM(intermediate_dim, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
            # keras.layers.Dropout(0.2),
            # keras.layers.Bidirectional(keras.layers.LSTM(last_dim * 2, recurrent_dropout=0.2)), #was last_dim * 2
            # keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid)

            # 5
            #keras.layers.Embedding(input_dim=self.voc_size, input_length=self.article_length, output_dim=embedding_output_dim),
            # keras.layers.Conv1D(filters=64, kernel_size=5, input_shape=(self.voc_size, embedding_output_dim), activation="relu"),
            # keras.layers.MaxPool1D(4),
            #keras.layers.Bidirectional(keras.layers.LSTM(intermediate_dim, recurrent_dropout=0.1)),
            #keras.layers.Dense(last_dim, activation=tf.nn.relu),
            #keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid)

            #6
            keras.layers.Embedding(input_dim=voc_size,
                                   input_length=article_length,
                                   output_dim=128,
                                   mask_zero=True),
            keras.layers.Bidirectional(
                keras.layers.LSTM(128, recurrent_dropout=0.2, dropout=0.2)),
            #keras.layers.Dropout(0.2),
            #keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid, use_bias=True,bias_initializer=tf.keras.initializers.Constant(-1.22818328))
            keras.layers.Dense(theme_count,
                               activation=tf.nn.sigmoid,
                               kernel_regularizer=regularizers.l2(0.1),
                               activity_regularizer=regularizers.l1(0.05))

            # 7
            # keras.layers.Embedding(input_dim=self.voc_size, input_length=self.article_length,
            #                        output_dim=embedding_output_dim),
            # keras.layers.GlobalAvgPool1D(),
            # keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid)
        ])

        model.summary()

        model.compile(
            optimizer=tf.keras.optimizers.Adam(clipnorm=1, clipvalue=0.5),
            #loss=WeightedBinaryCrossEntropy(themes_weight, from_logits=True),
            loss=keras.losses.BinaryCrossentropy(from_logits=True),
            metrics=[
                metrics.AUC(),
                metrics.BinaryAccuracy(),
                metrics.TruePositives(),
                metrics.TrueNegatives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.Recall(),
                metrics.Precision()
            ],
            run_eagerly=self.run_eagerly)

        keras.utils.plot_model(model, 'Model1.png', show_shapes=True)

        cb_list = [ManualInterrupter(), keras_callback]

        model.fit(dataset.trainData,
                  epochs=10,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=cb_list,
                  class_weight={
                      0: 1,
                      1: themes_weight[0]
                  })

        model.save("output/" + self.get_model_name() + ".h5")
        model.save_weights("output/" + self.get_model_name() + "_weight.h5")

        self.__model__ = model
Example #22
            for key in model_parameters.keys():
                if isinstance(model_parameters[key], np.int64):
                    model_parameters[key] = int(model_parameters[key])
                elif isinstance(model_parameters[key], np.float64):
                    model_parameters[key] = float(model_parameters[key])
            out_parameters['Model_Index'].append(model_index)
            model_base_path = r'H:\Deeplearning_Recurrence_Work\Models\Model_Index_{}'.format(model_index)
            pred_path = os.path.join(model_base_path, 'Predictions.npy')
            truth_path = os.path.join(model_base_path, 'Truth.npy')
            model_path = os.path.join(model_base_path, 'cp-best.cpkt')
            # model_path = os.path.join(model_base_path, 'final_model.h5')
            METRICS = [
                metrics.TruePositives(name='TruePositive'),
                metrics.FalsePositives(name='FalsePositive'),
                metrics.TrueNegatives(name='TrueNegative'),
                metrics.FalseNegatives(name='FalseNegative'),
                metrics.BinaryAccuracy(name='Accuracy'),
                metrics.Precision(name='Precision'),
                metrics.Recall(name='Recall'),
                metrics.AUC(name='AUC', multi_label=True),
            ]

            model = mydensenet(**model_parameters)
            model.load_weights(model_path)
            model.compile(optimizer=optimizers.Adam(), loss=CosineLoss(), metrics=METRICS)
            visualizer = ModelVisualizationClass(model=model, save_images=True,
                                                 out_path=r'H:\Deeplearning_Recurrence_Work\Activation_Images_{}'.format(model_index))
            all_layers = visualizer.all_layers
            desired_layers = [i.name for i in all_layers if i.name.startswith('conv')]
            visualizer.define_desired_layers(desired_layer_names=desired_layers)
            # model = load_model(model_path, custom_objects={'CosineLoss': CosineLoss})
Example #23
    def train_model(self, themes_weight: List[float],
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):
        epochs = 60
        embedding_output_dim = 128
        last_dim = 128

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        model = tf.keras.Sequential([
            keras.layers.Embedding(input_dim=voc_size,
                                   input_length=article_length,
                                   output_dim=embedding_output_dim,
                                   mask_zero=True),
            keras.layers.Conv1D(filters=64,
                                kernel_size=3,
                                input_shape=(voc_size, embedding_output_dim),
                                activation=tf.nn.relu),
            keras.layers.GlobalMaxPooling1D(),
            keras.layers.Dropout(0.2),
            keras.layers.Dense(last_dim, activation=tf.nn.relu),
            keras.layers.Dropout(0.2),
            keras.layers.Dense(theme_count,
                               activation=tf.nn.sigmoid,
                               kernel_regularizer=regularizers.l2(0.2),
                               activity_regularizer=regularizers.l1(0.1))
        ])

        model.summary()

        model.compile(optimizer=tf.keras.optimizers.Adam(clipnorm=1,
                                                         clipvalue=0.5),
                      loss=WeightedBinaryCrossEntropy(themes_weight,
                                                      from_logits=True),
                      metrics=[
                          metrics.AUC(),
                          metrics.BinaryAccuracy(),
                          metrics.TruePositives(),
                          metrics.TrueNegatives(),
                          metrics.FalseNegatives(),
                          metrics.FalsePositives(),
                          metrics.Recall(),
                          metrics.Precision()
                      ],
                      run_eagerly=self.run_eagerly)

        keras.utils.plot_model(model,
                               "output/" + self.model_name + ".png",
                               show_shapes=True)

        model.fit(dataset.trainData,
                  epochs=epochs,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=[ManualInterrupter(), keras_callback])

        model.save("output/" + self.model_name + ".h5")
        model.save_weights("output/" + self.model_name + "_weight.h5")

        self.__model__ = model
Example #24
def unet_sep(param,
             input_shape=(1024, 1024, 3),
             roi_pool_size=[10, 10],
             chan_num=3,
             weight_ccpf=[1, 1, 1, 1, 1],
             projection_dim=100,
             transformer_layers=4,
             num_heads=4,
             is_comp=True,
             lr=1e-3):
    num_bbox = param["num_bbox"]

    input_img = layers.Input(shape=input_shape)
    input_bbox = layers.Input(shape=(num_bbox, 4))

    ## get unet stem model
    just_unet = strde_unet_xcept_gn_shallow()
    #    just_unet  = strde_unet_xcept_gn_deep()
    #    just_unet = strde_sepconv_unet_xcept_gn_shallow()
    #    just_unet = strde_sepconv_unet_xcept_gn_deep()
    #    just_unet = res_unet()

    ## and classifier model
    just_trnsf = classify_branch(num_bbox=num_bbox,
                                 crypt_class=param['crypt_class'])

    ## create instances of the models
    inst_cr, inst_fm = just_unet(input_img)

    if param['crypt_class']:
        inst_cl, inst_pa, inst_fu, inst_crcls = just_trnsf(
            [inst_fm, input_bbox])

        ## combine into final model
        final_model = Model(
            inputs=[input_img, input_bbox],
            outputs=[inst_cr, inst_cl, inst_pa, inst_fu, inst_crcls])

        losses = {
            'crypt': "binary_crossentropy",
            'cpf': "binary_crossentropy",
            'cpf_1': "binary_crossentropy",
            'cpf_2': "binary_crossentropy",
            'cpf_3': "binary_crossentropy"
        }
        lossWeights = {
            'crypt': weight_ccpf[0],
            'cpf': weight_ccpf[1],
            'cpf_1': weight_ccpf[2],
            'cpf_2': weight_ccpf[3],
            'cpf_3': weight_ccpf[4]
        }
        metrics_use = {
            'crypt':
            metrics.Accuracy(),
            'cpf': [
                metrics.TruePositives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.TrueNegatives()
            ],
            'cpf_1': [
                metrics.TruePositives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.TrueNegatives()
            ],
            'cpf_2': [
                metrics.TruePositives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.TrueNegatives()
            ],
            'cpf_3': [
                metrics.TruePositives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.TrueNegatives()
            ]
        }
    else:
        inst_cl, inst_pa, inst_fu = just_trnsf([inst_fm, input_bbox])

        ## combine into final model
        final_model = Model(inputs=[input_img, input_bbox],
                            outputs=[inst_cr, inst_cl, inst_pa, inst_fu])

        losses = {
            'crypt': "binary_crossentropy",
            'cpf': "binary_crossentropy",
            'cpf_1': "binary_crossentropy",
            'cpf_2': "binary_crossentropy"
        }
        lossWeights = {
            'crypt': weight_ccpf[0],
            'cpf': weight_ccpf[1],
            'cpf_1': weight_ccpf[2],
            'cpf_2': weight_ccpf[3]
        }
        metrics_use = {
            'crypt':
            metrics.Accuracy(),
            'cpf': [
                metrics.TruePositives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.TrueNegatives()
            ],
            'cpf_1': [
                metrics.TruePositives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.TrueNegatives()
            ],
            'cpf_2': [
                metrics.TruePositives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.TrueNegatives()
            ]
        }
    if is_comp:  # compile
        final_model.compile(optimizer=Adam(learning_rate=lr),
                            loss=losses,
                            loss_weights=lossWeights,
                            metrics=metrics_use)
    return final_model, just_trnsf, just_unet
Example #25
 def confusion_matrix_metric(self):
     return [metrics.TruePositives(name='tp'),
             metrics.FalsePositives(name='fp'),
             metrics.TrueNegatives(name='tn'),
             metrics.FalseNegatives(name='fn')]
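A plausible way to consume the helper above when compiling (a sketch; `model` and the enclosing class are assumed):

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=self.confusion_matrix_metric())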
Example #26
    LEARNING_RATE = float(args.learning_rate)
    EPOCHS = int(args.epochs)
    BATCH_SIZE = int(args.batch_size)
    DROPOUT = float(args.dropout)
    IMGSIZE = (int(args.imgsize[0]), int(args.imgsize[1]))
    LOGDIR = args.logdir
    DATA = args.data
    BACKBONE = args.backbone
    NAME = args.model

    # --- define model metrics ---
    METRICS = [
        metrics.TruePositives(name="True_Positives"),
        metrics.FalsePositives(name="False_Positives"),
        metrics.TrueNegatives(name="True_Negatives"),
        metrics.FalseNegatives(name="False_Negatives"),
        metrics.BinaryAccuracy(name="Binary_Accuracy"),
        metrics.Precision(name="Precision"),
        metrics.Recall(name="Recall"),
        metrics.AUC(name="AUC")
    ]

    # --- tensorflow callbacks ---
    date = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    LOGDIR = os.path.join(LOGDIR, NAME, date)  # os.path.join handles both Windows and POSIX separators
    if not os.path.isdir(LOGDIR):
        os.makedirs(LOGDIR, exist_ok=True)
Example #27
def find_best_lr(batch_size=24):
    tf.random.set_seed(3141)
    base_path, morfeus_drive, excel_path = return_paths()

    # if base_path.startswith('H'):  # Only run this locally
    #     create_excel_values(excel_path=excel_path)
    for iteration in [0]:
        out_path = os.path.join(morfeus_drive, 'Learning_Rates')
        model_parameters, out_path = return_model_parameters(
            out_path=out_path, excel_path=excel_path, iteration=iteration)
        if model_parameters is None:
            continue
        model_key = model_parameters['Model_Type']
        optimizer = model_parameters['Optimizer']
        model_base = return_model(model_key=model_key)
        model = model_base(**model_parameters)
        if model_parameters['loss'] == 'CosineLoss':
            loss = CosineLoss()
            min_lr = 1e-6
            max_lr = 1e-1
        elif model_parameters['loss'] == 'CategoricalCrossEntropy':
            loss = tf.keras.losses.CategoricalCrossentropy()
            min_lr = 1e-10
            max_lr = 1e-3
        _, _, train_generator, validation_generator = return_generators(
            batch_size=batch_size,
            model_key=model_key,
            all_training=True,
            cache=True,
            cache_add='LR_Finder_{}'.format(model_key))
        print(out_path)
        k = TensorBoard(log_dir=out_path, profile_batch=0, write_graph=True)
        k.set_model(model)
        k.on_train_begin()
        lr_opt = tf.keras.optimizers.Adam
        if optimizer == 'SGD':
            lr_opt = tf.keras.optimizers.SGD
        elif optimizer == 'Adam':
            lr_opt = tf.keras.optimizers.Adam
        elif optimizer == 'RAdam':
            lr_opt = RectifiedAdam
        METRICS = [
            metrics.TruePositives(name='TruePositive'),
            metrics.FalsePositives(name='FalsePositive'),
            metrics.TrueNegatives(name='TrueNegative'),
            metrics.FalseNegatives(name='FalseNegative'),
            metrics.CategoricalAccuracy(name='Accuracy'),
            metrics.Precision(name='Precision'),
            metrics.Recall(name='Recall'),
            metrics.AUC(name='AUC'),
        ]
        LearningRateFinder(epochs=10,
                           model=model,
                           metrics=METRICS,
                           out_path=out_path,
                           optimizer=lr_opt,
                           loss=loss,
                           steps_per_epoch=1000,
                           train_generator=train_generator.data_set,
                           lower_lr=min_lr,
                           high_lr=max_lr)
        tf.keras.backend.clear_session()
        return False  # repeat!
    return True