Code example #1
    def __init__(self,
                 model,
                 optimizer,
                 log_file_dir=None,
                 data_properties=None):
        """ Init. method.
        :param log_file_dir: If this is not None, then the training performance is stored in that log file directory.
        :param model: The model used for training.
        :param optimizer: Optimizer to be used for the weight updates.
        """
        self._data_properties = data_properties
        self._log_file_dir = log_file_dir
        self._optimizer = optimizer
        self._model = model

        self._tp_obj = TruePositives()
        self._tn_obj = TrueNegatives()
        self._fp_obj = FalsePositives()
        self._fn_obj = FalseNegatives()
        self._pre_obj = Precision()
        self._rec_obj = Recall()
        self._setup_changes = {'train': [], 'valid': []}
        self._loss_tt = {'train': [], 'valid': []}
        self._loss_ms = {'train': [], 'valid': []}
        self._loss_total = {'train': [], 'valid': []}
        self._acc = {'train': [], 'valid': []}
        self._tn = {'train': [], 'valid': []}
        self._tp = {'train': [], 'valid': []}
        self._fn = {'train': [], 'valid': []}
        self._fp = {'train': [], 'valid': []}
        self._rec = {'train': [], 'valid': []}
        self._pre = {'train': [], 'valid': []}
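The dictionaries above accumulate one scalar per epoch for each split. A minimal, self-contained sketch of that bookkeeping pattern (illustrative, not part of the original class): run the stateful metric objects over a batch, append the scalar results to the history, then reset for the next epoch.

import tensorflow as tf
from tensorflow.keras.metrics import Precision, TruePositives

tp_obj, pre_obj = TruePositives(), Precision()
history = {'tp': {'train': [], 'valid': []},
           'pre': {'train': [], 'valid': []}}

y_true = tf.constant([0., 1., 1., 0.])
y_pred = tf.constant([0.1, 0.8, 0.4, 0.6])

# accumulate a batch, record the epoch result, then reset for the next epoch
for obj in (tp_obj, pre_obj):
    obj.update_state(y_true, y_pred)
history['tp']['train'].append(float(tp_obj.result()))
history['pre']['train'].append(float(pre_obj.result()))
for obj in (tp_obj, pre_obj):
    obj.reset_states()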
Code example #2
class ConfusionMatrixDerivedMetric(tf.keras.metrics.Metric):
    def __init__(self, f, name, **kwargs):
        super(ConfusionMatrixDerivedMetric, self).__init__(name=name, **kwargs)
        self.tp = TruePositives()
        self.tn = TrueNegatives()
        self.fn = FalseNegatives()
        self.fp = FalsePositives()
        self.f = f

    def update_state(self, y_true, y_pred, sample_weight=None):
        self.tp.update_state(y_true, y_pred, sample_weight=sample_weight)
        self.tn.update_state(y_true, y_pred, sample_weight=sample_weight)
        self.fp.update_state(y_true, y_pred, sample_weight=sample_weight)
        self.fn.update_state(y_true, y_pred, sample_weight=sample_weight)

    def reset_states(self):
        self.tp.reset_states()
        self.tn.reset_states()
        self.fp.reset_states()
        self.fn.reset_states()

    def result(self):
        return tf.reduce_sum(
            self.f(self.tp.result(), self.fn.result(), self.tn.result(),
                   self.fp.result()))
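Any score that is a function of the four confusion-matrix counts can be plugged in as f. A sketch using the Matthews correlation coefficient (the formula is standard, but its use with this class is an assumption; the class above must be in scope):

import tensorflow as tf

def mcc(tp, fn, tn, fp):
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN))
    numerator = tp * tn - fp * fn
    denominator = tf.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / (denominator + tf.keras.backend.epsilon())

metric = ConfusionMatrixDerivedMetric(mcc, name='mcc')
metric.update_state([0., 1., 1., 0.], [0.1, 0.9, 0.8, 0.3])
print(float(metric.result()))  # 1.0 for this perfectly separated toy batch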
Code example #4
def build_triplet_classifier_model(extractor_model,
                                   dist_type='eucl',
                                   threshold=1.0):
    anchor_in = Input(shape=(224, 224, 3), name="anchor_in")
    anchor_out = extractor_model(anchor_in)

    compare_in = Input(shape=(224, 224, 3), name="compare_in")
    compare_out = extractor_model(compare_in)

    if dist_type == 'cos':
        dist = CosineDistance(name="dist")([anchor_out, compare_out])
    else:
        dist = EuclidianDistanceSquared(name="dist")([anchor_out, compare_out])

    model = Lambda(lambda x: tf.cast((x < threshold), tf.float32))(dist)
    model = Model([anchor_in, compare_in], model)
    model.compile(optimizer=Adamax(),
                  loss=None,
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      TrueNegatives(),
                      FalsePositives(),
                      FalseNegatives(),
                      TruePositives()
                  ])

    return model
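CosineDistance and EuclidianDistanceSquared are custom layers from the source project. A minimal stand-in for the latter, assuming it computes the squared L2 distance between the two embedding batches:

import tensorflow as tf
from tensorflow.keras.layers import Layer

class EuclidianDistanceSquared(Layer):
    # assumed behaviour: squared Euclidean distance, one scalar per example
    def call(self, inputs):
        a, b = inputs
        return tf.reduce_sum(tf.square(a - b), axis=-1, keepdims=True)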
Code example #5
def build_base_model(input_size):
    in_1 = Input(shape=(input_size, ), name="input_1")
    in_2 = Input(shape=(input_size, ), name="input_2")

    norm_1 = Lambda(lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                    name="norm_input_1")(in_1)
    norm_2 = Lambda(lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                    name="norm_input_2")(in_2)
    norm_mul = Multiply(name="multiply_norms")([norm_1, norm_2])

    model = Multiply(name="pointwise_multiply")([in_1, in_2])
    model = Lambda(lambda tensor: tf.reduce_sum(tensor, axis=1, keepdims=True),
                   name="sum")(model)

    model = Lambda(lambda tensors: tf.divide(tensors[0], tensors[1]),
                   name="divide")([model, norm_mul])
    model = ValueMinusInput(1, name="one_minus_input")(model)

    model = LessThan(0.4)(model)
    model_out = Lambda(lambda tensor: tf.cast(tensor, tf.float32),
                       name="cast")(model)

    model = Model([in_1, in_2], model_out)
    model.compile(loss=MeanSquaredError(),
                  optimizer=SGD(),
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      TrueNegatives(),
                      FalsePositives(),
                      FalseNegatives(),
                      TruePositives()
                  ])
    return model
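ValueMinusInput and LessThan are also project-specific layers. Minimal stand-ins, assuming ValueMinusInput(v) computes v - x (so the graph above yields the cosine distance 1 - cos(in_1, in_2)) and LessThan(t) compares elementwise against a fixed threshold:

import tensorflow as tf
from tensorflow.keras.layers import Layer

class ValueMinusInput(Layer):
    def __init__(self, value, **kwargs):
        super().__init__(**kwargs)
        self.value = value

    def call(self, inputs):
        return self.value - inputs

class LessThan(Layer):
    def __init__(self, threshold, **kwargs):
        super().__init__(**kwargs)
        self.threshold = threshold

    def call(self, inputs):
        return inputs < self.threshold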
Code example #6
File: run_experiment.py  Project: musergi/netquake
 def _compile_model(self):
     self.model.compile(
         optimizer=Adam(),
         loss=BinaryCrossentropy(),
         metrics=['accuracy', TruePositives(), TrueNegatives(), FalsePositives(), FalseNegatives(), AUC()]
     )
     self.model.summary()
Code example #7
    def train_model(self, themes_weight: ThemeWeights,
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        model = tf.keras.Sequential([
            keras.layers.Embedding(input_dim=voc_size,
                                   input_length=article_length,
                                   output_dim=self.embedding_output_dim,
                                   mask_zero=True),
            Dropout(0.3),
            keras.layers.Conv1D(filters=64,
                                kernel_size=3,
                                input_shape=(voc_size,
                                             self.embedding_output_dim),
                                activation=tf.nn.relu),
            #keras.layers.MaxPooling1D(3),
            #keras.layers.Bidirectional(keras.layers.LSTM(64)),
            keras.layers.GlobalAveragePooling1D(),
            Dropout(0.3),
            keras.layers.Dense(theme_count, activation=tf.nn.sigmoid)
        ])

        model.compile(optimizer=tf.keras.optimizers.Adam(clipnorm=1),
                      loss=WeightedBinaryCrossEntropy(
                          themes_weight.weight_array()),
                      metrics=[
                          AUC(multi_label=True),
                          BinaryAccuracy(),
                          TruePositives(),
                          TrueNegatives(),
                          FalseNegatives(),
                          FalsePositives(),
                          Recall(),
                          Precision()
                      ],
                      run_eagerly=True)

        model.summary()
        self.__model__ = model

        if self.__plot_directory is not None:
            self.plot_model(self.__plot_directory)

        # Fix for https://github.com/tensorflow/tensorflow/issues/38988
        model._layers = [
            layer for layer in model._layers if not isinstance(layer, dict)
        ]

        callbacks = [ManualInterrupter(), keras_callback]

        model.fit(dataset.trainData,
                  epochs=self.epochs,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=callbacks)
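WeightedBinaryCrossEntropy is a custom loss from this project. A plausible sketch, assuming it scales each theme's binary cross-entropy term by a per-theme weight; the original implementation may differ:

import tensorflow as tf

class WeightedBinaryCrossEntropy(tf.keras.losses.Loss):
    def __init__(self, weights, **kwargs):
        super().__init__(**kwargs)
        self.theme_weights = tf.constant(weights, dtype=tf.float32)

    def call(self, y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        # elementwise BCE, one term per theme, then a weighted mean per example
        per_label = tf.keras.backend.binary_crossentropy(y_true, y_pred)
        return tf.reduce_mean(per_label * self.theme_weights, axis=-1)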
Code example #8
 def get_custom_metrics():
     custom_metrics: dict = {
         "precision": Precision(),
         "recall": Recall(),
         "true_positives": TruePositives(),
         "true_negatives": TrueNegatives(),
         "false_negatives": FalseNegatives(),
         "false_positives": FalsePositives()
     }
     return custom_metrics
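One way such a mapping gets used is to hand the metric objects to compile. An illustrative call with a throwaway model, assuming the helper above is importable at module level:

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(1, activation='sigmoid', input_shape=(16,)),
])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=list(get_custom_metrics().values()))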
Code example #9
def build_cos_model(input_size,
                    cos_dist_lvl,
                    n_neurons,
                    n_layers,
                    batch_norm=True,
                    loss=MeanSquaredError(),
                    optimizer=SGD(learning_rate=0.05, momentum=0.025)):
    in_1 = Input(shape=(input_size, ), name="input_1")
    in_2 = Input(shape=(input_size, ), name="input_2")

    if cos_dist_lvl == 0:
        model = Concatenate(name="concatenate")([in_1, in_2])
    else:
        model = Multiply(name="pointwise_multiply")([in_1, in_2])
        if cos_dist_lvl >= 2:
            norm_1 = Lambda(
                lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                name="norm_input_1")(in_1)
            norm_2 = Lambda(
                lambda tensor: tf.norm(tensor, axis=1, keepdims=True),
                name="norm_input_2")(in_2)
            norm_mul = Multiply(name="multiply_norms")([norm_1, norm_2])
            model = Lambda(lambda tensors: tf.divide(tensors[0], tensors[1]),
                           name="divide")([model, norm_mul])
        if cos_dist_lvl >= 3:
            model = Lambda(
                lambda tensor: tf.reduce_sum(tensor, axis=1, keepdims=True),
                name="sum")(model)
        if cos_dist_lvl >= 4:
            model = ValueMinusInput(1, name="one_minus_input")(model)

    if batch_norm:
        model = BatchNormalization(name="input_normalization")(model)

    for i in range(n_layers):
        model = Dense(n_neurons,
                      activation='sigmoid',
                      name="dense_{}".format(i))(model)
    model_out = Dense(1, activation='sigmoid', name="classify")(model)

    model = Model([in_1, in_2], model_out)
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      TrueNegatives(),
                      FalsePositives(),
                      FalseNegatives(),
                      TruePositives()
                  ])

    return model
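The cos_dist_lvl stages successively build up the cosine distance 1 - <a, b> / (|a||b|); a quick standalone check of that arithmetic:

import tensorflow as tf

a = tf.constant([[1.0, 2.0, 3.0]])
b = tf.constant([[4.0, 5.0, 6.0]])
pointwise = a * b                                          # lvl >= 1
normed = pointwise / (tf.norm(a, axis=1, keepdims=True) *
                      tf.norm(b, axis=1, keepdims=True))   # lvl >= 2
summed = tf.reduce_sum(normed, axis=1, keepdims=True)      # lvl >= 3
cos_dist = 1.0 - summed                                    # lvl >= 4
print(float(cos_dist[0, 0]))  # ~0.0254 for these vectors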
Code example #10
 def build_model(self):
     with tf.name_scope('Model'):
         self.model = self.build()
         self.model.compile(optimizer=Adam(lr=self.learning_rate),
                            loss=FocalLoss(alpha=.25, gamma=2),
                            metrics=[
                                'accuracy', f1,
                                TrueNegatives(),
                                FalseNegatives(),
                                TruePositives(),
                                FalsePositives()
                            ])
     print(self.model.summary())
Code example #11
def build_eucl_model(input_size,
                     eucl_dist_lvl,
                     n_neurons,
                     n_layers,
                     batch_norm=True,
                     loss=MeanSquaredError(),
                     optimizer=SGD(learning_rate=0.05, momentum=0.025)):
    in_1 = Input(shape=(input_size, ), name="input_1")
    in_2 = Input(shape=(input_size, ), name="input_2")

    if eucl_dist_lvl == 0:
        model = Concatenate(name="concatenate")([in_1, in_2])
    else:
        model = Subtract(name="subtract")([in_1, in_2])
        if eucl_dist_lvl >= 2:
            model = Lambda(lambda tensor: tf.square(tensor),
                           name="square")(model)
        if eucl_dist_lvl >= 3:
            model = Lambda(
                lambda tensor: tf.reduce_sum(tensor, axis=1, keepdims=True),
                name="sum")(model)
        if eucl_dist_lvl >= 4:
            model = Lambda(lambda tensor: tf.sqrt(tensor), name="root")(model)

    if batch_norm:
        model = BatchNormalization(name="input_normalization")(model)

    for i in range(n_layers):
        model = Dense(n_neurons,
                      activation='sigmoid',
                      name="dense_{}".format(i))(model)
    model_out = Dense(1, activation='sigmoid', name="classify")(model)

    model = Model([in_1, in_2], model_out)
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=[
                      BinaryAccuracy(),
                      Precision(),
                      Recall(),
                      TrueNegatives(),
                      FalsePositives(),
                      FalseNegatives(),
                      TruePositives()
                  ])

    return model
Code example #12
def define_lstm(TOP_WORDS, max_sent_len):
    embedding_vector_length = 16
    model = Sequential()
    num_neurons = 16
    # Input layer
    model.add(
        Embedding(TOP_WORDS,
                  embedding_vector_length,
                  input_length=max_sent_len))

    model.add(LSTM(16, return_sequences=True))
    model.add(Dropout(0.4))

    model.add(LSTM(16))
    model.add(Dropout(0.4))

    # model.add(Dense(units=num_neurons,
    #                 activation = 'sigmoid'))
    # model.add(Dropout(0.3))

    model.add(Dense(units=num_neurons, activation='relu'))
    model.add(Dropout(0.4))

    # Output layer
    model.add(Dense(1, activation='sigmoid'))

    metrics = [
        FalseNegatives(name='fn'),
        FalsePositives(name='fp'),
        TrueNegatives(name='tn'),
        TruePositives(name='tp'),
        Precision(name='precision'),
        Recall(name='recall'),
    ]

    model.summary()

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=metrics)

    return model
Code example #13
def compile_model(model, metric_names, optimizer="adam", build=True):
    loss = BinaryCrossentropy(name="loss")

    metrics = []
    if "acc" in metric_names:
        metrics.append(BinaryAccuracy(name="acc"))
    if "auprc" in metric_names:
        metrics.append(AUC(curve="PR", name="auprc"))
    if "auroc" in metric_names:
        metrics.append(AUC(curve="ROC", name="auroc"))
    if "fp" in metric_names:
        metrics.append(FalsePositives(name="fp"))
    if "fn" in metric_names:
        metrics.append(FalseNegatives(name="fn"))

    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)

    if build:
        # See https://stackoverflow.com/a/59356545 for usage of the build() function
        model.build()
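An illustrative call (the model here is a throwaway; build=False because the input shape is already known):

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

model = Sequential([Dense(1, activation='sigmoid', input_shape=(16,))])
compile_model(model, ['acc', 'auroc', 'fp', 'fn'], optimizer='adam',
              build=False)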
Code example #14
def create_model_RNN_LSTM(optimizer='adam',
                          units=200,
                          activation='sigmoid',
                          EMBEDDING_DIM=25,
                          max_length=37,
                          vocab_size=350,
                          hidden_dims=1,
                          hidden_dim_units=25):
    print("Parameters", "units", units, "activation", activation,
          "EMBEDDING_DIM", EMBEDDING_DIM, "max_length", max_length,
          "vocab_size", vocab_size, " hidden_dims  ", hidden_dims)
    keras_eval_metric = [
        TruePositives(name='tp'),
        FalsePositives(name='fp'),
        TrueNegatives(name='tn'),
        FalseNegatives(name='fn'),
        BinaryAccuracy(name='accuracy'),
        Precision(name='precision'),
        Recall(name='recall'),
        AUC(name='auc'),
    ]
    model = Sequential()
    model.add(Embedding(vocab_size, EMBEDDING_DIM, input_length=max_length))
    model.add(
        LSTM(units=hidden_dim_units,
             dropout=0.2,
             recurrent_dropout=0.2,
             activation=activation))
    for i in range(hidden_dims):
        model.add(Dense(hidden_dim_units))
        model.add(Activation(activation))
        model.add(Dropout(0.2))
    model.add(Dense(1))
    model.add(Activation(activation))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=keras_eval_metric)
    print("\n\n", model.summary(), "\n\n", model.get_config(), "\n\n")

    return model
Code example #15
def define_mlp_model(n_input):
    """ 
        define the multi-layer-perceptron neural network
    Args:
        n_input(int): number of features
    Returns:
        a defined mlp model, not fitted
    """
    model = Sequential()
    num_neurons = 256  #256

    # hidden layer
    model.add(
        Dense(units=num_neurons,
              input_dim=n_input,
              kernel_initializer='he_uniform',
              activation='sigmoid'))
    model.add(Dense(units=num_neurons * 2, activation='sigmoid'))
    model.add(Dropout(0.3))
    model.add(Dense(units=num_neurons * 2, activation='sigmoid'))
    model.add(Dropout(0.3))

    # output layer
    model.add(Dense(units=1, activation='sigmoid'))
    model.summary()

    metrics = [
        FalseNegatives(name='fn'),
        FalsePositives(name='fp'),
        TrueNegatives(name='tn'),
        TruePositives(name='tp'),
        Precision(name='precision'),
        Recall(name='recall'),
    ]
    #sgd = SGD(lr=0.001, decay=1e-7, momentum=.9)
    adam = Adam(1e-2)
    model.compile(loss='binary_crossentropy', optimizer=adam, metrics=metrics)
    return model
Code example #16
File: Metrics.py  Project: PhiCtl/Fireflies
def perf_measure(y_te, y_pred):
    TP = np.zeros(8)
    FN = np.zeros(8)
    FP = np.zeros(8)
    TN = np.zeros(8)
    for i in range(y_pred.shape[1]):
        tp = TruePositives()
        fn = FalseNegatives()
        fp = FalsePositives()
        tn = TrueNegatives()
        tp.update_state(y_te[:, i], y_pred[:, i])
        fn.update_state(y_te[:, i], y_pred[:, i])
        fp.update_state(y_te[:, i], y_pred[:, i])
        tn.update_state(y_te[:, i], y_pred[:, i])
        TP[i] = tp.result().numpy()
        FN[i] = fn.result().numpy()
        FP[i] = fp.result().numpy()
        TN[i] = tn.result().numpy()
        tp.reset_states()
        fn.reset_states()
        fp.reset_states()
        tn.reset_states()
    return [TP, TN, FN, FP]
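For hard 0/1 predictions the same counts can be read off tf.math.confusion_matrix, one label column at a time; a sketch for a single column:

import numpy as np
import tensorflow as tf

y_te_col = np.array([0, 1, 1, 0, 1])
y_pred_col = np.array([0, 1, 0, 1, 1])
cm = tf.math.confusion_matrix(y_te_col, y_pred_col, num_classes=2).numpy()
tn, fp, fn, tp = cm[0, 0], cm[0, 1], cm[1, 0], cm[1, 1]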
Code example #17
def build_model_hpconfig(args):
    """
    Description:
        Building models for hyperparameter Tuning

    Args:
        args: input arguments

    Returns:
        model (keras model)
    """

    #parsing and assigning hyperparameter variables from argparse
    conv1_filters = int(args.conv1_filters)
    conv2_filters = int(args.conv2_filters)
    conv3_filters = int(args.conv3_filters)
    window_size = int(args.window_size)
    kernel_regularizer = args.kernel_regularizer
    max_pool_size = int(args.pool_size)
    conv_dropout_rate = float(args.conv_dropout)
    conv1d_initializer = args.conv_weight_initializer
    recurrent_layer1 = int(args.recurrent_layer1)
    recurrent_layer2 = int(args.recurrent_layer2)
    recurrent_dropout = float(args.recurrent_dropout)
    after_recurrent_dropout = float(args.after_recurrent_dropout)
    recurrent_recurrent_dropout = float(args.recurrent_recurrent_dropout)
    recurrent_initalizer = args.recurrent_weight_initializer
    optimizer = args.optimizer
    learning_rate = float(args.learning_rate)
    bidirection = args.bidirection
    recurrent_layer = str(args.recurrent_layer)
    dense_dropout = float(args.dense_dropout)
    dense_1 = int(args.dense_1)
    dense_initializer = args.dense_weight_initializer
    train_data = str(args.train_input_data)

    #main input is the amino acid sequence of the protein, of length 700
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate input layers
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    #3x1D Convolutional Hidden Layers with BatchNormalization, Dropout and MaxPooling
    conv_layer1 = Conv1D(conv1_filters,
                         window_size,
                         kernel_regularizer=kernel_regularizer,
                         padding='same',
                         kernel_initializer=conv1d_initializer)(concat)
    batch_norm = BatchNormalization()(conv_layer1)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(conv_dropout_rate)(conv_act)
    max_pool_1D_1 = MaxPooling1D(pool_size=max_pool_size,
                                 strides=1,
                                 padding='same')(conv_dropout)

    conv_layer2 = Conv1D(conv2_filters,
                         window_size,
                         padding='same',
                         kernel_initializer=conv1d_initializer)(concat)
    batch_norm = BatchNormalization()(conv_layer2)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(conv_dropout_rate)(conv_act)
    max_pool_1D_2 = MaxPooling1D(pool_size=max_pool_size,
                                 strides=1,
                                 padding='same')(conv_dropout)

    conv_layer3 = Conv1D(conv3_filters,
                         window_size,
                         kernel_regularizer=kernel_regularizer,
                         padding='same',
                         kernel_initializer=conv1d_initializer)(concat)
    batch_norm = BatchNormalization()(conv_layer3)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(conv_dropout_rate)(conv_act)
    max_pool_1D_3 = MaxPooling1D(pool_size=max_pool_size,
                                 strides=1,
                                 padding='same')(conv_dropout)

    #concat pooling layers
    conv_features = Concatenate(axis=-1)(
        [max_pool_1D_1, max_pool_1D_2, max_pool_1D_3])
    print("Shape of convolutional output: ", conv_features.get_shape())

    conv_features = Dense(600, activation='relu')(conv_features)

    ######## Recurrent Layers ########
    if (recurrent_layer == 'lstm'):
        if (bidirection):
            print('Entering LSTM Layers')
            #Creating Bidirectional LSTM layers
            lstm_f1 = Bidirectional(
                LSTM(recurrent_layer1,
                     return_sequences=True,
                     activation='tanh',
                     recurrent_activation='sigmoid',
                     dropout=recurrent_dropout,
                     recurrent_dropout=recurrent_recurrent_dropout,
                     kernel_initializer=recurrent_initalizer))(conv_features)
            lstm_f2 = Bidirectional(
                LSTM(recurrent_layer2,
                     return_sequences=True,
                     activation='tanh',
                     recurrent_activation='sigmoid',
                     dropout=recurrent_dropout,
                     recurrent_dropout=recurrent_recurrent_dropout,
                     kernel_initializer=recurrent_initalizer))(lstm_f1)

            #concatenate LSTM with convolutional layers
            concat_features = Concatenate(axis=-1)(
                [lstm_f1, lstm_f2, conv_features])
            concat_features = Dropout(after_recurrent_dropout)(concat_features)
            print('Concatenated LSTM layers')

        else:
            #Creating unidirectional LSTM Layers
            lstm_f1 = LSTM(
                recurrent_layer1,
                return_sequences=True,
                activation='tanh',
                recurrent_activation='sigmoid',
                dropout=recurrent_dropout,
                recurrent_dropout=recurrent_recurrent_dropout,
                kernel_initializer=recurrent_initalizer)(conv_features)

            lstm_f2 = LSTM(recurrent_layer2,
                           return_sequences=True,
                           activation='tanh',
                           recurrent_activation='sigmoid',
                           dropout=recurrent_dropout,
                           recurrent_dropout=recurrent_recurrent_dropout,
                           kernel_initializer=recurrent_initalizer)(lstm_f1)

            #concatenate LSTM with convolutional layers
            concat_features = Concatenate(axis=-1)(
                [lstm_f1, lstm_f2, conv_features])
            concat_features = Dropout(after_recurrent_dropout)(concat_features)

    elif (recurrent_layer == 'gru'):
        if (bidirection):

            #Creating Bidirectional GRU layers
            gru_f1 = Bidirectional(
                GRU(recurrent_layer1,
                    return_sequences=True,
                    activation='tanh',
                    recurrent_activation='sigmoid',
                    dropout=recurrent_dropout,
                    recurrent_dropout=recurrent_recurrent_dropout,
                    kernel_initializer=recurrent_initalizer))(conv_features)

            gru_f2 = Bidirectional(
                GRU(recurrent_layer2,
                    return_sequences=True,
                    activation='tanh',
                    recurrent_activation='sigmoid',
                    dropout=recurrent_dropout,
                    recurrent_dropout=recurrent_recurrent_dropout,
                    kernel_initializer=recurrent_initalizer))(gru_f1)

            #concatenate GRU with convolutional layers
            concat_features = Concatenate(axis=-1)(
                [gru_f1, gru_f2, conv_features])
            concat_features = Dropout(after_recurrent_dropout)(concat_features)

        else:
            #Creating unidirectional GRU Layers
            gru_f1 = GRU(
                recurrent_layer1,
                return_sequences=True,
                activation='tanh',
                recurrent_activation='sigmoid',
                dropout=recurrent_dropout,
                recurrent_dropout=recurrent_recurrent_dropout,
                kernel_initializer=recurrent_initalizer)(conv_features)

            gru_f2 = GRU(recurrent_layer1,
                         return_sequences=True,
                         activation='tanh',
                         recurrent_activation='sigmoid',
                         dropout=recurrent_dropout,
                         recurrent_dropout=recurrent_recurrent_dropout,
                         kernel_initializer=recurrent_initalizer)(gru_f1)

            #concatenate GRU with convolutional layers
            concat_features = Concatenate(axis=-1)(
                [gru_f1, gru_f2, conv_features])
            concat_features = Dropout(after_recurrent_dropout)(concat_features)
    else:

        print('Only LSTM and GRU recurrent layers are used in this model')
        return

    #Dense Fully-Connected DNN layers
    fc_dense1 = Dense(dense_1,
                      activation='relu',
                      kernel_initializer=dense_initializer)(concat_features)
    fc_dense1_dropout = Dropout(dense_dropout)(fc_dense1)

    #Final Output layer with 8 nodes for the 8 output classifications
    main_output = Dense(8, activation='softmax',
                        name='main_output')(fc_dense1_dropout)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])

    #Set optimizer to be used with the model, default is Adam
    if optimizer == 'adam':
        optimizer = Adam(lr=learning_rate, name='adam')
    elif optimizer == 'sgd':
        optimizer = SGD(lr=0.01, momentum=0.0, nesterov=False, name='SGD')
    elif optimizer == 'rmsprop':
        optimizer = RMSprop(learning_rate=learning_rate,
                            centered=True,
                            name='RMSprop')
    elif optimizer == 'adagrad':
        optimizer = Adagrad(learning_rate=learning_rate, name='Adagrad')
    elif optimizer == 'adamax':
        optimizer = Adamax(learning_rate=learning_rate, name='Adamax')
    else:
        optimizer = Adam(lr=learning_rate, name='adam')

    #compile model using optimizer and the categorical crossentropy loss function
    model.compile(optimizer=optimizer,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision()
                  ])

    #get summary of model including its layers and num parameters
    model.summary()

    return model
Code example #18
def generate_compiled_segmentation_model(
        model_name,
        model_parameters,
        num_classes,
        loss,
        optimizer,
        weights_to_load=None,
        optimizing_threshold_class_metric=None,
        optimizing_class_id=None,
        optimizing_input_threshold=None,
        optimized_class_thresholds=None):

    # These are the only model, loss, and optimizer currently supported
    assert model_name == 'Unet'
    assert loss == 'cross_entropy'
    assert optimizer == 'adam'

    loss_fn = BinaryCrossentropyL()

    all_metrics = []  # one-hot versions are generally preferred for a given metric
    # make first metric a copy of loss, to continually verify `val_loss` is correct
    if isinstance(loss_fn, BinaryCrossentropyL):
        all_metrics.append(BinaryCrossentropyM(name='binary_ce_metric'))
    else:
        all_metrics.append(CategoricalCrossentropyM(name='categ_ce_metric'))

    # standard thresholded version (default threshold is 0.5) also kept below, in case it's desired in certain scenarios
    for class_num in range(num_classes + 1):
        if class_num == 0 and optimizing_threshold_class_metric is None:  # all class metrics
            # note: `loss_fn` for all classes is placed before `all_metrics` in the lineup of command-window metrics and plots
            if not isinstance(loss_fn, BinaryCrossentropyL):
                all_metrics.extend([CategoricalCELoss()])
                all_metrics[1].name = str('categ_cross_entropy_sm')
            all_metrics.extend([
                AccuracyTfKeras(),
                # OneHotAccuracyTfKeras(),  # `global_threshold` built-in
                ClassBinaryAccuracyTfKeras(thresholds=global_threshold),
                # OneHotClassBinaryAccuracyTfKeras(thresholds=global_threshold),
                ClassBinaryAccuracySM(threshold=global_threshold),
                # OneHotClassBinaryAccuracySM(threshold=global_threshold),
                BinaryAccuracy(threshold=global_threshold),
                CategoricalAccuracy(),
                FalseNegatives(name='false_neg', thresholds=global_threshold),
                # OneHotFalseNegatives(name='false_neg_1H', thresholds=global_threshold),
                TrueNegatives(name='true_neg', thresholds=global_threshold),
                # OneHotTrueNegatives(name='true_neg_1H', thresholds=global_threshold),
                FalsePositives(name='false_pos', thresholds=global_threshold),
                # OneHotFalsePositives(name='false_pos_1H', thresholds=global_threshold),
                TruePositives(name='true_pos', thresholds=global_threshold),
                # OneHotTruePositives(name='true_pos_1H', thresholds=global_threshold),
                Recall(name='recall', thresholds=global_threshold),
                # OneHotRecall(name='recall_1H', thresholds=global_threshold),
                Precision(name='precision', thresholds=global_threshold),
                # OneHotPrecision(name='precision_1H', thresholds=global_threshold),
                FBetaScore(name='f1_score',
                           beta=1,
                           thresholds=global_threshold),
                # OneHotFBetaScore(name='f1_score_1H', beta=1, thresholds=global_threshold),
                IoUScore(name='iou_score', thresholds=global_threshold),
                # OneHotIoUScore(name='iou_score_1H', thresholds=global_threshold)
            ])
        elif class_num == 0 and optimizing_threshold_class_metric is not None:  # all class metrics
            continue
        else:  # per class metrics
            if optimizing_threshold_class_metric is not None:
                class_threshold = optimizing_input_threshold
                class_num = optimizing_class_id + 1
            elif optimized_class_thresholds is None:
                class_threshold = global_threshold
            else:
                class_threshold = optimized_class_thresholds[str(
                    'class' + str(class_num - 1))]

            all_metrics.append(CategoricalCELoss(class_indexes=class_num - 1))
            all_metrics[-1].name = str('class' + str(class_num - 1) +
                                       '_binary_cross_entropy')
            all_metrics.append(
                ClassBinaryAccuracySM(name=str('class' + str(class_num - 1) +
                                               '_binary_accuracy_sm'),
                                      class_indexes=class_num - 1,
                                      threshold=class_threshold))
            all_metrics.append(
                ClassBinaryAccuracyTfKeras(
                    name=str('class' + str(class_num - 1) +
                             '_binary_accuracy_tfkeras'),
                    class_id=class_num - 1,
                    thresholds=class_threshold))
            all_metrics.append(
                IoUScore(name=str('class' + str(class_num - 1) + '_iou_score'),
                         class_id=class_num - 1,
                         thresholds=class_threshold))
            all_metrics.append(
                FBetaScore(name=str('class' + str(class_num - 1) +
                                    '_f1_score'),
                           class_id=class_num - 1,
                           beta=1,
                           thresholds=class_threshold))
            all_metrics.append(
                Precision(name=str('class' + str(class_num - 1) +
                                   '_precision'),
                          class_id=class_num - 1,
                          thresholds=class_threshold))
            all_metrics.append(
                Recall(name=str('class' + str(class_num - 1) + '_recall'),
                       class_id=class_num - 1,
                       thresholds=class_threshold))

            if optimizing_threshold_class_metric is not None:
                break

        if num_classes == 1:
            break

    # strategy = tf.distribute.MirroredStrategy()
    # with strategy.scope():
    model = Unet(input_shape=(None, None, 1),
                 classes=num_classes,
                 **model_parameters)
    model.compile(optimizer=Adam(), loss=loss_fn, metrics=all_metrics)

    if weights_to_load:
        model.load_weights(weights_to_load)

    if optimizing_threshold_class_metric is None:
        print(model.summary())

    return model
Code example #19
    for w in words:
        bag.append(1 if w in pattern_words else 0)

    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1

    training.append([bag, output_row])
random.shuffle(training)
training = np.array(training, dtype=object)  # rows are ragged; object dtype avoids a NumPy error
train_x = list(training[:,0])
train_y = list(training[:,1])
print("Training data created")

x_train, x_test, y_train, y_test = train_test_split(train_x, train_y, test_size=0.33)

model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['acc', TruePositives(), TrueNegatives(), FalsePositives(), FalseNegatives()])

history = model.fit(np.array(x_train), np.array(y_train), validation_data=(x_test, y_test), epochs=300, batch_size=5, verbose=1)
model.save('chatbot_model.h5')

print("model created")
Code example #20
    'id': [],
    'rating': [],
    'text': [],
    'time_created': [],
    'url': []
}

FULL_REVIEWS_DICT = {'rating': [], 'text': []}
'''
Modeling
'''

# VOCAB_SIZE = 10386
# VOCAB_SIZE = 9566
VOCAB_SIZE = 27886

METRICS = [
    TruePositives(name='tp'),
    FalsePositives(name='fp'),
    TrueNegatives(name='tn'),
    FalseNegatives(name='fn'),
    BinaryAccuracy(name='accuracy'),
    Precision(name='precision'),
    Recall(name='recall'),
    AUC(name='auc')
]

PARAMS = {'batch_size': '', 'epochs': ''}

PREDICTIONS = {}
Code example #21
def build_model():

    #main input is the amino acid sequence of the protein, of length 700
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate 2 input layers
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    #3x1D Convolutional Hidden Layers with BatchNormalization and MaxPooling
    conv_layer1 = Convolution1D(64, 7, kernel_regularizer="l2",
                                padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer1)
    conv2D_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.5)(conv2D_act)
    max_pool_2D_1 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer2 = Convolution1D(128, 7, padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer2)
    conv2D_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.5)(conv2D_act)
    max_pool_2D_2 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer3 = Convolution1D(256,
                                7,
                                kernel_regularizer="l2",
                                padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer3)
    conv2D_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.5)(conv2D_act)
    max_pool_2D_3 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    #concatenate convolutional layers
    conv_features = Concatenate(axis=-1)(
        [max_pool_2D_1, max_pool_2D_2, max_pool_2D_3])

    #Dense Fully-Connected DNN layers
    dense_dropout = 0.3  # dropout rate assumed; not defined in this excerpt
    dense_1 = Dense(300, activation='relu')(conv_features)
    dense_1_dropout = Dropout(dense_dropout)(dense_1)
    dense_2 = Dense(100, activation='relu')(dense_1_dropout)
    dense_2_dropout = Dropout(dense_dropout)(dense_2)
    dense_3 = Dense(50, activation='relu')(dense_2_dropout)
    dense_3_dropout = Dropout(dense_dropout)(dense_3)
    dense_4 = Dense(16, activation='relu')(dense_3_dropout)
    dense_4_dropout = Dropout(dense_dropout)(dense_4)

    #Final Dense layer with 8 nodes for the 8 output classifications
    main_output = Dense(8, activation='softmax',
                        name='main_output')(dense_4_dropout)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])
    #use Adam optimizer
    adam = Adam(lr=0.0003)  # learning rate assumed; 'lr' is undefined in this excerpt

    #Adam is fast, but tends to over-fit
    #SGD is slow but gives great results, sometimes RMSProp works best, SWA can easily improve quality, AdaTune

    #compile model using adam optimizer and the categorical crossentropy loss function
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision()
                  ])
    model.summary()

    #set earlyStopping and checkpoint callback
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  patience=5,
                                  verbose=1,
                                  mode='min')
    checkpoint_path = "/3x1DConv_dnn_" + str(datetime.date(
        datetime.now())) + ".h5"
    checkpointer = ModelCheckpoint(filepath=checkpoint_path,
                                   verbose=1,
                                   save_best_only=True,
                                   monitor='val_acc',
                                   mode='max')

    return model
Code example #22
def build_model():

    #main input is the amino acid sequence of the protein, of length 700
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')
    #auxiliary_input = Masking(mask_value=0)(auxiliary_input)

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate 2 input layers
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    #NOTE: the convolutional feature block was omitted from this excerpt; fall
    #back to the concatenated inputs so that 'conv_features' is defined (assumption)
    conv_features = concat

    ######## Recurrent Bi-Directional Long-Short-Term-Memory Layers ########
    lstm_f1 = Bidirectional(
        LSTM(400,
             return_sequences=True,
             activation='tanh',
             recurrent_activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5))(conv_features)

    lstm_f2 = Bidirectional(
        LSTM(300,
             return_sequences=True,
             activation='tanh',
             recurrent_activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5))(lstm_f1)

    #concatenate LSTM with convolutional layers
    concat_features = Concatenate(axis=-1)([lstm_f1, lstm_f2, conv_features])
    concat_features = Dropout(0.4)(concat_features)

    #Dense Fully-Connected DNN layers
    dense_dropout = 0.3  # dropout rate assumed; not defined in this excerpt
    dense_1 = Dense(300, activation='relu')(conv_features)
    dense_1_dropout = Dropout(dense_dropout)(dense_1)
    dense_2 = Dense(100, activation='relu')(dense_1_dropout)
    dense_2_dropout = Dropout(dense_dropout)(dense_2)
    dense_3 = Dense(50, activation='relu')(dense_2_dropout)
    dense_3_dropout = Dropout(dense_dropout)(dense_3)
    dense_4 = Dense(16, activation='relu')(dense_3_dropout)
    dense_4_dropout = Dropout(dense_dropout)(dense_4)

    #Final Dense layer with 8 nodes for the 8 output classifications
    main_output = Dense(8, activation='softmax',
                        name='main_output')(dense_4_dropout)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])

    #use Adam optimizer
    adam = Adam(lr=0.0003)
    #Adam is fast, but tends to over-fit
    #SGD is slow but gives great results, sometimes RMSProp works best, SWA can easily improve quality, AdaTune

    #compile model using adam optimizer and the categorical crossentropy loss function
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision()
                  ])
    model.summary()

    #set earlyStopping and checkpoint callback
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  patience=5,
                                  verbose=1,
                                  mode='min')
    checkpoint_path = "/blstm_3x1Dconv_dnn_" + str(
        datetime.date(datetime.now())) + ".h5"
    checkpointer = ModelCheckpoint(filepath=checkpoint_path,
                                   verbose=1,
                                   save_best_only=True,
                                   monitor='val_acc',
                                   mode='max')

    return model
Code example #23
def build_model():
    """
    Description:
        Building DCBGRU model
    Args:
        None
    Returns:
        model (Keras model)

    """

    #main input is the amino acid sequence of the protein, of length 700
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate 2 input layers
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    #3x1D Convolutional Hidden Layers with BatchNormalization, Dropout and MaxPooling
    conv_layer1 = Conv1D(16, 7, kernel_regularizer="l2",
                         padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer1)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.2)(conv_act)
    max_pool_1D_1 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer2 = Conv1D(32, 7, padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer2)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.2)(conv_act)
    max_pool_1D_2 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer3 = Conv1D(64, 7, kernel_regularizer="l2",
                         padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer3)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.2)(conv_act)
    max_pool_1D_3 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    ############################################################################################

    #concatenate convolutional layers
    conv_features = Concatenate(axis=-1)(
        [max_pool_1D_1, max_pool_1D_2, max_pool_1D_3])

    #dense layer before GRU's
    gru_dense = Dense(600, activation='relu',
                      name="after_cnn_dense")(conv_features)

    ######## Recurrent Bidirectional Gated Recurrent Unit (GRU) Layers ########
    gru_f1 = Bidirectional(
        GRU(200,
            return_sequences=True,
            activation='tanh',
            recurrent_activation='sigmoid',
            dropout=0.5,
            recurrent_dropout=0.5))(gru_dense)

    gru_f2 = Bidirectional(
        GRU(200,
            return_sequences=True,
            activation='tanh',
            recurrent_activation='sigmoid',
            dropout=0.5,
            recurrent_dropout=0.5))(gru_f1)

    ############################################################################################

    #concatenate GRU with convolutional layers
    concat_features = Concatenate(axis=-1)([gru_f1, gru_f2, gru_dense])
    concat_features = Dropout(0.4)(concat_features)

    #Dense Fully-Connected DNN layers
    after_gru_dense = Dense(600, activation='relu')(concat_features)
    after_gru_dense_dropout = Dropout(0.3)(after_gru_dense)

    #Final Dense layer with 8 nodes for the 8 output classifications
    main_output = Dense(8, activation='softmax',
                        name='main_output')(after_gru_dense_dropout)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])

    #use Adam optimizer
    adam = Adam(lr=0.00015)

    #compile model using adam optimizer and the categorical crossentropy loss function
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision()
                  ])

    #print model summary
    model.summary()

    return model
Code example #24
def build_model():
    """
    Description:
        Building PSP-CD model
    Args:
        None
    Returns:
        model (Keras model)

    """

    #main input is the amino acid sequence of the protein, of length 700
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate 2 input layers
    concat_features = Concatenate(axis=-1)([embed, auxiliary_input])

    ############################################################################################

    #Dense Fully-Connected DNN layers
    dense_1 = Dense(512, activation='relu')(concat_features)
    dense_1_dropout = Dropout(0.3)(dense_1)
    dense_2 = Dense(256, activation='relu')(dense_1_dropout)
    dense_2_dropout = Dropout(0.3)(dense_2)
    dense_3 = Dense(128, activation='relu')(dense_2_dropout)
    dense_3_dropout = Dropout(0.3)(dense_3)
    dense_4 = Dense(64, activation='relu')(dense_3_dropout)
    dense_4_dropout = Dropout(0.3)(dense_4)
    dense_5 = Dense(32, activation='relu')(dense_4_dropout)
    dense_5_dropout = Dropout(0.3)(dense_5)
    dense_6 = Dense(16, activation='relu')(dense_5_dropout)
    dense_6_dropout = Dropout(0.3)(dense_6)

    #Final Dense layer with 8 nodes for the 8 output classifications
    main_output = Dense(8, activation='softmax',
                        name='main_output')(dense_6_dropout)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])

    #use Adam optimizer
    adam = Adam(lr=0.00015)

    #compile model using adam optimizer and the categorical crossentropy loss function
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision()
                  ])

    #print model summary
    model.summary()

    return model
Code example #25
def main():
    args = cmd_parser()
    if_fast_run = False

    print(f"TensorFlow version: {tf.__version__}.")
    print(f"Keras version: {keras.__version__}.")
    print("If in eager mode: ", tf.executing_eagerly())
    assert tf.__version__[0] == "2"

    model_type = "three_conv2d_net"

    # data path
    competition_name = "dogs-vs-cats-redux-kernels-edition"
    data_dir = os.path.expanduser(f"~/.kaggle/competitions/{competition_name}")

    # experiment time
    date_time = datetime.now().strftime("%Y%m%d-%H%M%S")
    ckpt_dir = os.path.expanduser(
        f"~/Documents/DeepLearningData/{competition_name}/ckpts/{model_type}/{date_time}"
    )
    log_dir = os.path.expanduser(
        f"~/Documents/DeepLearningData/{competition_name}/logs/{model_type}/{date_time}"
    )
    makedir_exist_ok(ckpt_dir)
    makedir_exist_ok(log_dir)

    # Input parameters
    IMAGE_WIDTH = 128
    IMAGE_HEIGHT = 128
    IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
    IMAGE_CHANNELS = 3
    input_shape = (IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)

    # Create model
    metrics = [
        TruePositives(name='tp'),  # thresholds=0.5
        FalsePositives(name='fp'),
        TrueNegatives(name='tn'),
        FalseNegatives(name='fn'),
        BinaryAccuracy(name='accuracy'),
        # AUC0(name='auc_cat_0'),  # AUC with 'good' as the positive class
        AUC(name='auc_dog_1')  # AUC with 'bad' as the positive class
    ]

    model = three_conv2d_net(input_shape=input_shape, metrics=metrics)
    model.summary()

    earlystop = EarlyStopping(patience=10)
    learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                                patience=2,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=0.00001)

    model_name = "%s-epoch-{epoch:02d}-val_accuracy-{val_accuracy:.4f}.h5" % model_type
    filepath = os.path.join(ckpt_dir, model_name)
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor="val_accuracy",
                                 verbose=1,
                                 period=1)
    callbacks = [earlystop, learning_rate_reduction, checkpoint]

    # Prepare DataFrame
    filenames = os.listdir(os.path.join(data_dir, "train"))
    random.shuffle(filenames)
    categories = []
    for filename in filenames:
        category = filename.split('.')[0]
        if category == 'dog':
            categories.append(1)
        elif category == 'cat':
            categories.append(0)
    df = pd.DataFrame({'filename': filenames, 'category': categories})
    df["category"] = df["category"].replace({0: 'cat', 1: 'dog'})

    # Show sample image
    # sample = np.random.choice(filenames)
    # image = load_img("./data/train/"+sample)
    # plt.imshow(image)
    # plt.show()
    """ 这里用来自动划分 train 集和 val 集 """
    train_df, validate_df = train_test_split(df,
                                             test_size=0.20,
                                             random_state=42)
    train_df = train_df.reset_index(drop=True)
    validate_df = validate_df.reset_index(drop=True)

    # train_df['category'].value_counts().plot.bar()

    total_train = train_df.shape[0]
    total_validate = validate_df.shape[0]

    classes = ["cat", "dog"]

    # Training Generator
    train_datagen = ImageDataGenerator(rotation_range=15,
                                       rescale=1. / 255,
                                       shear_range=0.1,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       width_shift_range=0.1,
                                       height_shift_range=0.1)
    train_generator = train_datagen.flow_from_dataframe(
        train_df,
        os.path.join(data_dir, "train"),
        x_col='filename',
        y_col='category',
        target_size=IMAGE_SIZE,
        class_mode='categorical',
        batch_size=args.batch_size)

    # Validation Generator
    validation_datagen = ImageDataGenerator(rescale=1. / 255)
    validation_generator = validation_datagen.flow_from_dataframe(
        validate_df,
        os.path.join(data_dir, "train"),
        x_col='filename',
        y_col='category',
        target_size=IMAGE_SIZE,
        class_mode='categorical',
        batch_size=args.batch_size)

    # Example Generation
    example_df = train_df.sample(n=1).reset_index(drop=True)
    example_generator = train_datagen.flow_from_dataframe(
        example_df,
        os.path.join(data_dir, "train"),
        x_col='filename',
        y_col='category',
        target_size=IMAGE_SIZE,
        class_mode='categorical')

    # Example Generation Plotting
    # plt.figure(figsize=(12, 12))
    # for i in range(0, 15):
    #     plt.subplot(5, 3, i+1)
    #     for X_batch, Y_batch in example_generator:
    #         image = X_batch[0]
    #         plt.imshow(image)
    #         break
    # plt.tight_layout()
    # plt.show()

    # Fit model
    epochs = 3 if if_fast_run else args.train_epochs
    history = model.fit(train_generator,
                        epochs=epochs,
                        validation_data=validation_generator,
                        callbacks=callbacks,
                        initial_epoch=args.start_epoch)

    # Save last model ckpt
    model.save_weights(f"./{model_type}-last_ckpt.h5")
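The weights saved above can be reloaded to resume training; a minimal sketch, assuming a hypothetical create_model() factory that rebuilds the same compiled architecture:

# Hypothetical resume sketch: rebuild the architecture, reload the last
# checkpoint, and continue training where the previous run stopped.
model = create_model()  # assumed factory returning the same compiled model
model.load_weights(f"./{model_type}-last_ckpt.h5")
model.fit(train_generator,
          epochs=args.train_epochs,
          validation_data=validation_generator,
          callbacks=callbacks,
          initial_epoch=epochs)  # continue counting after the finished epochs
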
def build_model():

    #main input is the amino-acid sequence of the protein, shape (700,)
    main_input = Input(shape=(700, ), dtype='float32', name='main_input')

    #Embedding Layer used as input to the neural network
    embed = Embedding(output_dim=21, input_dim=21,
                      input_length=700)(main_input)

    #secondary input is the protein profile features
    auxiliary_input = Input(shape=(700, 21), name='aux_input')

    #get shape of input layers
    print("Protein Sequence shape: ", main_input.get_shape())
    print("Protein Profile shape: ", auxiliary_input.get_shape())

    #concatenate 2 input layers
    concat = Concatenate(axis=-1)([embed, auxiliary_input])

    #3x1D Convolutional Hidden Layers with BatchNormalization and MaxPooling
    conv_layer1 = Conv1D(64, 7, kernel_regularizer="l2",
                         padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer1)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.5)(conv_act)
    # ave_pool_1 = AveragePooling1D(2, 1, padding='same')(conv_dropout)
    max_pool_1D_1 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer2 = Conv1D(128, 7, padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer2)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.5)(conv_act)
    # ave_pool_2 = AveragePooling1D(2, 1, padding='same')(conv_dropout)
    max_pool_1D_2 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)

    conv_layer3 = Conv1D(256, 7, kernel_regularizer="l2",
                         padding='same')(concat)
    batch_norm = BatchNormalization()(conv_layer3)
    conv_act = activations.relu(batch_norm)
    conv_dropout = Dropout(0.5)(conv_act)
    max_pool_1D_3 = MaxPooling1D(pool_size=2, strides=1,
                                 padding='same')(conv_dropout)
    # ave_pool_3 = AveragePooling1D(2, 1, padding='same')(conv_dropout)

    #concatenate convolutional layers
    conv_features = Concatenate(axis=-1)(
        [max_pool_1D_1, max_pool_1D_2, max_pool_1D_3])

    #output is a 1D convolutional layer with 8 filters, one for each of the 8 categories
    main_output = Conv1D(8,
                         7,
                         padding='same',
                         activation='softmax',
                         name='main_output')(conv_features)

    #create model from inputs and outputs
    model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output])
    #use Adam optimizer
    adam = Adam(learning_rate=0.0003)  # 'lr' is deprecated in TF 2.x

    #compile model using the Adam optimizer and the categorical crossentropy loss function
    model.compile(optimizer=adam,
                  loss={'main_output': 'categorical_crossentropy'},
                  metrics=[
                      'accuracy',
                      MeanSquaredError(),
                      FalseNegatives(),
                      FalsePositives(),
                      TrueNegatives(),
                      TruePositives(),
                      MeanAbsoluteError(),
                      Recall(),
                      Precision()
                  ])
    model.summary()

    #set EarlyStopping and checkpoint callbacks; note they are created here but
    #never returned, so the caller must pass its own callbacks to model.fit()
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  patience=5,
                                  verbose=1,
                                  mode='min')
    checkpoint_path = "checkpoints/3xConv_cnn_" + str(
        datetime.date(datetime.now())) + ".h5"
    checkpointer = ModelCheckpoint(filepath=checkpoint_path,
                                   verbose=1,
                                   save_best_only=True,
                                   monitor='val_accuracy',  # 'val_acc' is the TF1-era name
                                   mode='max')

    return model
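
A hypothetical usage sketch for build_model(): the two inputs are the length-700 residue index sequence and the 700x21 profile matrix; random stand-in data is used here only to show the expected shapes.

import numpy as np

model = build_model()
X_seq = np.random.randint(0, 21, size=(32, 700)).astype('float32')  # residue indices
X_profile = np.random.rand(32, 700, 21).astype('float32')           # profile features
y = np.random.rand(32, 700, 8)                                      # per-residue targets
y = y / y.sum(axis=-1, keepdims=True)                               # normalize to distributions

model.fit({'main_input': X_seq, 'aux_input': X_profile},
          {'main_output': y},
          batch_size=8,
          epochs=1)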
Code Example #27
model = upsample(model, filters=12)

output = Conv2DTranspose(filters=OUTPUT_CHANNELS,
                         kernel_size=[1, 1],
                         activation='sigmoid',
                         padding='same')(model)

model = Model(inputs, output)
print(model.summary())

model.compile(optimizer='adam',
              loss=TverskyLoss(alpha=0.2, smooth=1e3),
              metrics=[
                  'acc',
                  FalseNegatives(),
                  FalsePositives(),
                  TruePositives(),
                  TrueNegatives()
              ])
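
TverskyLoss is not a built-in Keras loss; a minimal sketch of one common formulation, matching the alpha/smooth arguments used above (the exact weighting in the original may differ):

class TverskyLoss(tf.keras.losses.Loss):
    """Tversky loss for binary segmentation: alpha weights false positives,
    (1 - alpha) weights false negatives, smooth avoids division by zero."""

    def __init__(self, alpha=0.5, smooth=1.0, **kwargs):
        super().__init__(**kwargs)
        self.alpha = alpha
        self.smooth = smooth

    def call(self, y_true, y_pred):
        y_true = tf.cast(tf.reshape(y_true, [-1]), tf.float32)
        y_pred = tf.reshape(y_pred, [-1])
        tp = tf.reduce_sum(y_true * y_pred)
        fp = tf.reduce_sum((1.0 - y_true) * y_pred)
        fn = tf.reduce_sum(y_true * (1.0 - y_pred))
        tversky = (tp + self.smooth) / (
            tp + self.alpha * fp + (1.0 - self.alpha) * fn + self.smooth)
        return 1.0 - tversky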


def show_predictions(dataset=None, num=1):
    if dataset:
        for image, mask in dataset.take(num):
            pred_mask = model.predict(image)
            display([image[0], mask[0], pred_mask[0]])
    else:
        display([
            sample_image, sample_mask,
            model.predict(sample_image[tf.newaxis, ...])[0]
        ])
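
display() is assumed to be the usual plotting helper from the TensorFlow segmentation tutorial; a minimal sketch:

import matplotlib.pyplot as plt

def display(display_list):
    """Show input image, ground-truth mask, and predicted mask side by side."""
    titles = ['Input Image', 'True Mask', 'Predicted Mask']
    plt.figure(figsize=(12, 4))
    for i, item in enumerate(display_list):
        plt.subplot(1, len(display_list), i + 1)
        plt.title(titles[i] if i < len(titles) else '')
        plt.imshow(tf.keras.preprocessing.image.array_to_img(item))
        plt.axis('off')
    plt.show()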
Code Example #28
File: train.py  Project: Bill-Bi/ADAS
train_dataloader = Dataloader(train_dataset,
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=True)
valid_dataloader = Dataloader(valid_dataset,
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=False)

loss = cfg.loss
optim = tf.keras.optimizers.SGD(learning_rate=cfg.LR, momentum=0.9, nesterov=True)  # 'lr' is deprecated
metrics = [
    IOUScore(threshold=cfg.metric_threshold, per_image=cfg.metric_per_image),
    FScore(threshold=cfg.metric_threshold, per_image=cfg.metric_per_image),
    Precision(threshold=cfg.metric_threshold, per_image=cfg.metric_per_image),
    Recall(threshold=cfg.metric_threshold, per_image=cfg.metric_per_image),
    TruePositives(thresholds=cfg.metric_threshold),
    TrueNegatives(thresholds=cfg.metric_threshold),
    FalsePositives(thresholds=cfg.metric_threshold),
    FalseNegatives(thresholds=cfg.metric_threshold)
]

# with mirrored_strategy.scope():
model = sm.Unet(backbone_name=cfg.backbone_name,
                input_shape=cfg.input_shape,
                classes=n_classes,
                activation='sigmoid' if n_classes == 1 else 'softmax',
                weights=None,
                encoder_weights=cfg.encoder_weights,
                encoder_freeze=cfg.encoder_freeze,
                encoder_features=cfg.encoder_features,
                decoder_block_type=cfg.decoder_block_type,
                decoder_filters=cfg.decoder_filters,
                decoder_use_batchnorm=True)
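
The snippet stops before compilation; a plausible continuation wiring together the loss, optimizer, metrics, and dataloaders defined above (the epoch count and checkpoint path are assumptions):

model.compile(optimizer=optim, loss=loss, metrics=metrics)

model.fit(train_dataloader,
          validation_data=valid_dataloader,
          epochs=50,  # assumed; the project likely reads this from cfg
          callbacks=[tf.keras.callbacks.ModelCheckpoint('unet_best.h5',
                                                        save_best_only=True)])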
Code Example #29
    def train_model(self, themes_weight: ThemeWeights,
                    dataset: TrainValidationDataset,
                    voc_size: int,
                    keras_callback: LambdaCallback):

        conv_reg = 0  # alternative: 0.015
        dense_reg = 0  # alternative: 0.015
        dropout_conv = 0.1  # alternative: 0.2
        dropout_dense = 0.1  # alternative: 0.2

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        inputs = keras.layers.Input(shape=(dataset.article_length,))

        layer = keras.layers.Embedding(input_dim=voc_size, input_length=article_length,
                                       output_dim=self.embedding_output_dim,
                                       mask_zero=True)(inputs)
        layer = Dropout(dropout_conv)(layer)

        # Each convolution has filters that look for specific word combinations. For example,
        # a filter might return a small value for everything except "apple iphone".
        conv1 = keras.layers.Conv1D(filters=64, kernel_size=2, input_shape=(voc_size, self.embedding_output_dim),
                                    activation=tf.nn.relu, kernel_regularizer=l2(l=conv_reg))(layer)
        conv1 = keras.layers.GlobalMaxPooling1D()(conv1)
        conv1 = keras.layers.Dense(32, activation=tf.nn.relu)(conv1)
        conv1 = Dropout(dropout_conv)(conv1)

        conv2 = keras.layers.Conv1D(filters=64, kernel_size=3, input_shape=(voc_size, self.embedding_output_dim),
                                    activation=tf.nn.relu, kernel_regularizer=l2(l=conv_reg))(layer)
        conv2 = keras.layers.GlobalMaxPooling1D()(conv2)
        conv2 = keras.layers.Dense(32, activation=tf.nn.relu)(conv2)
        conv2 = Dropout(dropout_conv)(conv2)

        conv3 = keras.layers.Conv1D(filters=64, kernel_size=1, input_shape=(voc_size, self.embedding_output_dim),
                                    activation=tf.nn.relu, kernel_regularizer=l2(l=conv_reg))(layer)
        conv3 = keras.layers.GlobalMaxPooling1D()(conv3)
        conv3 = keras.layers.Dense(32, activation=tf.nn.relu)(conv3)
        conv3 = Dropout(dropout_conv)(conv3)

        conv4 = keras.layers.Conv1D(filters=40, kernel_size=5, input_shape=(voc_size, self.embedding_output_dim),
                                    activation=tf.nn.relu, kernel_regularizer=l2(l=conv_reg))(layer)
        conv4 = keras.layers.GlobalMaxPooling1D()(conv4)
        conv4 = keras.layers.Dense(20, activation=tf.nn.relu)(conv4)
        conv4 = Dropout(dropout_conv)(conv4)

        layer = keras.layers.Concatenate()([conv1, conv2, conv3, conv4])
        layer = keras.layers.Dense(32, activation=tf.nn.relu, kernel_regularizer=l2(l=conv_reg))(layer)
        layer = keras.layers.Dropout(dropout_dense)(layer)
        # layer = keras.layers.Dense(64, activation=tf.nn.relu, kernel_regularizer=l2(l=conv_reg))(layer)
        # layer = keras.layers.Dropout(dropout_dense)(layer)

        layer = keras.layers.Dense(theme_count, activation=tf.nn.sigmoid, kernel_regularizer=l2(l=dense_reg))(layer)

        model = keras.Model(inputs=inputs, outputs=layer)


        model.compile(optimizer=tf.keras.optimizers.Adam(clipnorm=1, learning_rate=0.00009),
                      loss=WeightedBinaryCrossEntropy(themes_weight.weight_array()),
                      metrics=[AUC(multi_label=True), BinaryAccuracy(), TruePositives(),
                         TrueNegatives(), FalseNegatives(), FalsePositives(),
                         Recall(), Precision()],
                      run_eagerly=None)

        model.summary()
        self._model = model

        if self.__plot_directory is not None:
            self.plot_model(self.__plot_directory)

        # Fix for https://github.com/tensorflow/tensorflow/issues/38988
        model._layers = [layer for layer in model._layers if not isinstance(layer, dict)]

        callbacks = [ManualInterrupter(), keras_callback]

        model.fit(dataset.trainData,
                  epochs=self.epochs,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=callbacks,
                  )
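
WeightedBinaryCrossEntropy is project-specific; a minimal sketch of a per-theme weighted binary cross-entropy consistent with the weight_array() call above (the exact weighting scheme is an assumption):

class WeightedBinaryCrossEntropy(tf.keras.losses.Loss):
    """Binary cross-entropy with one positive-class weight per output unit."""

    def __init__(self, weights, **kwargs):
        super().__init__(**kwargs)
        self.weights = tf.constant(weights, dtype=tf.float32)  # shape (theme_count,)

    def call(self, y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        bce = tf.keras.backend.binary_crossentropy(y_true, y_pred)
        # Up-weight the positive examples of each theme; negatives keep weight 1.
        weight = y_true * self.weights + (1.0 - y_true)
        return tf.reduce_mean(bce * weight, axis=-1)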
Code Example #30
class TrainingController:
    """ Custom training loop with custom loss.
    """
    def __init__(self,
                 model,
                 optimizer,
                 log_file_dir=None,
                 data_properties=None):
        """ Init. method.
        :param log_file_dir: If this is not None, then the training performance is stored in that log file directory.
        :param model: The model used for training.
        :param optimizer: Optimizer to be used for the weight updates.
        """
        self._data_properties = data_properties
        self._log_file_dir = log_file_dir
        self._optimizer = optimizer
        self._model = model

        self._tp_obj = TruePositives()
        self._tn_obj = TrueNegatives()
        self._fp_obj = FalsePositives()
        self._fn_obj = FalseNegatives()
        self._pre_obj = Precision()
        self._rec_obj = Recall()
        self._setup_changes = {'train': [], 'valid': []}
        self._loss_tt = {'train': [], 'valid': []}
        self._loss_ms = {'train': [], 'valid': []}
        self._loss_total = {'train': [], 'valid': []}
        self._acc = {'train': [], 'valid': []}
        self._tn = {'train': [], 'valid': []}
        self._tp = {'train': [], 'valid': []}
        self._fn = {'train': [], 'valid': []}
        self._fp = {'train': [], 'valid': []}
        self._rec = {'train': [], 'valid': []}
        self._pre = {'train': [], 'valid': []}

    def _tb_update(self,
                   grads,
                   y_true,
                   y_pred,
                   m_idx,
                   emb,
                   attn,
                   epoch,
                   batch,
                   steps_per_epoch,
                   prefix='train/'):
        step = epoch * steps_per_epoch + batch

        if grads is not None:
            for var, grad in zip(self._model.trainable_variables, grads):
                tf.summary.histogram(prefix + var.name + '/gradient',
                                     grad,
                                     step=step)

        if attn is not None:
            self._plot_attention_weights(
                attention=attn,
                step=step,
                prefix=prefix + 'layer{}/enc_pad/'.format(0),
                description='x: input jobs | y: output jobs')

        m_idx = tf.tile(m_idx[:, :, tf.newaxis, tf.newaxis],
                        [1, 1, tf.shape(m_idx)[1], 1])
        tf.summary.image(prefix + "selected_machine", m_idx, step=step)

        for var in self._model.trainable_variables:
            tf.summary.histogram(prefix + var.name + '/weight', var, step=step)

        for m in tf.range(tf.shape(y_true)[1]):
            tf.summary.image(prefix + "y_true_m{}".format(m),
                             tf.expand_dims(y_true[:, m, :, :], -1),
                             step=step)
            tf.summary.image(prefix + "y_pred_m{}".format(m),
                             tf.expand_dims(y_pred[:, m, :, :], -1),
                             step=step)

    @staticmethod
    def _plot_attention_weights(attention,
                                step,
                                description='x: input, y: output',
                                prefix='train/'):
        for head in range(attention.shape[1]):
            data = []
            for attn_matrix in tf.unstack(attention, axis=0):
                attn_matrix = attn_matrix.numpy()
                cmap = cm.get_cmap('Greens')
                norm = Normalize(vmin=attn_matrix.min(),
                                 vmax=attn_matrix.max())
                data.append(cmap(norm(attn_matrix)))
            tf.summary.image(prefix + "head{}".format(head),
                             np.array(data, np.float32)[:, head, :, :, :],
                             step=step,
                             description=description)

    def train(self,
              train_data,
              val_data=None,
              epochs=1,
              steps_per_epoch=100,
              checkpoint_path=None,
              validation_steps=10):
        """ Custom training loop with custom loss.
        :param train_data: training data set
        :param val_data: validation data set
        :param epochs: number of training epochs
        :param steps_per_epoch: steps per epoch (required if a generator is used).
        If set to None, the number will be computed automatically.
        :param checkpoint_path: save checkpoints epoch-wise if a directory is provided.
        :param validation_steps: number of validation batches to run per epoch.
        """
        log_path = self._log_file_dir + '/' + datetime.now().strftime(
            "%y%m%d-%H%M%S")
        Path(log_path).parent.mkdir(parents=True, exist_ok=True)
        writer = tf.summary.create_file_writer(log_path)
        writer.set_as_default()
        for step in tf.range(steps_per_epoch * epochs, dtype=tf.int64):
            tf.summary.scalar('learning_rate',
                              self._optimizer.lr(tf.cast(step, tf.float32)),
                              step=step)
        for epoch in range(epochs):
            for batch, (x, y_true) in enumerate(train_data):
                if batch == 0:
                    self._target_shape = x.shape
                if batch >= steps_per_epoch:
                    break

                loss_total, grads, y_pred, m, emb, attn = iterative_optimize(
                    optimizer=self._optimizer,
                    model=self._model,
                    x=x,
                    y_true=y_true,
                    data_properties=self._data_properties,
                    training=True)

                loss_tt = lateness(x, y_pred)
                loss_ms = makespan(x, y_pred)
                setup_changes = count_setup_changes(x, y_pred)
                self._update_metric('train', y_true, y_pred,
                                    (loss_tt, loss_ms, loss_total),
                                    setup_changes, batch)
                self._print('train', epoch, epochs, batch, steps_per_epoch)

                if batch % TB_LOG_INV_FREQ == 0:
                    self._tb_update(grads, y_true, y_pred, m, emb, attn, epoch,
                                    batch, steps_per_epoch, 'train/')
                    self._log('train', epoch * steps_per_epoch + batch)

            self._validation_loop(val_data, validation_steps, epoch)

            self._empty_metric()

            if checkpoint_path and (epoch % CKPT_SAVE_INV_FREQ == 0):
                Path(checkpoint_path).parent.mkdir(parents=True, exist_ok=True)
                self._model.save_weights(checkpoint_path.format(epoch=epoch))

        writer.close()

    def _empty_metric(self):
        """ This will empty all metric dict, to avoid memory overflows.
        """
        for key in ['train', 'valid']:

            self._loss_tt.get(key).clear()
            self._loss_ms.get(key).clear()
            self._loss_total.get(key).clear()

            self._acc.get(key).clear()

            self._setup_changes.get(key).clear()

            self._tp_obj.reset_states()
            self._tp.get(key).clear()

            self._tn_obj.reset_states()
            self._tn.get(key).clear()

            self._fp_obj.reset_states()
            self._fp.get(key).clear()

            self._fn_obj.reset_states()
            self._fn.get(key).clear()

            self._pre_obj.reset_states()
            self._pre.get(key).clear()

            self._rec_obj.reset_states()
            self._rec.get(key).clear()

    def _print(self, key: str, epoch: int, epochs_max: int, batch: int,
               batch_max: int):
        """ Prints the performance results in the console.
        :param key:
        :param epoch:
        :param epochs_max:
        :param batch:
        :param batch_max:
        """
        mean_loss = tf.reduce_mean(self._loss_total.get(key))
        mean_acc = tf.reduce_mean(self._acc.get(key))
        mean_pre = tf.reduce_mean(self._pre.get(key))
        mean_rec = tf.reduce_mean(self._rec.get(key))

        if key == 'train':
            tf.print(
                '\r[Train] [E {0}/{1}] [B {2}/{3}] Loss: {4} Acc: {5} Pre: {6} Rec: {7}'
                .format(epoch + 1, epochs_max, batch + 1, batch_max, mean_loss,
                        mean_acc, mean_pre, mean_rec),
                end='')
        else:
            tf.print(
                '\n[Valid] [E {0}/{1}] [B {2}/{3}] Loss: {4} Acc: {5} Pre: {6} Rec: {7}\n'
                .format(epoch, epochs_max, batch + 1, batch_max, mean_loss,
                        mean_acc, mean_pre, mean_rec))

    def _validation_loop(self, validation_data, validation_steps: int,
                         epoch: int):
        """ Looping through the validation set and ouputs the validation performance
        results in a final step.
        :param validation_data:
        :param validation_steps:
        """
        for batch, (x, y_true) in enumerate(validation_data):
            if batch >= validation_steps:
                break

            loss_total, grads, y_pred, m, emb, attn = iterative_optimize(
                optimizer=self._optimizer,
                model=self._model,
                x=x,
                y_true=y_true,
                data_properties=self._data_properties,
                training=False)
            loss_tt = lateness(x, y_pred)
            loss_ms = makespan(x, y_pred)
            setup_changes = count_setup_changes(x, y_pred)
            self._update_metric('valid', y_true, y_pred,
                                (loss_tt, loss_ms, loss_total), setup_changes,
                                batch)

            if batch % max(1, int(TB_LOG_INV_FREQ * 0.1)) == 0:
                self._tb_update(None, y_true, y_pred, m, emb, attn, epoch,
                                batch, validation_steps, 'valid/')
                self._log('valid', epoch * validation_steps + batch)

        self._print('valid', 0, 0, validation_steps - 1, validation_steps)

    def _update_metric(self,
                       key: str,
                       y_true: tf.Tensor,
                       y_pred: tf.Tensor,
                       loss: tuple,
                       setup_changes: tf.Tensor,
                       step=0):
        """ Updates the metrics.
        :param key:
        :param y_true:
        :param y_pred:
        :param loss:
        :param grads:
        """
        loss_tt, loss_ms, loss_total = loss

        self._loss_tt.get(key).append(loss_tt)
        self._loss_ms.get(key).append(loss_ms)
        self._loss_total.get(key).append(loss_total)

        self._setup_changes.get(key).append(setup_changes)

        self._tp_obj.update_state(y_true, y_pred)
        self._tp.get(key).append(self._tp_obj.result())

        self._tn_obj.update_state(y_true, y_pred)
        self._tn.get(key).append(self._tn_obj.result())

        self._fp_obj.update_state(y_true, y_pred)
        self._fp.get(key).append(self._fp_obj.result())

        self._fn_obj.update_state(y_true, y_pred)
        self._fn.get(key).append(self._fn_obj.result())

        self._pre_obj.update_state(y_true, y_pred)
        self._pre.get(key).append(self._pre_obj.result())

        self._rec_obj.update_state(y_true, y_pred)
        self._rec.get(key).append(self._rec_obj.result())

        shape = tf.shape(y_true)
        y_true = tf.squeeze(tf.transpose(y_true, [0, 2, 1, 3]))
        y_pred = tf.squeeze(tf.transpose(y_pred, [0, 2, 1, 3]))
        y_pred = tf.reshape(y_pred, [shape[0], shape[2], -1])
        y_true = tf.reshape(y_true, [shape[0], shape[2], -1])

        self._acc.get(key).append(categorical_accuracy(y_true, y_pred))

    def _log(self, key: str, epoch: int):
        """ Logs the training progress in a log file. If the log file dir parameter is set.
        :param key:
        :param epoch:
        """
        if not self._log_file_dir:
            return
        if not os.path.exists(self._log_file_dir):
            os.mkdir(self._log_file_dir)

        tf.summary.scalar(key + '/tardiness',
                          data=tf.reduce_mean(self._loss_tt.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/makespan',
                          data=tf.reduce_mean(self._loss_ms.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/loss',
                          data=tf.reduce_mean(self._loss_total.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/acc',
                          data=tf.reduce_mean(self._acc.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/setup_changes',
                          data=tf.reduce_mean(self._setup_changes.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/tp',
                          data=tf.reduce_mean(self._tp.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/fp',
                          data=tf.reduce_mean(self._fp.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/tn',
                          data=tf.reduce_mean(self._tn.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/fn',
                          data=tf.reduce_mean(self._fn.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/pre',
                          data=tf.reduce_mean(self._pre.get(key)),
                          step=epoch)
        tf.summary.scalar(key + '/rec',
                          data=tf.reduce_mean(self._rec.get(key)),
                          step=epoch)
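
iterative_optimize, lateness, makespan, and count_setup_changes are project-specific helpers. A minimal sketch of the shape iterative_optimize would need, assuming a standard tf.GradientTape step (the loss and the extra return values are placeholders, not the project's real ones):

def iterative_optimize(optimizer, model, x, y_true, data_properties, training):
    """Hypothetical sketch of the custom train/eval step used by the loop above."""
    with tf.GradientTape() as tape:
        # The real model apparently also returns machine indices, embeddings,
        # and attention weights; None placeholders stand in for them here.
        y_pred = model(x, training=training)
        loss_total = tf.reduce_mean(
            tf.keras.losses.binary_crossentropy(y_true, y_pred))
    grads = None
    if training:
        grads = tape.gradient(loss_total, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss_total, grads, y_pred, None, None, None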