Example No. 1
def autoencoder_model():
    
    input_img = layers.Input(shape=(28, 28, 1))

    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
    #print(x.shape)
    x = layers.MaxPooling2D((2, 2), padding='same')(x)
    #print(x.shape)
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    #print(x.shape)
    x = layers.MaxPooling2D((2, 2), padding='same')(x) #Encoded
    #print(x.shape)
    
    
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    #print(x.shape)
    x = layers.UpSampling2D((2, 2))(x)
    #print(x.shape)
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    #print(x.shape)
    x = layers.UpSampling2D((2, 2))(x)
    #print(x.shape)
    decoded = layers.Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
    #print(decoded.shape)
    autoencoder = models.Model(input_img, decoded)

    ########################################################################

    autoencoder.compile(optimizer=optimizers.Adam(learning_rate=0.002),  # alternative: optimizers.SGD(0.01)
                        metrics=[metrics.BinaryAccuracy()],
                        loss=losses.BinaryCrossentropy())

    return autoencoder
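A minimal training sketch for the model above, assuming the `layers`/`models`/`optimizers`/`losses`/`metrics` names come from `tensorflow.keras` (as the snippet implies) and using MNIST purely as illustrative data:

# Hedged usage sketch; the dataset choice and hyperparameters below are assumptions.
import numpy as np
from tensorflow.keras import layers, models, optimizers, losses, metrics
from tensorflow.keras.datasets import mnist

(x_train, _), (x_test, _) = mnist.load_data()
x_train = np.expand_dims(x_train.astype("float32") / 255.0, -1)  # (60000, 28, 28, 1)
x_test = np.expand_dims(x_test.astype("float32") / 255.0, -1)

autoencoder = autoencoder_model()
# the target equals the input: the network learns to reconstruct each image
autoencoder.fit(x_train, x_train,
                epochs=10,
                batch_size=128,
                validation_data=(x_test, x_test))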
Example No. 2
def train(model, x_train, y_train):
    model.compile(loss='binary_crossentropy',
                  optimizer=keras.optimizers.Adam(learning_rate=LEARNING_RATE),
                  metrics=[metrics.BinaryAccuracy(), metrics.Precision(), metrics.Recall()])
    checkpoint = ModelCheckpoint(NN_ATTACK_WEIGHTS_PATH, monitor='precision', verbose=1, save_best_only=True,
                                 mode='max')
    model.fit(x_train, y_train,
              epochs=EPOCHS,
              batch_size=BATCH_SIZE,
              callbacks=[checkpoint])
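The function relies on module-level names defined elsewhere in the project; below is a sketch of plausible placeholders (all values hypothetical) so the snippet runs in isolation. Note that `monitor='precision'` works because `metrics.Precision()` logs under the name `precision` by default.

# Hypothetical placeholders for names the snippet assumes; adjust to your project.
from tensorflow import keras
from tensorflow.keras import metrics
from tensorflow.keras.callbacks import ModelCheckpoint

LEARNING_RATE = 1e-3                           # assumed value
EPOCHS = 50                                    # assumed value
BATCH_SIZE = 32                                # assumed value
NN_ATTACK_WEIGHTS_PATH = "attack_weights.h5"   # assumed path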
Example No. 3
def inference(tfrecords_path, weights_path, wts_root):
    """ Inference function to reproduce the original model scores. This
    script can be run standalone with `python inference.py`.
    For more information, try: `python inference.py -h`

    Parameters
    ----------
    tfrecords_path: str
        The path to directory containing preprocessed tfrecords.
    weights_path: str
        The path to the combined model weights. A copy of the
        weights can be found here:
        https://gin.g-node.org/shashankbansal56/nondefaced-detector-reproducibility/src/master/pretrained_weights/combined
    wts_root: str
        The path to the root directory of all the model weights.
        A copy of the weights can be found here:
        https://gin.g-node.org/shashankbansal56/nondefaced-detector-reproducibility/src/master/pretrained_weights
    """

    model = CombinedClassifier(input_shape=(128, 128),
                               dropout=0.4,
                               wts_root=wts_root,
                               trainable=False)

    model.load_weights(os.path.abspath(weights_path))
    model.trainable = False

    dataset_test = get_dataset(
        file_pattern=os.path.join(tfrecords_path, "data-test_*"),
        n_classes=2,
        batch_size=16,
        volume_shape=(128, 128, 128),
        plane="combined",
        mode="test",
    )

    METRICS = [
        metrics.BinaryAccuracy(name="accuracy"),
        metrics.Precision(name="precision"),
        metrics.Recall(name="recall"),
        metrics.AUC(name="auc"),
    ]

    model.compile(
        loss=tf.keras.losses.binary_crossentropy,
        optimizer=Adam(learning_rate=1e-3),
        metrics=METRICS,
    )

    model.evaluate(dataset_test)
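A hedged invocation sketch; the paths below are placeholders, not the actual dataset or weight locations:

# Hypothetical paths; point these at your own preprocessed tfrecords and weights.
inference(
    tfrecords_path="/data/nondefaced/tfrecords",
    weights_path="pretrained_weights/combined/weights.h5",
    wts_root="pretrained_weights",
)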
Example No. 4
def load_badword_model() -> Model:
    """
    Loads the trained model. The loaded model has already been compiled.

    return: the pretrained tf.keras.Model object, returned in a compiled state.
    """
    model = load_model(get_path('model.h5'))
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=[
                      metrics.BinaryAccuracy(name="acc"),
                      metrics.Recall(name="recall"),
                      metrics.Precision(name="prec"),
                  ])

    return model
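A usage sketch; how inputs are tokenized and padded is project-specific, so `encode` below is a hypothetical helper standing in for that preprocessing:

# Hypothetical usage; `encode` is a stand-in for the project's text preprocessing.
model = load_badword_model()
x = encode(["example sentence"])      # must match the input shape the model was trained on
probs = model.predict(x)
is_badword = probs >= 0.5             # same threshold BinaryAccuracy uses by default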
Example No. 5
 def __get_metric(self, metric):
     if metric == "auc":
         return m.AUC()
     elif metric == "accuracy":
         return m.Accuracy()
     elif metric == "binary_accuracy":
         return m.BinaryAccuracy()
     elif metric == "categorical_accuracy":
         return m.CategoricalAccuracy()
     elif metric == "binary_crossentropy":
         return m.BinaryCrossentropy()
     elif metric == "categorical_crossentropy":
         return m.CategoricalCrossentropy()
     elif metric == "sparse_categorical_crossentropy":
         return m.SparseCategoricalCrossentropy()
     elif metric == "kl_divergence":
         return m.KLDivergence()
     elif metric == "poisson":
         return m.Poisson()
     elif metric == "mse":
         return m.MeanSquaredError()
     elif metric == "rmse":
         return m.RootMeanSquaredError()
     elif metric == "mae":
         return m.MeanAbsoluteError()
     elif metric == "mean_absolute_percentage_error":
         return m.MeanAbsolutePercentageError()
     elif metric == "mean_squared_logarithm_error":
         return m.MeanSquaredLogarithmicError()
     elif metric == "cosine_similarity":
         return m.CosineSimilarity()
     elif metric == "log_cosh_error":
         return m.LogCoshError()
     elif metric == "precision":
         return m.Precision()
     elif metric == "recall":
         return m.Recall()
     elif metric == "true_positive":
         return m.TruePositives()
     elif metric == "true_negative":
         return m.TrueNegatives()
     elif metric == "false_positive":
         return m.FalsePositives()
     elif metric == "false_negative":
         return m.FalseNegatives()
     else:
         raise Exception("specified metric not defined")
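The long `if/elif` chain can be collapsed into a lookup table; a sketch of an equivalent implementation with the same keys and metric classes:

# Dictionary-based equivalent of __get_metric (sketch); behavior is unchanged.
from tensorflow.keras import metrics as m

_METRIC_FACTORIES = {
    "auc": m.AUC, "accuracy": m.Accuracy,
    "binary_accuracy": m.BinaryAccuracy, "categorical_accuracy": m.CategoricalAccuracy,
    "binary_crossentropy": m.BinaryCrossentropy,
    "categorical_crossentropy": m.CategoricalCrossentropy,
    "sparse_categorical_crossentropy": m.SparseCategoricalCrossentropy,
    "kl_divergence": m.KLDivergence, "poisson": m.Poisson,
    "mse": m.MeanSquaredError, "rmse": m.RootMeanSquaredError,
    "mae": m.MeanAbsoluteError,
    "mean_absolute_percentage_error": m.MeanAbsolutePercentageError,
    "mean_squared_logarithm_error": m.MeanSquaredLogarithmicError,
    "cosine_similarity": m.CosineSimilarity, "log_cosh_error": m.LogCoshError,
    "precision": m.Precision, "recall": m.Recall,
    "true_positive": m.TruePositives, "true_negative": m.TrueNegatives,
    "false_positive": m.FalsePositives, "false_negative": m.FalseNegatives,
}

def get_metric(metric):
    try:
        return _METRIC_FACTORIES[metric]()
    except KeyError:
        raise Exception("specified metric not defined")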
Example No. 6
def model_vgg16_cifar(n_clasif, xshape):
    input_shape = xshape[1:]
    
    model = Sequential()
    # 2 x Conv
    model.add(Conv2D(64, (3, 3), input_shape=input_shape, padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    
    # 2 x Conv
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    
    # 3 x Conv 
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    
    # 3 x Conv
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    
    # 3 x Conv
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    #model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    
    model.add(Flatten())
    model.add(Dense(500, activation='relu', activity_regularizer=regularizers.l2(0.001)))
    model.add(Dense(500 , activation='relu', activity_regularizer=regularizers.l2(0.001)))
    #model.add(Dense(4096 , activation='relu'))
    model.add(Dense(n_clasif , activation='linear',  activity_regularizer=regularizers.l2(0.001) ))

    model.summary()

    # Compile the model
    model.compile(loss=losses.BinaryCrossentropy(from_logits=True), 
                  optimizer=optimizers.Adam(learning_rate=0.0001), #optimizers.SGD(lr=0.03), 
                  metrics=[metrics.BinaryAccuracy('acc')]) 
    return model
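A hedged usage sketch on CIFAR-10 (the dataset, batch size, and epoch count are assumptions); labels are one-hot encoded so the binary cross-entropy is applied per class on the linear logits:

# Usage sketch; assumes the same tf.keras imports the function itself relies on.
import tensorflow as tf
from tensorflow.keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

model = model_vgg16_cifar(n_clasif=10, xshape=x_train.shape)
model.fit(x_train, y_train, batch_size=64, epochs=5,
          validation_data=(x_test, y_test))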
Example No. 7
def ejer1_incep(n_epochs, n_clasif):
    (x_train, y_train), (x_test, y_test) = preprocesing()
    print(x_train.shape)

    model = tf.keras.applications.InceptionV3(include_top=False,
                                              weights=None,
                                              input_tensor=None,
                                              input_shape=None,
                                              pooling=None,
                                              classes=1000,
                                              classifier_activation='softmax')

    # Compile the model
    model.compile(
        loss=losses.BinaryCrossentropy(),
        optimizer=optimizers.Adam(
            learning_rate=0.0001),  #optimizers.SGD(lr=0.03), 
        metrics=[metrics.BinaryAccuracy('acc')])

    IDG = ImageDataGenerator(rotation_range=30,
                             width_shift_range=5,
                             height_shift_range=5,
                             shear_range=0.0,
                             fill_mode='nearest',
                             horizontal_flip=True,
                             vertical_flip=True)

    # history = model.fit(IDG.flow(x_train, y_train, batch_size=100),
    #             epochs=n_epochs,
    #             validation_data=(x_test,y_test))

    history = model.fit(x_train,
                        y_train,
                        batch_size=10,
                        epochs=n_epochs,
                        validation_data=(x_test, y_test))

    acc, val_acc, loss, val_loss = plot_ejercicio(history)

    np.savetxt("ejer1{}epochs{}_incep.txt".format("small", n_epochs),
               np.array([acc, val_acc, loss, val_loss]).T)
Example No. 8
    def experiment(self, under=False, ratio=3, plot=False):
        METRICS = [
            metrics.TruePositives(name='tp'),
            metrics.FalsePositives(name='fp'),
            metrics.TrueNegatives(name='tn'),
            metrics.FalseNegatives(name='fn'),
            metrics.BinaryAccuracy(name='accuracy'),
            metrics.Precision(name='precision'),
            metrics.Recall(name='recall'),
            metrics.AUC(name='auc')
        ]

        data = DataLoader()
        model = LeNet(data.X, METRICS)
        augmenter = Augmenter(data.X, data.Y)

        if under:
            data.X, data.Y = augmenter.undersample(ratio=ratio)

        if self.augmentation.type == 1 or self.augmentation.type == 2:
            data.X, data.Y = augmenter.duplicate(noise=self.augmentation.noise,
                                                 sigma=self.augmentation.sigma)
        elif self.augmentation.type == 3:
            data.X, data.Y = augmenter.SMOTE()

        #data.normalize()
        #print(len(data.X))
        #print(len(data.valX))

        data.summarize(test=False)
        his = model.fit(data.X, data.Y, data.valX, data.valY)
        RES, fpr, tpr = model.predict(data.testX, data.testY)
        #self.model_summary(RES)
        if plot:
            self.plot(his)
            self.ROC(fpr, tpr)
        return RES
Example No. 9
    def _fit_transductive_embedder(self, train_graph):
        """Fit transductive embedder (no model, just embeddings)."""
        if self.model_name == "node2vec":
            return _fit_node2vec(train_graph,
                                 self.params,
                                 edge_weight=self.graph_configs["edge_weight"])

        if self.model_name in ["gcn_dgi", "gat_dgi", "graphsage_dgi"]:
            return _fit_deep_graph_infomax(train_graph, self.params,
                                           self.model_name)

        generator = _dispatch_generator(train_graph, self.model_name,
                                        self.params)
        embedding_layer = _dispatch_transductive_layer(generator,
                                                       self.model_name,
                                                       self.params)

        x_inp, x_out = embedding_layer.in_out_tensors()

        # Create an embedding model
        model = Model(inputs=x_inp, outputs=x_out)
        model.compile(
            optimizer=optimizers.Adam(learning_rate=0.001),
            loss=LOSSES[self.model_name],
            metrics=[metrics.BinaryAccuracy(threshold=0.0)],
        )

        # Train the embedding model
        train_generator = _generate_transductive_train_flow(
            train_graph, generator, self.model_name, self.params)

        model.fit(train_generator, epochs=self.params["epochs"], verbose=0)
        if self.model_name == "watchyourstep":
            embeddings = embedding_layer.embeddings()[0]
        else:
            embeddings = embedding_layer.embeddings()[0]
        return embeddings
Example No. 10
    def compile(self, model, train_generator, valid_generator):
        """
        This function contains the model compile and fit process: it takes a model
        plus the train/validation generators and returns the training history.
        """
        start_time = time()
        print("*" * 40, "Start {} Processing".format(model._name), "*" * 40)

        # we use several metrics to evaluate our binary classification results
        METRICS = [
              metrics.TruePositives(name='tp'),
              metrics.FalsePositives(name='fp'),
              metrics.TrueNegatives(name='tn'),
              metrics.FalseNegatives(name='fn'),
              metrics.BinaryAccuracy(name='binary_accuracy'),
              #metrics.CategoricalAccuracy(name='accuracy'),
              metrics.Precision(name='precision'),
              metrics.Recall(name='recall'),
              metrics.AUC(name='auc'),
              # F1Score(num_classes = int(y_train.shape[1]), name='F1')
        ]

        # define an optimizer
        opt_rms = optimizers.RMSprop(learning_rate=1e-4, decay=1e-5)
        # define compile parameters (pass the METRICS list defined above)
        model.compile(loss='binary_crossentropy', optimizer=opt_rms, metrics=METRICS)
        # start to fit
        history = model.fit(
            train_generator,
            steps_per_epoch=20,
            epochs=5,
            validation_data=valid_generator,
            validation_steps=20
        )

        return history
Example No. 11
def get_metrics_from_generator(generator,
                               threshold_range=(0.01, 0.99),
                               verbose=1):
    y_hat = visual_model.predict_generator(
        generator,
        steps=generator.steps,
        workers=FLAGS.generator_workers,
        max_queue_size=FLAGS.generator_queue_length,
        verbose=verbose)
    y = generator.get_y_true()
    if FLAGS.multi_label_classification:
        get_multilabel_evaluation_metrics(y_hat,
                                          y,
                                          FLAGS.classes,
                                          thresh_range=threshold_range)
    else:
        y_hat = y_hat.argmax(axis=1)
        get_evaluation_metrics(y_hat, y, FLAGS.classes)


if FLAGS.multi_label_classification:
    visual_model.compile(loss='binary_crossentropy',
                         metrics=[metrics.BinaryAccuracy(threshold=0.5)])
else:
    visual_model.compile(loss='sparse_categorical_crossentropy',
                         metrics=['accuracy'])

print("***************Train Metrics*********************")
get_metrics_from_generator(train_generator, FLAGS.multilabel_threshold_range)
print("***************Test Metrics**********************")
get_metrics_from_generator(test_generator, FLAGS.multilabel_threshold_range)
Example No. 12
model.compile(loss=contrastive_loss, optimizer="adam", metrics=[accuracy])

model.summary()

"""
Now we can train our model using the pairs we generated from the train and validation
sets.
"""

history = model.fit(
    x=[x_pairs_train[:, 0], x_pairs_train[:, 1]],
    y=y_pairs_train[:],
    validation_data=([x_pairs_val[:, 0], x_pairs_val[:, 1]], y_pairs_val[:]),
    batch_size=64,
    epochs=15,
)

plot_history(history.history)

"""
Finally, we can use the pairs we generated from the test set to make predictions
with our model and display some of the results along with its overall accuracy.
"""

predictions = np.round(1 - model.predict([x_pairs_test[:, 0], x_pairs_test[:, 1]]))
display_pairs(x_pairs_test, predictions, predictions == y_pairs_test)

accuracy = metrics.BinaryAccuracy()
accuracy.update_state(y_pairs_test, predictions)
print(f"\nAccuracy: {accuracy.result().numpy()}")
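`contrastive_loss` and `accuracy` are defined earlier in the original script; here is a hedged sketch of typical definitions (the margin value and the label convention — 1 for similar pairs, distance as the model output — are assumptions):

# Illustrative helpers matching the compile() call above; not the original definitions.
import tensorflow as tf
from tensorflow.keras import metrics

def contrastive_loss(y_true, y_pred, margin=1.0):
    # y_true: 1 for similar pairs, 0 for dissimilar; y_pred: predicted distance
    y_true = tf.cast(y_true, y_pred.dtype)
    squared_distance = tf.square(y_pred)
    margin_term = tf.square(tf.maximum(margin - y_pred, 0.0))
    return tf.reduce_mean(y_true * squared_distance + (1.0 - y_true) * margin_term)

def accuracy(y_true, y_pred):
    # pairs closer than 0.5 are treated as "similar", then scored with binary accuracy
    return metrics.binary_accuracy(tf.cast(y_true, y_pred.dtype),
                                   tf.cast(y_pred < 0.5, y_pred.dtype))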
Example No. 13
    def build_and_compile(self, local_model_name, local_settings,
                          local_hyperparameters):
        try:
            # keras,tf session/random seed reset/fix
            # kb.clear_session()
            # tf.compat.v1.reset_default_graph()
            np.random.seed(11)
            tf.random.set_seed(2)

            # load hyperparameters
            units_layer_1 = local_hyperparameters['units_layer_1']
            units_layer_2 = local_hyperparameters['units_layer_2']
            units_layer_3 = local_hyperparameters['units_layer_3']
            units_layer_4 = local_hyperparameters['units_layer_4']
            units_dense_layer_4 = local_hyperparameters['units_dense_layer_4']
            units_final_layer = local_hyperparameters['units_final_layer']
            activation_1 = local_hyperparameters['activation_1']
            activation_2 = local_hyperparameters['activation_2']
            activation_3 = local_hyperparameters['activation_3']
            activation_4 = local_hyperparameters['activation_4']
            activation_dense_layer_4 = local_hyperparameters[
                'activation_dense_layer_4']
            activation_final_layer = local_hyperparameters[
                'activation_final_layer']
            dropout_layer_1 = local_hyperparameters['dropout_layer_1']
            dropout_layer_2 = local_hyperparameters['dropout_layer_2']
            dropout_layer_3 = local_hyperparameters['dropout_layer_3']
            dropout_layer_4 = local_hyperparameters['dropout_layer_4']
            dropout_dense_layer_4 = local_hyperparameters[
                'dropout_dense_layer_4']
            input_shape_y = local_hyperparameters['input_shape_y']
            input_shape_x = local_hyperparameters['input_shape_x']
            nof_channels = local_hyperparameters['nof_channels']
            stride_y_1 = local_hyperparameters['stride_y_1']
            stride_x_1 = local_hyperparameters['stride_x_1']
            kernel_size_y_1 = local_hyperparameters['kernel_size_y_1']
            kernel_size_x_1 = local_hyperparameters['kernel_size_x_1']
            kernel_size_y_2 = local_hyperparameters['kernel_size_y_2']
            kernel_size_x_2 = local_hyperparameters['kernel_size_x_2']
            kernel_size_y_3 = local_hyperparameters['kernel_size_y_3']
            kernel_size_x_3 = local_hyperparameters['kernel_size_x_3']
            kernel_size_y_4 = local_hyperparameters['kernel_size_y_4']
            kernel_size_x_4 = local_hyperparameters['kernel_size_x_4']
            pool_size_y_1 = local_hyperparameters['pool_size_y_1']
            pool_size_x_1 = local_hyperparameters['pool_size_x_1']
            pool_size_y_2 = local_hyperparameters['pool_size_y_2']
            pool_size_x_2 = local_hyperparameters['pool_size_x_2']
            pool_size_y_3 = local_hyperparameters['pool_size_y_3']
            pool_size_x_3 = local_hyperparameters['pool_size_x_3']
            pool_size_y_4 = local_hyperparameters['pool_size_y_4']
            pool_size_x_4 = local_hyperparameters['pool_size_x_4']
            optimizer_function = local_hyperparameters['optimizer']
            optimizer_learning_rate = local_hyperparameters['learning_rate']
            epsilon_adam = local_hyperparameters['epsilon_adam']
            if optimizer_function == 'adam':
                optimizer_function = optimizers.Adam(
                    learning_rate=optimizer_learning_rate,
                    epsilon=epsilon_adam)
            elif optimizer_function == 'ftrl':
                optimizer_function = optimizers.Ftrl(optimizer_learning_rate)
            elif optimizer_function == 'sgd':
                optimizer_function = optimizers.SGD(optimizer_learning_rate)
            elif optimizer_function == 'rmsp':
                optimizer_function = optimizers.RMSprop(
                    optimizer_learning_rate, epsilon=epsilon_adam)
            optimizer_function = tf.train.experimental.enable_mixed_precision_graph_rewrite(
                optimizer_function)
            loss_1 = local_hyperparameters['loss_1']
            loss_2 = local_hyperparameters['loss_2']
            loss_3 = local_hyperparameters['loss_3']
            label_smoothing = local_hyperparameters['label_smoothing']
            losses_list = []
            union_settings_losses = [loss_1, loss_2, loss_3]
            if 'CategoricalCrossentropy' in union_settings_losses:
                losses_list.append(
                    losses.CategoricalCrossentropy(
                        label_smoothing=label_smoothing))
            if 'BinaryCrossentropy' in union_settings_losses:
                losses_list.append(losses.BinaryCrossentropy())
            if 'CategoricalHinge' in union_settings_losses:
                losses_list.append(losses.CategoricalHinge())
            if 'KLD' in union_settings_losses:
                losses_list.append(losses.KLDivergence())
            if 'customized_loss_function' in union_settings_losses:
                losses_list.append(customized_loss())
            if 'customized_loss_t2' in union_settings_losses:
                losses_list.append(customized_loss_t2)
            if "Huber" in union_settings_losses:
                losses_list.append(losses.Huber())
            metrics_list = []
            metric1 = local_hyperparameters['metrics1']
            metric2 = local_hyperparameters['metrics2']
            union_settings_metrics = [metric1, metric2]
            if 'auc_roc' in union_settings_metrics:
                metrics_list.append(metrics.AUC())
            if 'customized_metric_auc_roc' in union_settings_metrics:
                metrics_list.append(customized_metric_auc_roc())
            if 'CategoricalAccuracy' in union_settings_metrics:
                metrics_list.append(metrics.CategoricalAccuracy())
            if 'CategoricalHinge' in union_settings_metrics:
                metrics_list.append(metrics.CategoricalHinge())
            if 'BinaryAccuracy' in union_settings_metrics:
                metrics_list.append(metrics.BinaryAccuracy())
            if local_settings['use_efficientNetB2'] == 'False':
                type_of_model = '_custom'
                if local_hyperparameters['regularizers_l1_l2_1'] == 'True':
                    l1_1 = local_hyperparameters['l1_1']
                    l2_1 = local_hyperparameters['l2_1']
                    activation_regularizer_1 = regularizers.l1_l2(l1=l1_1,
                                                                  l2=l2_1)
                else:
                    activation_regularizer_1 = None
                if local_hyperparameters['regularizers_l1_l2_2'] == 'True':
                    l1_2 = local_hyperparameters['l1_2']
                    l2_2 = local_hyperparameters['l2_2']
                    activation_regularizer_2 = regularizers.l1_l2(l1=l1_2,
                                                                  l2=l2_2)
                else:
                    activation_regularizer_2 = None
                if local_hyperparameters['regularizers_l1_l2_3'] == 'True':
                    l1_3 = local_hyperparameters['l1_3']
                    l2_3 = local_hyperparameters['l2_3']
                    activation_regularizer_3 = regularizers.l1_l2(l1=l1_3,
                                                                  l2=l2_3)
                else:
                    activation_regularizer_3 = None
                if local_hyperparameters['regularizers_l1_l2_4'] == 'True':
                    l1_4 = local_hyperparameters['l1_4']
                    l2_4 = local_hyperparameters['l2_4']
                    activation_regularizer_4 = regularizers.l1_l2(l1=l1_4,
                                                                  l2=l2_4)
                else:
                    activation_regularizer_4 = None
                if local_hyperparameters[
                        'regularizers_l1_l2_dense_4'] == 'True':
                    l1_dense_4 = local_hyperparameters['l1_dense_4']
                    l2_dense_4 = local_hyperparameters['l2_dense_4']
                    activation_regularizer_dense_layer_4 = regularizers.l1_l2(
                        l1=l1_dense_4, l2=l2_dense_4)
                else:
                    activation_regularizer_dense_layer_4 = None

                # building model
                classifier_ = tf.keras.models.Sequential()
                # first layer
                classifier_.add(
                    layers.Input(shape=(input_shape_y, input_shape_x,
                                        nof_channels)))
                # classifier_.add(layers.ZeroPadding2D(padding=((0, 1), (0, 1))))
                classifier_.add(
                    layers.Conv2D(
                        units_layer_1,
                        kernel_size=(kernel_size_y_1, kernel_size_x_1),
                        strides=(stride_y_1, stride_x_1),
                        activity_regularizer=activation_regularizer_1,
                        activation=activation_1,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(layers.GlobalAveragePooling2D())
                classifier_.add(layers.Dropout(dropout_layer_1))
                # LAYER 1.5
                classifier_.add(
                    layers.Conv2D(
                        units_layer_1,
                        kernel_size=(kernel_size_y_1, kernel_size_x_1),
                        input_shape=(input_shape_y, input_shape_x,
                                     nof_channels),
                        strides=(stride_y_1, stride_x_1),
                        activity_regularizer=activation_regularizer_1,
                        activation=activation_1,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(layers.GlobalAveragePooling2D())
                classifier_.add(layers.Dropout(dropout_layer_1))
                # second layer
                classifier_.add(
                    layers.Conv2D(
                        units_layer_2,
                        kernel_size=(kernel_size_y_2, kernel_size_x_2),
                        activity_regularizer=activation_regularizer_2,
                        activation=activation_2,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(layers.GlobalAveragePooling2D())
                classifier_.add(layers.Dropout(dropout_layer_2))
                # LAYER 2.5
                classifier_.add(
                    layers.Conv2D(
                        units_layer_2,
                        kernel_size=(kernel_size_y_2, kernel_size_x_2),
                        activity_regularizer=activation_regularizer_2,
                        activation=activation_2,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(layers.GlobalAveragePooling2D())
                classifier_.add(layers.Dropout(dropout_layer_2))
                # third layer
                classifier_.add(
                    layers.Conv2D(
                        units_layer_3,
                        kernel_size=(kernel_size_y_3, kernel_size_x_3),
                        activity_regularizer=activation_regularizer_3,
                        activation=activation_3,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(layers.GlobalAveragePooling2D())
                classifier_.add(layers.Dropout(dropout_layer_3))
                # LAYER 3.5
                classifier_.add(
                    layers.Conv2D(
                        units_layer_3,
                        kernel_size=(kernel_size_y_3, kernel_size_x_3),
                        activity_regularizer=activation_regularizer_3,
                        activation=activation_3,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(layers.GlobalAveragePooling2D())
                classifier_.add(layers.Dropout(dropout_layer_3))
                # fourth layer
                classifier_.add(
                    layers.Conv2D(
                        units_layer_4,
                        kernel_size=(kernel_size_y_4, kernel_size_x_4),
                        activity_regularizer=activation_regularizer_4,
                        activation=activation_4,
                        padding='same',
                        kernel_initializer=tf.keras.initializers.
                        VarianceScaling(scale=2.,
                                        mode='fan_out',
                                        distribution='truncated_normal')))
                classifier_.add(layers.BatchNormalization(axis=-1))
                classifier_.add(layers.Activation(tf.keras.activations.swish))
                classifier_.add(layers.GlobalAveragePooling2D())
                classifier_.add(layers.Dropout(dropout_layer_4))
                # Full connection and final layer
                classifier_.add(
                    layers.Dense(units=units_final_layer,
                                 activation=activation_final_layer))
                # Compile model
                classifier_.compile(optimizer=optimizer_function,
                                    loss=losses_list,
                                    metrics=metrics_list)

            elif local_settings['use_efficientNetB2'] == 'True':
                type_of_model = '_EfficientNetB2'
                # pretrained_weights = ''.join([local_settings['models_path'],
                #                               local_hyperparameters['weights_for_training_efficientnetb2']])
                classifier_pretrained = tf.keras.applications.EfficientNetB2(
                    include_top=False,
                    weights='imagenet',
                    input_tensor=None,
                    input_shape=(input_shape_y, input_shape_x, 3),
                    pooling=None,
                    classifier_activation=None)
                # classifier_pretrained.save_weights(''.join([local_settings['models_path'],
                #                                             'pretrained_efficientnetb2_weights.h5']))
                #
                # classifier_receptor = tf.keras.applications.EfficientNetB2(include_top=False, weights=None,
                #                                                              input_tensor=None,
                #                                                              input_shape=(input_shape_y,
                #                                                                           input_shape_x, 1),
                #                                                              pooling=None,
                #                                                              classifier_activation=None)
                #
                # classifier_receptor.load_weights(''.join([local_settings['models_path'],
                #                                             'pretrained_efficientnetb2_weights.h5']), by_name=True)
                #
                # classifier_pretrained = classifier_receptor

                if local_settings['nof_classes'] == 2 or local_hyperparameters[
                        'use_bias_always'] == 'True':
                    # if two classes, log10(pos/neg) = log10(0.75/0.25) ≈ 0.477121254719
                    bias_initializer = tf.keras.initializers.Constant(
                        local_hyperparameters['bias_initializer'])
                else:
                    # assuming balanced classes...
                    bias_initializer = tf.keras.initializers.Constant(0)

                effnb2_model = models.Sequential(classifier_pretrained)
                effnb2_model.add(layers.GlobalAveragePooling2D())
                effnb2_model.add(layers.Dropout(dropout_dense_layer_4))
                # effnb2_model.add(layers.Dense(units=units_dense_layer_4, activation=activation_dense_layer_4,
                #                  kernel_initializer=tf.keras.initializers.VarianceScaling(scale=0.333333333,
                #                                                                           mode='fan_out',
                #                                                                           distribution='uniform'),
                #                               bias_initializer=bias_initializer))
                # effnb2_model.add(layers.Dropout(dropout_dense_layer_4))
                effnb2_model.add(
                    layers.Dense(units_final_layer,
                                 activation=activation_final_layer,
                                 kernel_initializer=tf.keras.initializers.
                                 VarianceScaling(scale=0.333333333,
                                                 mode='fan_out',
                                                 distribution='uniform'),
                                 bias_initializer=bias_initializer))
                classifier_ = effnb2_model

                if local_settings[
                        'use_local_pretrained_weights_for_retraining'] != 'False':
                    classifier_.load_weights(''.join([
                        local_settings['models_path'], local_settings[
                            'use_local_pretrained_weights_for_retraining']
                    ]))
                    for layer in classifier_.layers[0].layers:
                        layer.trainable = True
                        # if 'excite' in layer.name:
                        #     layer.trainable = True
                        # if 'top_conv' in layer.name:
                        #     layer.trainable = True
                        # if 'project_conv' in layer.name:
                        #     layer.trainable = True

                classifier_.build(input_shape=(input_shape_y, input_shape_x,
                                               nof_channels))
                classifier_.compile(optimizer=optimizer_function,
                                    loss=losses_list,
                                    metrics=metrics_list)

            # Summary of model
            classifier_.summary()

            # save_model
            classifier_json = classifier_.to_json()
            with open(''.join([local_settings['models_path'], local_model_name, type_of_model,
                               '_classifier_.json']), 'w') \
                    as json_file:
                json_file.write(classifier_json)
                json_file.close()
            classifier_.save(''.join([
                local_settings['models_path'], local_model_name, type_of_model,
                '_classifier_.h5'
            ]))
            classifier_.save(''.join([
                local_settings['models_path'], local_model_name, type_of_model,
                '/'
            ]),
                             save_format='tf')
            print('model architecture saved')

            # output png and pdf with model, additionally saves a json file model_name_analyzed.json
            if local_settings['model_analyzer'] == 'True':
                model_architecture = model_structure()
                model_architecture_review = model_architecture.analize(
                    ''.join(
                        [local_model_name, type_of_model, '_classifier_.h5']),
                    local_settings, local_hyperparameters)
        except Exception as e:
            print('error in build or compile of customized model')
            print(e)
            classifier_ = None
            logger.error(str(e), exc_info=True)
        return classifier_
Example No. 14
 def __init__(self, classes=10):
     super().__init__()  # call the parent constructor first so Keras can track the attributes below
     self.build_generator(classes=classes)
     self.build_solver(classes=classes, name="Solver")
     self.ce = losses.BinaryCrossentropy(from_logits=True)
     self.gen_acc = metrics.BinaryAccuracy()
Example No. 15
test_generator = get_generator(FLAGS.test_csv, FLAGS)

if FLAGS.load_model_path != '' and FLAGS.load_model_path is not None:
    visual_model = load_model(FLAGS.load_model_path)
    if FLAGS.show_model_summary:
        visual_model.summary()
else:
    visual_model = model_factory.get_model(FLAGS)

def get_metrics_from_generator(generator, threshold=0.5, verbose=1):
    y_hat = visual_model.predict(generator, steps=generator.steps,
                                 workers=FLAGS.generator_workers,
                                 max_queue_size=FLAGS.generator_queue_length,
                                 verbose=verbose)
    y = generator.get_y_true()
    if FLAGS.multi_label_classification:
        get_multilabel_evaluation_metrics(y_hat, y, FLAGS.classes,
                                          threshold=threshold,
                                          image_names=generator.get_images_names(),
                                          save_path=os.path.join(FLAGS.save_model_path, 'exact_match.csv'))
    else:
        y_hat = y_hat.argmax(axis=1)
        get_evaluation_metrics(y_hat, y, FLAGS.classes)

if FLAGS.multi_label_classification:
    visual_model.compile(loss='binary_crossentropy',
                         metrics=[metrics.BinaryAccuracy(threshold=FLAGS.multilabel_threshold)])
else:
    visual_model.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy'])

print("***************Train Metrics*********************")
get_metrics_from_generator(train_generator, FLAGS.multilabel_threshold)
print("***************Test Metrics**********************")
get_metrics_from_generator(test_generator, FLAGS.multilabel_threshold)

Example No. 16
    for epoch in range(epochs):
        train_loss = 0.
        x_, t_ = shuffle(x_train, t_train)

        for batch in range(n_batches):
            start = batch * batch_size
            end = start + batch_size
            loss = train_step(x_[start:end], t_[start:end])
            train_loss += loss.numpy()

        print('epoch: {}, loss: {:.3}'.format(epoch + 1, train_loss))
    '''
    4. Model evaluation
    '''
    test_loss = metrics.Mean()
    test_acc = metrics.BinaryAccuracy()

    def test_step(x, t):
        preds = model(x)
        loss = compute_loss(t, preds)
        test_loss(loss)
        test_acc(t, preds)

        return loss

    test_step(x_test, t_test)

    print('test_loss: {:.3f}, test_acc: {:.3f}'.format(test_loss.result(),
                                                       test_acc.result()))
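The excerpt assumes `model`, `compute_loss`, `train_step`, `shuffle`, and the data splits were defined earlier; below is a minimal, purely illustrative sketch of those pieces (a one-layer logistic regression):

# Illustrative stand-ins for the objects the excerpt assumes; not the original code.
import tensorflow as tf
from sklearn.utils import shuffle            # the excerpt's shuffle() presumably comes from here
from tensorflow.keras import layers, losses, metrics, optimizers

model = tf.keras.Sequential([layers.Dense(1, activation='sigmoid')])
criterion = losses.BinaryCrossentropy()
optimizer = optimizers.SGD(learning_rate=0.1)

def compute_loss(t, preds):
    return criterion(t, preds)

def train_step(x, t):
    with tf.GradientTape() as tape:
        preds = model(x)
        loss = compute_loss(t, preds)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss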
Example No. 17
import tensorflow as tf
from os import listdir
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.resnet import ResNet50
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers, models, optimizers, losses, metrics, Model
from sklearn.metrics import f1_score, recall_score, precision_score, confusion_matrix, accuracy_score, classification_report

LABELS = listdir("asl_data/train")
NUM_LABELS = len(LABELS)
INPUT_SHAPE = (200, 200, 3)
LEARNING_RATE = 0.0001
DROPOUT_RATE = 0.1
NUM_NODES = 256
METRICS = [metrics.BinaryAccuracy(), metrics.Precision(), metrics.Recall()]


def get_baseline_model(print_summary=True):
    """
    Builds and compiles a baseline TensorFlow model: the flattened input feeds a
    single softmax Dense layer with one node per label.
    """
    baseline_model = tf.keras.Sequential()
    baseline_model.add(layers.Flatten(input_shape=INPUT_SHAPE))
    baseline_model.add(layers.Dense(NUM_LABELS, activation="softmax"))
    baseline_model.compile(
        loss=losses.CategoricalCrossentropy(),
        optimizer=optimizers.Adam(learning_rate=LEARNING_RATE),
        metrics=METRICS)
    if print_summary:
        baseline_model.summary()
    return baseline_model
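A hedged training sketch that reuses the imports and constants above; the directory layout under asl_data/train, the batch size, and the epoch count are assumptions:

# Usage sketch; hyperparameters and directory structure are assumptions.
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_flow = train_datagen.flow_from_directory(
    "asl_data/train",
    target_size=INPUT_SHAPE[:2],
    batch_size=32,
    class_mode="categorical",
)

baseline_model = get_baseline_model(print_summary=False)
baseline_model.fit(train_flow, epochs=5)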
Example No. 18
def main(train_dir):

    # Divide into train and test set.
    train_start_idx, train_end_idx = (0, 272)
    val_start_idx, val_end_idx = (272, 287)

    train_epochs = 10
    batch_size = 4

    # Getting filenames from the kitti dataset
    image_names, segmentation_names = kitti_image_filenames('data_road')

    preprocess_train = preprocess
    preprocess_val = preprocess

    # Get image tensors from the filenames
    train_set = kitti_dataset_from_filenames(
        image_names[train_start_idx:train_end_idx],
        segmentation_names[train_start_idx:train_end_idx],
        preprocess=preprocess_train,
        batch_size=batch_size
    )
    # Get the validation tensors
    val_set = kitti_dataset_from_filenames(
        image_names[val_start_idx:val_end_idx],
        segmentation_names[val_start_idx:val_end_idx],
        batch_size=batch_size,
        preprocess=preprocess_val,
        shuffle=False
    )

    #model = segmentation_models.simple_model((HEIGHT, WIDTH, 3))
    model = segmentation_models.unet((HEIGHT, WIDTH, 3))

    model.summary()

    optimizer = optimizers.Adam(learning_rate=1e-4)
    loss_fn = losses.BinaryCrossentropy(from_logits=False)

    print("Summaries are written to '%s'." % train_dir)
    writer = tf.summary.create_file_writer(train_dir, flush_millis=3000)
    summary_interval = 10

    train_accuracy = metrics.BinaryAccuracy(threshold=0.5)
    train_loss = metrics.Mean()
    val_accuracy = metrics.BinaryAccuracy(threshold=0.5)
    val_loss = metrics.Mean()
    step = 0
    start_training = start = time.time()
    for epoch in range(train_epochs):

        print("Training epoch: %d" % epoch)
        for image, y in train_set:
            with tf.GradientTape() as tape:
                y_pred = model(image)
                loss = loss_fn(y, y_pred)

            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            # update metrics and step
            train_loss.update_state(loss)
            train_accuracy.update_state(y, y_pred)
            step += 1

            if step % summary_interval == 0:
                duration = time.time() - start
                print("step %d. sec/batch: %g. Train loss: %g" % (
                    step, duration/summary_interval, train_loss.result().numpy()))
                # write summaries to TensorBoard
                with writer.as_default():
                    tf.summary.scalar("train_loss", train_loss.result(), step=step)
                    tf.summary.scalar("train_accuracy", train_accuracy.result(), step=step)
                    vis = vis_mask(image, y_pred >= 0.5)
                    tf.summary.image("train_image", vis, step=step)

                # reset metrics and time
                train_loss.reset_states()
                train_accuracy.reset_states()
                start = time.time()


        # Do validation after each epoch
        for i, (image, y) in enumerate(val_set):
            y_pred = model(image)
            loss = loss_fn(y, y_pred)
            val_loss.update_state(loss)
            val_accuracy.update_state(y, y_pred)

            with writer.as_default():
                vis = vis_mask(image, y_pred >= 0.5)
                tf.summary.image("val_image_batch_%d" % i, vis, step=step, max_outputs=batch_size)

        with writer.as_default():
            tf.summary.scalar("val_loss", val_loss.result(), step=step)
            tf.summary.scalar("val_accuracy", val_accuracy.result(), step=step)
        val_loss.reset_states()
        val_accuracy.reset_states()

    print("Finished training %d epochs in %g minutes." % (
        train_epochs, (time.time() - start_training)/60))
    # save a model which we can later load by tf.keras.models.load_model(model_path)
    model_path = os.path.join(train_dir, "model.h5")
    print("Saving model to '%s'." % model_path)
    model.save(model_path)
Example No. 19
def use_class_model():
    model = Mymodel((None, 33), 1)

    from tensorflow import function, GradientTape

    @function
    def train(features, labels):
        with GradientTape() as tape:
            predictions = model(features)
            loss = loss_fn(labels, predictions)
        grad = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grad, model.trainable_variables))

        train_loss(loss)
        train_accuracy(labels, predictions)

    @function
    def test(features, labels):
        predictions = model(features)
        loss = loss_fn(labels, predictions)

        test_loss(loss)
        test_accuracy(labels, predictions)

    lr = 0.001
    beta_1 = 0.8
    beta_2 = 0.9
    EPOCHS = 100
    loss_fn = losses.BinaryCrossentropy()
    optimizer = optimizers.Adam(learning_rate=lr)
    train_loss = metrics.Mean(name='train_loss')
    train_accuracy = metrics.BinaryAccuracy(name='train_accuracy')

    test_loss = metrics.Mean(name='test_loss')
    test_accuracy = metrics.BinaryAccuracy(name='test_accuracy')

    from tensorflow.data import Dataset
    train_ds = Dataset.from_tensor_slices(
        (train_features, train_labels)).shuffle(10000).batch(32)

    test_ds = Dataset.from_tensor_slices(
        (test_features, test_labels)).shuffle(10000).batch(32)

    train_loss_vec = []
    test_loss_vec = []
    train_acc_vec = []
    test_acc_vec = []

    for epoch in range(EPOCHS):
        train_loss.reset_states()
        train_accuracy.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()
        for features, labels in train_ds:
            train(features, labels)
        for features, labels in test_ds:
            test(features, labels)

        # store the scalar results; appending the metric objects themselves would
        # leave every entry pointing at the same (mutable) metric
        train_loss_vec.append(train_loss.result().numpy())
        test_loss_vec.append(test_loss.result().numpy())

        train_acc_vec.append(train_accuracy.result().numpy())
        test_acc_vec.append(test_accuracy.result().numpy())

        if epoch % 10 == 0:
            print(f'Epoch {epoch + 1}, '
                  f'Loss: {train_loss.result():.2f}, '
                  f'Accuracy: {train_accuracy.result() * 100:.2f}, '
                  f'Test Loss: {test_loss.result():.2f}, '
                  f'Test Accuracy: {test_accuracy.result() * 100:.2f}')
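`Mymodel`, `train_features`, `train_labels`, and the test arrays come from earlier in the original script; here is a hedged sketch of a subclassed model with the same constructor signature (layer sizes are assumptions):

# Hypothetical definition matching the Mymodel((None, 33), 1) call; not the original class.
import tensorflow as tf
from tensorflow.keras import layers

class Mymodel(tf.keras.Model):
    def __init__(self, input_shape, output_dim):
        super().__init__()
        # layer shapes are inferred on the first call, so input_shape is informational here
        self.hidden = layers.Dense(64, activation='relu')
        self.out = layers.Dense(output_dim, activation='sigmoid')

    def call(self, x):
        return self.out(self.hidden(x))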
Example No. 20
    def train_model(self, themes_weight: List[float],
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):
        epochs = 60
        embedding_output_dim = 128
        last_dim = 128

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        model = tf.keras.Sequential([
            keras.layers.Embedding(input_dim=voc_size,
                                   input_length=article_length,
                                   output_dim=embedding_output_dim,
                                   mask_zero=True),
            keras.layers.Conv1D(filters=64,
                                kernel_size=3,
                                input_shape=(voc_size, embedding_output_dim),
                                activation=tf.nn.relu),
            keras.layers.GlobalMaxPooling1D(),
            keras.layers.Dropout(0.2),
            keras.layers.Dense(last_dim, activation=tf.nn.relu),
            keras.layers.Dropout(0.2),
            keras.layers.Dense(theme_count,
                               activation=tf.nn.sigmoid,
                               kernel_regularizer=regularizers.l2(0.2),
                               activity_regularizer=regularizers.l1(0.1))
        ])

        model.summary()

        model.compile(optimizer=tf.keras.optimizers.Adam(clipnorm=1,
                                                         clipvalue=0.5),
                      loss=WeightedBinaryCrossEntropy(themes_weight,
                                                      from_logits=True),
                      metrics=[
                          metrics.AUC(),
                          metrics.BinaryAccuracy(),
                          metrics.TruePositives(),
                          metrics.TrueNegatives(),
                          metrics.FalseNegatives(),
                          metrics.FalsePositives(),
                          metrics.Recall(),
                          metrics.Precision()
                      ],
                      run_eagerly=self.run_eagerly)

        keras.utils.plot_model(model,
                               "output/" + self.model_name + ".png",
                               show_shapes=True)

        model.fit(dataset.trainData,
                  epochs=epochs,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=[ManualInterrupter(), keras_callback])

        model.save("output/" + self.model_name + ".h5")
        model.save_weights("output/" + self.model_name + "_weight.h5")

        self.__model__ = model
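`WeightedBinaryCrossEntropy` and `ManualInterrupter` are project-specific; below is a hedged sketch of what a per-theme weighted binary cross-entropy might look like (the original class may differ, e.g. in how `from_logits=True` is reconciled with the sigmoid output layer):

# Illustrative weighted BCE; the project's actual WeightedBinaryCrossEntropy may differ.
import tensorflow as tf

class WeightedBinaryCrossEntropy(tf.keras.losses.Loss):
    def __init__(self, weights, from_logits=False, name="weighted_bce"):
        super().__init__(name=name)
        self.weights = tf.constant(weights, dtype=tf.float32)   # one weight per theme
        self.from_logits = from_logits

    def call(self, y_true, y_pred):
        y_true = tf.cast(y_true, y_pred.dtype)
        # per-theme cross-entropy, shape (batch, theme_count)
        bce = tf.keras.losses.binary_crossentropy(
            y_true[..., tf.newaxis], y_pred[..., tf.newaxis],
            from_logits=self.from_logits)
        return tf.reduce_mean(bce * self.weights, axis=-1)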
Example No. 21
def train_gan(gan,
              preprocessing,
              gan_number,
              images,
              batch_size,
              codings_size,
              learning_ratio=5,
              n_epochs=50,
              resume_training_at_epoch=1,
              images_saving_ratio=5,
              model_saving_ratio=5,
              verbose=True):
    generator, discriminator = gan.layers
    m = metrics.BinaryAccuracy(threshold=0.5)

    resume_training_at_epoch = max(resume_training_at_epoch, 1)

    # if training starts from scratch, logs and random samples are created
    if resume_training_at_epoch == 1:
        # those samples will be saved at each epoch
        samples = tf.random.normal(shape=[16, codings_size])
        np.savetxt('./DCGAN_{}/params/samples.csv'.format(gan_number),
                   samples.numpy(),
                   delimiter=',')

        logs = pd.DataFrame(columns=pd.MultiIndex.from_arrays([
            np.repeat([
                "running_loss_g", "running_loss_d_fake", "running_loss_d_real",
                "running_accuracy_g", "running_accuracy_d_fake",
                "running_accuracy_d_real"
            ], 6),
            np.tile(["mean", "min", "25%", "50%", "75%", "max"], 6)
        ]))
    # when training is resumed
    else:
        samples = tf.convert_to_tensor(
            np.loadtxt('./DCGAN_{}/params/samples.csv'.format(gan_number),
                       delimiter=','))
        logs = pd.read_excel("./DCGAN_{}/logs.xlsx".format(gan_number),
                             index_col=0,
                             header=[0, 1])

    for epoch in np.arange(resume_training_at_epoch, n_epochs + 1):
        n = 0
        if verbose:
            print(
                "------------------------------------ Epoch {}/{} ------------------------------------"
                .format(epoch, n_epochs))

        start_epoch = datetime.datetime.now()

        dataset = tf.data.Dataset.from_tensor_slices(images).shuffle(
            1000).batch(batch_size, drop_remainder=True).prefetch(1)

        running_loss_g = []
        running_loss_d_fake = []
        running_loss_d_real = []
        running_accuracy_g = []
        running_accuracy_d_fake = []
        running_accuracy_d_real = []

        for X_batch in dataset:
            # phase 1 - training the discriminator
            # --------- random noise is generated
            noise = tf.random.normal(shape=[batch_size, codings_size])
            # --------- random noise is passed through generator to create fake samples
            X_fake = generator(noise, training=True)
            # --------- target for fake samples is between 0 and 0.2
            y_fake = tf.constant([[(np.random.random() * 0.2)]
                                  for i in range(batch_size)])
            # --------- target for true samples is between 0.8 and 1
            y_batch = tf.constant([[(np.random.random() * 0.2) + 0.8]
                                   for i in range(batch_size)])

            # --------- set discriminator trainable
            discriminator.trainable = True
            # --------- true samples are passed through discriminator
            predicted_real = discriminator(preprocessing(X_batch))
            # --------- accuracy of discriminator on real samples is calculated (with targets = 1)
            m.update_state(tf.constant([[1.0]] * batch_size), predicted_real)
            d_accuracy_real = m.result().numpy()
            running_accuracy_d_real += [d_accuracy_real]
            m.reset_states()
            # --------- discriminator is trained on true samples
            d_loss_real = discriminator.train_on_batch(preprocessing(X_batch),
                                                       y_batch)
            running_loss_d_real += [d_loss_real]

            # --------- fake samples are passed through discriminator
            predicted_fake = discriminator(X_fake)
            # --------- accuracy of discriminator on fake samples is calculated (with targets = 0)
            m.update_state(tf.constant([[0.0]] * batch_size), predicted_fake)
            d_accuracy_fake = m.result().numpy()
            running_accuracy_d_fake += [d_accuracy_fake]
            m.reset_states()

            # --------- discriminator is trained on fake samples
            d_loss_fake = discriminator.train_on_batch(X_fake, y_fake)
            running_loss_d_fake += [d_loss_fake]

            n += 1

            # in some papers, the discriminator is trained more often than the generator
            if n % learning_ratio == 0:
                # phase 2 - training the generator
                discriminator.trainable = False
                # --------- random noise is generated
                noise = tf.random.normal(shape=[batch_size, codings_size])
                # --------- target for random noise is 1
                y_train = tf.constant([[1.0]] * batch_size)
                # --------- fake samples are passed through discriminator
                predicted = discriminator(generator(noise, training=True))
                # --------- accuracy of generator is computed
                m.update_state(tf.constant([[1.0]] * batch_size), predicted)
                g_accuracy = m.result().numpy()
                running_accuracy_g += [g_accuracy]
                m.reset_states()

                # --------- generator is trained on noise
                g_loss = gan.train_on_batch(noise, y_train)
                running_loss_g += [g_loss]

        # saving metrics
        logs = save_metrics(logs, epoch, running_loss_g, running_loss_d_fake,
                            running_loss_d_real, running_accuracy_g,
                            running_accuracy_d_fake, running_accuracy_d_real)
        logs.to_excel("./DCGAN_{}/logs.xlsx".format(gan_number))
        if verbose:
            print(logs.iloc[epoch - 1, [18, 24, 30]])

        duration_epoch = datetime.datetime.now() - start_epoch
        if verbose:
            print("Time to train GAN on this epoch: {} seconds".format(
                duration_epoch.seconds))

        # the same fixed random samples are passed through the generator and saved at each epoch
        start_samples = datetime.datetime.now()
        X_samples = tf.concat([
            generator(tf.reshape(s, shape=[1, codings_size]), training=True)
            for s in samples
        ],
                              axis=0)
        for k in np.arange(16):
            plot_image(np.squeeze(X_samples[k], axis=-1))
            plt.savefig('./DCGAN_{}/samples/{}/epoch_{:04d}.png'.format(
                gan_number, k, epoch),
                        bbox_inches='tight',
                        pad_inches=0)
            plt.close()
        duration_samples = datetime.datetime.now() - start_samples

        if verbose:
            print("Time to generate and save samples: {} seconds".format(
                duration_samples.seconds))

        # every X epochs, best random samples are saved to assess generator quality
        if epoch % images_saving_ratio == 0:
            start_images = datetime.datetime.now()
            predicted_samples = discriminator(X_samples).numpy().reshape(16)
            best_fakes, best = scores_latent_space(gan,
                                                   codings_size,
                                                   sample_size=1000,
                                                   output_size=16)
            plot_multiple_images_with_scores(
                tf.concat([X_samples, best_fakes], 0),
                np.concatenate([predicted_samples, best]), 8)
            plt.savefig('./DCGAN_{}/epoch_{:04d}.png'.format(
                gan_number, epoch),
                        dpi=1200)
            plt.close()
            duration_images = datetime.datetime.now() - start_images
            if verbose:
                print("Time to generate and save images: {} seconds".format(
                    duration_images.seconds))

        # every X epochs, gan is saved
        if epoch % model_saving_ratio == 0:
            start_checkpoint = datetime.datetime.now()
            generator.save('./DCGAN_{}/models/generator_at_epoch_{}.h5'.format(
                gan_number, epoch))
            discriminator.save(
                './DCGAN_{}/models/discriminator_at_epoch_{}.h5'.format(
                    gan_number, epoch))
            duration_checkpoint = datetime.datetime.now() - start_checkpoint
            if verbose:
                print("Time to save model checkpoint: {} seconds".format(
                    duration_checkpoint.seconds))
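# --------------------------------------------------------------------------
# A minimal sketch (not part of the original example) of how the `generator`,
# `discriminator` and composite `gan` used in the loop above are typically
# wired for MNIST-sized images. The layer sizes, optimizers and the
# `codings_size` value are illustrative assumptions; the key point is that
# the discriminator is frozen inside the composite, so `gan.train_on_batch`
# only updates the generator.
from tensorflow.keras import layers, models, optimizers

codings_size = 100  # assumed latent dimension

generator = models.Sequential([
    layers.Dense(7 * 7 * 128, activation='relu', input_shape=[codings_size]),
    layers.Reshape([7, 7, 128]),
    layers.Conv2DTranspose(64, kernel_size=5, strides=2,
                           padding='same', activation='relu'),
    layers.Conv2DTranspose(1, kernel_size=5, strides=2,
                           padding='same', activation='tanh'),
])

discriminator = models.Sequential([
    layers.Conv2D(64, kernel_size=5, strides=2, padding='same',
                  activation=layers.LeakyReLU(0.2), input_shape=[28, 28, 1]),
    layers.Conv2D(128, kernel_size=5, strides=2, padding='same',
                  activation=layers.LeakyReLU(0.2)),
    layers.Flatten(),
    layers.Dense(1, activation='sigmoid'),
])
# the discriminator is compiled on its own so train_on_batch can update it
discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizers.Adam(learning_rate=2e-4))

discriminator.trainable = False  # frozen inside the composite
gan = models.Sequential([generator, discriminator])
gan.compile(loss='binary_crossentropy',
            optimizer=optimizers.Adam(learning_rate=2e-4))
# --------------------------------------------------------------------------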
Ejemplo n.º 22
0
    EPOCHS = int(args.epochs)
    BATCH_SIZE = int(args.batch_size)
    DROPOUT = float(args.dropout)
    IMGSIZE = (int(args.imgsize[0]), int(args.imgsize[1]))
    LOGDIR = args.logdir
    DATA = args.data
    BACKBONE = args.backbone
    NAME = args.model

    # --- define model metrics ---
    METRICS = [
        metrics.TruePositives(name="True_Positives"),
        metrics.FalsePositives(name="False_Positives"),
        metrics.TrueNegatives(name="True_Negatives"),
        metrics.FalseNegatives(name="False_Negatives"),
        metrics.BinaryAccuracy(name="Binary_Accuracy"),
        metrics.Precision(name="Precision"),
        metrics.Recall(name="Recall"),
        metrics.AUC(name="AUC")
    ]

    # --- tensorflow callbacks ---
    date = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    # os.path.join picks the correct separator on every platform
    LOGDIR = os.path.join(LOGDIR, NAME, date)
    if not os.path.isdir(LOGDIR):
        os.makedirs(LOGDIR, exist_ok=True)

    tensorboard = callbacks.TensorBoard(log_dir=LOGDIR,
Ejemplo n.º 23
0
    def train_model(self, themes_weight: List[float],
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        model = tf.keras.Sequential([
            # 1
            # keras.layers.Embedding(input_dim=voc_size, output_dim=firstLayoutOutputDim),
            # keras.layers.Dropout(0.2),
            # keras.layers.Conv1D(200,3,input_shape=(ARTICLE_MAX_WORD_COUNT,firstLayoutOutputDim), activation=tf.nn.relu),
            # keras.layers.GlobalAveragePooling1D(),
            # keras.layers.Dense(250, activation=tf.nn.relu),
            # keras.layers.Dense(theme_count, activation=tf.nn.softmax)

            # 2
            # keras.layers.Embedding(input_dim=voc_size, output_dim=firstLayoutOutputDim),
            # keras.layers.LSTM(ltsmOutputDim, dropout=0.2, recurrent_dropout=0.2, activation='tanh'),
            # keras.layers.Dense(theme_count, activation=tf.nn.softmax)

            # 3
            # keras.layers.Embedding(input_dim=self.voc_size, output_dim=embedding_output_dim),
            # keras.layers.Bidirectional(keras.layers.LSTM(intermediate_dim, return_sequences=True)),
            # # keras.layers.Dropout(0.1),
            # keras.layers.Bidirectional(keras.layers.LSTM(last_dim, dropout=0.05, recurrent_dropout=0.05)),
            # keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.softmax)

            # 4
            # keras.layers.Embedding(input_dim=self.voc_size, input_length=self.article_length, output_dim=embedding_output_dim),
            # keras.layers.Bidirectional(keras.layers.LSTM(intermediate_dim, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
            # keras.layers.Dropout(0.2),
            # keras.layers.Bidirectional(keras.layers.LSTM(last_dim * 2, recurrent_dropout=0.2)), #was last_dim * 2
            # keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid)

            # 5
            #keras.layers.Embedding(input_dim=self.voc_size, input_length=self.article_length, output_dim=embedding_output_dim),
            # keras.layers.Conv1D(filters=64, kernel_size=5, input_shape=(self.voc_size, embedding_output_dim), activation="relu"),
            # keras.layers.MaxPool1D(4),
            #keras.layers.Bidirectional(keras.layers.LSTM(intermediate_dim, recurrent_dropout=0.1)),
            #keras.layers.Dense(last_dim, activation=tf.nn.relu),
            #keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid)

            #6
            keras.layers.Embedding(input_dim=voc_size,
                                   input_length=article_length,
                                   output_dim=128,
                                   mask_zero=True),
            keras.layers.Bidirectional(
                keras.layers.LSTM(128, recurrent_dropout=0.2, dropout=0.2)),
            #keras.layers.Dropout(0.2),
            #keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid, use_bias=True,bias_initializer=tf.keras.initializers.Constant(-1.22818328))
            keras.layers.Dense(theme_count,
                               activation=tf.nn.sigmoid,
                               kernel_regularizer=regularizers.l2(0.1),
                               activity_regularizer=regularizers.l1(0.05))

            # 7
            # keras.layers.Embedding(input_dim=self.voc_size, input_length=self.article_length,
            #                        output_dim=embedding_output_dim),
            # keras.layers.GlobalAvgPool1D(),
            # keras.layers.Dense(last_dim, activation=tf.nn.relu),
            # keras.layers.Dense(self.theme_count, activation=tf.nn.sigmoid)
        ])

        model.summary()

        model.compile(
            optimizer=tf.keras.optimizers.Adam(clipnorm=1, clipvalue=0.5),
            #loss=WeightedBinaryCrossEntropy(themes_weight, from_logits=True),
            # the final Dense layer already applies a sigmoid, so the loss
            # receives probabilities rather than logits
            loss=keras.losses.BinaryCrossentropy(from_logits=False),
            metrics=[
                metrics.AUC(),
                metrics.BinaryAccuracy(),
                metrics.TruePositives(),
                metrics.TrueNegatives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.Recall(),
                metrics.Precision()
            ],
            run_eagerly=self.run_eagerly)

        keras.utils.plot_model(model, 'Model1.png', show_shapes=True)

        cb_list = [ManualInterrupter, keras_callback]

        model.fit(dataset.trainData,
                  epochs=10,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=cb_list,
                  class_weight={
                      0: 1,
                      1: themes_weight[0]
                  })

        model.save("output/" + self.get_model_name() + ".h5")
        model.save_weights("output/" + self.get_model_name() + "_weight.h5")

        self.__model__ = model
Ejemplo n.º 24
0
    def timeformat(m):
        if tf.strings.length(tf.strings.format("{}", m)) == 1:
            return (tf.strings.format("0{}", m))
        else:
            return (tf.strings.format("{}", m))

    timestring = tf.strings.join([timeformat(hour), timeformat(minite),
                                  timeformat(second)], separator=":")
    tf.print("==========" * 8 + timestring)


optimizer = optimizers.Nadam()
loss_func = losses.BinaryCrossentropy()

train_loss = metrics.Mean(name='train_loss')
train_metric = metrics.BinaryAccuracy(name='train_accuracy')

valid_loss = metrics.Mean(name='valid_loss')
valid_metric = metrics.BinaryAccuracy(name='valid_accuracy')


@tf.function
def train_step(model, features, labels):
    with tf.GradientTape() as tape:
        predictions = model(features, training=True)
        loss = loss_func(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss.update_state(loss)
    train_metric.update_state(labels, predictions)
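
# A complementary sketch (not in the original snippet): the validation step
# implied by the `valid_loss` / `valid_metric` trackers defined above. It
# mirrors `train_step` but runs the model in inference mode and skips the
# gradient update.
@tf.function
def valid_step(model, features, labels):
    predictions = model(features, training=False)
    loss = loss_func(labels, predictions)

    valid_loss.update_state(loss)
    valid_metric.update_state(labels, predictions)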
Ejemplo n.º 25
0
def train(
    csv_path,
    model_save_path,
    tfrecords_path,
    volume_shape=(128, 128, 128),
    image_size=(128, 128),
    dropout=0.2,
    batch_size=16,
    n_classes=2,
    n_epochs=15,
    mode="CV",
):
    """Train a model.

    Parameters
    ----------
    csv_path: str - Path
        Path to the csv file containing training volume paths, labels (X, Y).
    model_save_path: str - Path
        Path to where the model and model weights are saved.
    tfrecords_path: str - Path
        Path to preprocessed training tfrecords.
    volume_shape: tuple of size 3, optional, default=(128, 128, 128)
        The shape of the preprocessed volumes.
    image_size: tuple of size 2, optional, default=(128, 128)
        The shape of a 2D slice along each volume axis.
    dropout: float, optional, default=0.2
        Float between 0 and 1. Fraction of the input units to drop.
    batch_size: int, optional, default=16
        No. of training examples utilized in each iteration.
    n_classes: int, optional, default=2
        No. of unique classes to train the model on. Default assumption is a
        binary classifier.
    n_epochs: int, optional, default=15
        No. of complete passes through the training dataset.
    mode: str, optional, default="CV"
        One of "CV" or "full". Indicates the type of training to perform.

    Returns
    -------
    `tf.keras.callbacks.History`
        A History object that records several metrics such as training/validation loss/metrics
        at successive epochs.
    """

    train_csv_path = os.path.join(csv_path, "training.csv")
    train_paths = pd.read_csv(train_csv_path)["X"].values
    train_labels = pd.read_csv(train_csv_path)["Y"].values

    if mode == "CV":
        valid_csv_path = os.path.join(csv_path, "validation.csv")
        valid_paths = pd.read_csv(valid_csv_path)["X"].values
        # valid_labels = pd.read_csv(valid_csv_path)["Y"].values

    weights = class_weight.compute_class_weight("balanced",
                                                classes=np.unique(train_labels),
                                                y=train_labels)
    weights = dict(enumerate(weights))

    planes = ["axial", "coronal", "sagittal", "combined"]

    global_batch_size = batch_size

    os.makedirs(model_save_path, exist_ok=True)
    cp_save_path = os.path.join(model_save_path, "weights")
    logdir_path = os.path.join(model_save_path, "tb_logs")
    metrics_path = os.path.join(model_save_path, "metrics")

    os.makedirs(metrics_path, exist_ok=True)

    for plane in planes:

        logdir = os.path.join(logdir_path, plane)
        os.makedirs(logdir, exist_ok=True)

        tbCallback = TensorBoard(log_dir=logdir)

        os.makedirs(os.path.join(cp_save_path, plane), exist_ok=True)

        model_checkpoint = ModelCheckpoint(
            os.path.join(cp_save_path, plane, "best-wts.h5"),
            monitor="val_loss",
            save_weights_only=True,
            mode="min",
        )

        if not plane == "combined":
            lr = 1e-3
            model = _model.Submodel(
                input_shape=image_size,
                dropout=dropout,
                name=plane,
                include_top=True,
                weights=None,
            )
        else:
            lr = 5e-4
            model = _model.CombinedClassifier(
                input_shape=image_size,
                dropout=dropout,
                trainable=True,
                wts_root=cp_save_path,
            )

        print("Submodel: ", plane)

        METRICS = [
            metrics.TruePositives(name="tp"),
            metrics.FalsePositives(name="fp"),
            metrics.TrueNegatives(name="tn"),
            metrics.FalseNegatives(name="fn"),
            metrics.BinaryAccuracy(name="accuracy"),
            metrics.Precision(name="precision"),
            metrics.Recall(name="recall"),
            metrics.AUC(name="auc"),
        ]

        model.compile(
            loss=tf.keras.losses.binary_crossentropy,
            optimizer=Adam(learning_rate=lr),
            metrics=METRICS,
        )

        dataset_train = get_dataset(
            file_pattern=os.path.join(tfrecords_path, "data-train_*"),
            n_classes=n_classes,
            batch_size=global_batch_size,
            volume_shape=volume_shape,
            plane=plane,
            shuffle_buffer_size=global_batch_size,
        )

        steps_per_epoch = math.ceil(len(train_paths) / batch_size)

        if mode == "CV":
            earlystopping = EarlyStopping(monitor="val_loss", patience=3)

            dataset_valid = get_dataset(
                file_pattern=os.path.join(tfrecords_path, "data-valid_*"),
                n_classes=n_classes,
                batch_size=global_batch_size,
                volume_shape=volume_shape,
                plane=plane,
                shuffle_buffer_size=global_batch_size,
            )

            validation_steps = math.ceil(len(valid_paths) / batch_size)

            history = model.fit(
                dataset_train,
                epochs=n_epochs,
                steps_per_epoch=steps_per_epoch,
                validation_data=dataset_valid,
                validation_steps=validation_steps,
                callbacks=[tbCallback, model_checkpoint, earlystopping],
                class_weight=weights,
            )

        else:
            earlystopping = EarlyStopping(monitor="loss", patience=3)
            print(model.summary())
            print("Steps/Epoch: ", steps_per_epoch)
            history = model.fit(
                dataset_train,
                epochs=n_epochs,
                steps_per_epoch=steps_per_epoch,
                callbacks=[tbCallback, model_checkpoint, earlystopping],
                class_weight=weights,
            )

        hist_df = pd.DataFrame(history.history)
        jsonfile = os.path.join(metrics_path, plane + ".json")

        with open(jsonfile, mode="w") as f:
            hist_df.to_json(f)

    return history
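

# Illustrative call to the `train` function above; the directory paths are
# hypothetical placeholders, not taken from the original example.
if __name__ == "__main__":
    history = train(
        csv_path="data/csv",              # expects training.csv (+ validation.csv for mode="CV")
        model_save_path="output/detector",
        tfrecords_path="data/tfrecords",
        volume_shape=(128, 128, 128),
        image_size=(128, 128),
        batch_size=16,
        n_epochs=15,
        mode="CV",
    )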
Ejemplo n.º 26
0
    def binary_category(self):
        return [
            metrics.BinaryAccuracy(name='binary_accuracy', dtype=None, threshold=0.5),
            metrics.BinaryCrossentropy(name='binary_crossentropy', dtype=None, from_logits=False, label_smoothing=0)
        ]
Ejemplo n.º 27
0
    def cross_validate(self, train_data_list, **build_kwargs):
        """
        Performs cross-validation on the model.

        Parameters
        ----------
        train_data_list: list
            A list where each element contains multiple images.
            (Each element corresponds to a fold)
        Returns
        -------
        Model
            A trained model
        """
        print(f"Cross validating {str(self)}")
        # Create lists for holding the metric values
        acc_per_fold = []  # accuracy
        auc_per_fold = []  # AUC
        loss_per_fold = []  # loss

        # Create 2 empty lists for holding the training history losses and test history losses
        fold_train_loss, fold_test_loss = list(), list()

        # Iterate an index to size of folds
        for i in range(len(train_data_list)):
            # Extract validation data
            x_val, y_val = train_data_list[i]

            # Indices for train
            indices_to_keep = np.delete(range(len(train_data_list)), i)

            # Get train data
            x_train, y_train = _build_train_data(train_data_list,
                                                 indices_to_keep)

            # Compute class weights for balanced learning. Returns a list
            weights = compute_class_weight("balanced",
                                           classes=np.unique(y_train),
                                           y=y_train)

            # Convert the list do dict.
            weights_dict = {idx: value for idx, value in enumerate(weights)}

            # Build the model
            baseline_model = self.build(
                input_shape=x_train[0].shape,
                **build_kwargs,
            )
            # Compile the model
            baseline_model.compile(
                optimizer=optimizers.Adam(),
                loss=losses.BinaryCrossentropy(),
                metrics=[metrics.BinaryAccuracy(),
                         metrics.AUC(name="auc")],
            )

            # Train the model with the training indices
            history = baseline_model.fit(
                x_train,
                y_train,
                validation_data=(x_val, y_val),
                batch_size=64,
                epochs=40,
                class_weight=weights_dict,
                verbose=0,
            )

            fold_train_loss.append(history.history["loss"])
            fold_test_loss.append(history.history["val_loss"])
            # Evaluate the model with the testing indices
            scores = baseline_model.evaluate(x_val, y_val, verbose=0)

            # Scores has format : ['loss', 'binary_accuracy', 'auc']
            # Save the loss value on the val data
            loss_per_fold.append(scores[0])
            # Save the Accuracy value on the val data
            acc_per_fold.append(scores[1])
            # Save the AUC value on the val data
            auc_per_fold.append(scores[2])

        # Plot the loss curve
        self._plot_losses(
            fold_train_loss,
            fold_test_loss,
        )

        # Display the results
        print(
            f"Accuracy: {np.mean(acc_per_fold):.2f} (+/- {np.std(acc_per_fold):.2f})"
        )
        print(f"AUC: {np.mean(auc_per_fold):.2f} (+/- {np.std(auc_per_fold):.2f})")

        return baseline_model
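

# Illustrative usage of `cross_validate` (an assumption, not in the original
# snippet). The fold layout is the point: each element of `train_data_list`
# is one (x, y) fold. `ModelBuilder` is a hypothetical class exposing the
# `build` / `_plot_losses` helpers this method relies on.
import numpy as np

rng = np.random.default_rng(seed=0)
folds = [
    (rng.random((100, 64, 64, 1), dtype=np.float32),
     rng.integers(0, 2, size=100))
    for _ in range(5)
]
builder = ModelBuilder()          # hypothetical concrete class
trained_model = builder.cross_validate(folds)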
Ejemplo n.º 28
0
    def __init__(self, name=None):
        super(CustomLossMetric, self).__init__(name=name)
        self.loss_fn = losses.BinaryCrossentropy()

        self.accuracy_fn = metrics.BinaryAccuracy()
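
    # The methods below are a sketch (an assumption, not part of the original
    # class) showing how such a metric commonly delegates its state to the
    # wrapped BinaryAccuracy tracker; `loss_fn` stays available should
    # per-batch loss tracking be added to `update_state` as well.
    def update_state(self, y_true, y_pred, sample_weight=None):
        self.accuracy_fn.update_state(y_true, y_pred, sample_weight=sample_weight)

    def result(self):
        return self.accuracy_fn.result()

    def reset_states(self):
        self.accuracy_fn.reset_states()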
Ejemplo n.º 29
0
        ],
                   name='DenseCell-2'),

        # final output layer
        Dense(1, activation='sigmoid', name='output'),
    ],
    name='Binary-Classifier')

NAME = 'SSBML-Transfer-Model'

OPTIMIZER = 'adam'

LOSS = Focal()

METRICS = [
    metrics.BinaryAccuracy(name='accuracy'),
    metrics.Precision(),
    metrics.Recall(),

    # this is an ugly hack but it is necessary as
    # keras does not simply have a "specificity" metric
    metrics.SpecificityAtSensitivity(
        sensitivity=.01,  # this doesn't matter
        num_thresholds=1,  # so we only get score at threshold = .5
        name='specificity')
]
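
# A minimal sketch (not shown in the original fragment) of how the OPTIMIZER,
# LOSS and METRICS constants above would typically be applied; `model` stands
# for the 'Binary-Classifier' Sequential defined earlier in this example.
model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=METRICS)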


def remove_head(base_model, trainable=False):
    ''' 
    Returns a copy of the base model with the head removed.
Ejemplo n.º 30
0
    def train_model(self, themes_weight: ThemeWeights,
                    dataset: TrainValidationDataset, voc_size: int,
                    keras_callback: LambdaCallback):

        input = keras.layers.Input(shape=(dataset.article_length,))

        outputs: List[keras.layers.Layer] = []

        for i in range(0, dataset.theme_count):
            print("")
            dense = keras.layers.Embedding(
                input_dim=voc_size, output_dim=self.embedding_size)(input)
            ltsm = keras.layers.Bidirectional(
                keras.layers.LSTM(self.LTSM_output_size,
                                  recurrent_dropout=0.2,
                                  dropout=0.2))(dense)
            dropout = keras.layers.Dropout(0.2)(ltsm)
            dense2 = keras.layers.Dense(units=self.dense2_output_size,
                                        activation=tf.nn.relu)(dropout)
            output = keras.layers.Dense(
                units=1,
                activation=tf.nn.sigmoid,
                name=str(i),
                kernel_regularizer=regularizers.l2(0.01),
                activity_regularizer=regularizers.l1(0.01))(dense2)
            outputs.append(output)

        if len(outputs) > 1:
            outputs = [keras.layers.concatenate(outputs)]
        # with a single theme, `outputs` already holds the one output layer
        # and can be passed to keras.Model as-is

        model = keras.Model(inputs=[input], outputs=outputs)

        model.compile(
            optimizer=tf.keras.optimizers.Adam(clipnorm=1, clipvalue=0.5),
            #loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
            # the per-theme outputs already apply a sigmoid, so the loss is
            # given probabilities rather than logits
            loss=WeightedBinaryCrossEntropy(
                weights=themes_weight.weight_list(), from_logits=False),
            # loss = {"0" : tf.keras.losses.BinaryCrossentropy(from_logits=True),
            #         "1" : tf.keras.losses.BinaryCrossentropy(from_logits=True)},
            metrics=[
                metrics.AUC(multi_label=True),
                metrics.BinaryAccuracy(),
                metrics.TruePositives(),
                metrics.TrueNegatives(),
                metrics.FalseNegatives(),
                metrics.FalsePositives(),
                metrics.Recall(),
                metrics.Precision()
            ],
            run_eagerly=False)

        model.summary()

        keras.utils.plot_model(model,
                               self.__model_name__ + '.png',
                               show_shapes=True)

        callbacks = [ManualInterrupter, keras_callback]

        # model.fit(self.dataset.trainData, epochs=15, steps_per_epoch=self.dataset.train_batch_count,
        #           validation_data=self.dataset.validationData, validation_steps=self.dataset.validation_batch_count,
        #           callbacks=callbacks, class_weight=self.theme_weight)

        # model.fit(self.dataset.trainData, epochs=10, steps_per_epoch=self.dataset.train_batch_count,
        #           validation_data=self.dataset.validationData, validation_steps=self.dataset.validation_batch_count,
        #           callbacks=callbacks, class_weight={ 0 : 1, 1 : 7.8, 2 : 4.3})

        model.fit(dataset.trainData,
                  epochs=40,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=callbacks)

        self.__model__ = model
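

# A sketch (an assumption, not the original class) of a per-theme weighted
# binary cross-entropy in the spirit of the `WeightedBinaryCrossEntropy` used
# above: each of the `theme_count` sigmoid outputs gets its own
# positive-class weight.
import tensorflow as tf
from tensorflow import keras


class WeightedBinaryCrossEntropySketch(keras.losses.Loss):
    def __init__(self, weights, from_logits=False, name="weighted_bce"):
        super().__init__(name=name)
        # one weight per theme, applied to positive examples only
        self.theme_weights = tf.constant(weights, dtype=tf.float32)
        self.from_logits = from_logits

    def call(self, y_true, y_pred):
        y_true = tf.cast(y_true, tf.float32)
        # element-wise binary cross-entropy, one value per theme
        bce = keras.backend.binary_crossentropy(
            y_true, y_pred, from_logits=self.from_logits)
        # up-weight positives of each theme, leave negatives at 1.0
        sample_weights = y_true * self.theme_weights + (1.0 - y_true)
        return tf.reduce_mean(bce * sample_weights, axis=-1)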