Example #1
def train_mnist(config):
    # https://github.com/tensorflow/tensorflow/issues/32159
    import tensorflow as tf
    from ray.tune.integration.keras import TuneReportCallback
    print('Is cuda available:', tf.test.is_gpu_available())
    batch_size = 128
    num_classes = 10
    epochs = 12

    # load and normalize MNIST
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(config["hidden"], activation="relu"),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(num_classes, activation="softmax")
    ])

    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=tf.keras.optimizers.SGD(
                      learning_rate=config["lr"], momentum=config["momentum"]),
                  metrics=["accuracy"])

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=0,
              validation_data=(x_test, y_test),
              callbacks=[TuneReportCallback({"mean_accuracy": "accuracy"})])
Example #2
def train_mnist(config):
    # https://github.com/tensorflow/tensorflow/issues/32159
    import tensorflow as tf
    from ray.tune.integration.keras import TuneReportCallback
    print('Is cuda available for trainer:', tf.test.is_gpu_available())
    batch_size = 128
    num_classes = 10
    epochs = 200

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # define model
    inputs = tf.keras.layers.Input(shape=(28, 28))
    x = tf.keras.layers.Flatten()(inputs)
    # x = tf.keras.layers.LayerNormalization()(x)
    for _ in range(config["layers"]):
        x = tf.keras.layers.Dense(units=config["hidden"],
                                  activation=config["activation"])(x)
        x = tf.keras.layers.Dropout(config["dropout"])(x)
    outputs = tf.keras.layers.Dense(units=num_classes, activation="softmax")(x)

    model = tf.keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=tf.keras.optimizers.Adam(lr=config["lr"]),
                  metrics=["accuracy"])

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=0,
              validation_data=(x_test, y_test),
              callbacks=[TuneReportCallback({"mean_accuracy": "accuracy"})])
Example #3
def train_unet(config,
               train_dataloader=None,
               val_dataloader=None,
               loss=None,
               metrics=None,
               checkpoint_dir=None):

    import os

    from ray import tune
    from ray.tune.integration.keras import TuneReportCallback
    from tqdm.keras import TqdmCallback

    epochs = 15

    # get_model is a project-local helper that builds the segmentation model
    model = get_model(backbone='vgg16',
                      encoder_freeze=config["encoder_freeze"],
                      n_classes=1,
                      activation='sigmoid',
                      dropout=config["dropout"])

    model.compile(optimizer=config["optimizer"](config["learning_rate"]),
                  loss=loss,
                  metrics=metrics)

    history = model.fit(train_dataloader,
                        steps_per_epoch=len(train_dataloader),
                        epochs=epochs,
                        verbose=0,
                        # batch size is fixed by the dataloader, so it must not be passed to fit()
                        validation_data=val_dataloader,
                        validation_steps=len(val_dataloader),
                        callbacks=[
                            TuneReportCallback(
                                {
                                    "loss": "loss",
                                    "iou_score": "iou_score",
                                    "val_loss": "val_loss",
                                    "val_iou_score": "val_iou_score",
                                },
                                on="epoch_end"),
                            TqdmCallback(verbose=2),
                        ])

    # save best model of the trial
    with tune.checkpoint_dir(step=1) as checkpoint_dir:
        checkpoint_dir = os.path.dirname(
            os.path.dirname(checkpoint_dir))  # go up two directories
        score_file_path = os.path.join(checkpoint_dir, 'score')
        # validation IoU of the final epoch, i.e. of the weights being saved
        new_val_iou_score = history.history['val_iou_score'][-1]
        best_model_file_path = os.path.join(checkpoint_dir, 'best_model.h5')

        # read the previous best score of this trial, if any
        old_val_iou_score = float('-inf')
        if os.path.isfile(score_file_path):
            with open(score_file_path) as f:
                old_val_iou_score = float(f.read())

        if new_val_iou_score > old_val_iou_score:
            # we have a new best model
            with open(score_file_path, 'w') as f:
                f.write(str(new_val_iou_score))
            model.save(best_model_file_path)

    print(history.history.keys())
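Since train_unet takes extra non-config arguments (the dataloaders, loss, and metrics), it would typically be wrapped with tune.with_parameters before being handed to tune.run. A minimal sketch, assuming those objects are already defined by the caller; the search space is an illustrative assumption keyed to the config entries the function reads:

import tensorflow as tf
from ray import tune

analysis = tune.run(
    tune.with_parameters(train_unet,
                         train_dataloader=train_dataloader,
                         val_dataloader=val_dataloader,
                         loss=loss,
                         metrics=metrics),
    metric="val_iou_score",
    mode="max",
    resources_per_trial={"cpu": 2, "gpu": 1},
    config={
        "encoder_freeze": tune.choice([True, False]),
        "dropout": tune.uniform(0.0, 0.5),
        # optimizer classes: called as config["optimizer"](config["learning_rate"])
        "optimizer": tune.choice([tf.keras.optimizers.Adam, tf.keras.optimizers.SGD]),
        "learning_rate": tune.loguniform(1e-5, 1e-3),
    })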
Example #4
def train_mnist(config):
    # https://github.com/tensorflow/tensorflow/issues/32159
    import tensorflow as tf
    import numpy as np
    from ray.tune.integration.keras import TuneReportCallback

    print('Is cuda available:', tf.test.is_gpu_available())
    batch_size = config['batch_s']
    num_classes = 10
    epochs = 200

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    x_train = np.expand_dims(x_train, axis=3)
    x_test = np.expand_dims(x_test, axis=3)

    # define model
    inputs = tf.keras.layers.Input(
        shape=(28, 28, 1))  # shape changed from (28, 28) to include the channel axis
    x = tf.keras.layers.BatchNormalization()(inputs)
    # 1st conv layer
    x = tf.keras.layers.Conv2D(filters=config["c1_f"],
                               kernel_size=config["c1_ks"],
                               kernel_initializer=config["init"],
                               activation=config["act_f1"])(x)
    x = tf.keras.layers.MaxPool2D((2, 2))(x)
    x = tf.keras.layers.BatchNormalization()(x)

    # 2nd conv layer
    x = tf.keras.layers.Conv2D(filters=config["c2_f"],
                               kernel_size=config["c2_ks"],
                               kernel_initializer=config["init"],
                               activation=config["act_f1"])(x)
    x = tf.keras.layers.MaxPool2D((2, 2))(x)
    x = tf.keras.layers.BatchNormalization()(x)

    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(units=config["hidden"],
                              kernel_initializer=config["init"],
                              activation=config["act_f2"])(x)
    x = tf.keras.layers.Dropout(config["drop"])(x)
    x = tf.keras.layers.BatchNormalization()(x)

    outputs = tf.keras.layers.Dense(units=num_classes,
                                    kernel_initializer=config["init"],
                                    activation="softmax")(x)

    model = tf.keras.Model(inputs=inputs,
                           outputs=outputs,
                           name="mnist_conv_model")
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer=tf.keras.optimizers.Adam(lr=config["lr"]),
                  metrics=["accuracy"])

    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=0,
        validation_data=(x_test, y_test),
        callbacks=[
            TuneReportCallback({
                # other available keys: 'loss', 'accuracy', 'val_loss', 'val_accuracy'
                "mean_accuracy": "val_accuracy"
            })
        ])
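A matching launch sketch for this convolutional variant; the value ranges are illustrative assumptions keyed to the config entries the function reads ("batch_s", "c1_f", "c1_ks", "c2_f", "c2_ks", "init", "act_f1", "act_f2", "hidden", "drop", "lr"):

from ray import tune

analysis = tune.run(
    train_mnist,
    metric="mean_accuracy",
    mode="max",
    num_samples=20,
    config={
        "batch_s": tune.choice([64, 128, 256]),
        "c1_f": tune.choice([16, 32]),
        "c1_ks": tune.choice([3, 5]),
        "c2_f": tune.choice([32, 64]),
        "c2_ks": tune.choice([3, 5]),
        "init": tune.choice(["glorot_uniform", "he_normal"]),
        "act_f1": tune.choice(["relu", "elu"]),
        "act_f2": tune.choice(["relu", "tanh"]),
        "hidden": tune.choice([64, 128]),
        "drop": tune.uniform(0.0, 0.5),
        "lr": tune.loguniform(1e-4, 1e-2),
    })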