Example #1
import sys

import tensorflow as tf

# conv_ae and loader are project-local modules this example depends on
import conv_ae
import loader

lr_rate = 0.0001
batch_size = 16

# Step 1: Create Model
model = conv_ae.Conv_AE((None, height, width, channel), latent=200, units=16)

if sys.argv[1] == "train":

    model.summary()
    # sys.exit()

    # Load weights:
    # model.load_weights(model_name)

    # Step 3: Load data
    X_train, Y_train, X_valid, Y_valid = loader.load_light(
        data_path, width, height, True, 0.8, False)
    # Define The Optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_rate)
    # Define The Loss
    #---------------------
    @tf.function
    def my_loss(y_true, y_pred):
        return tf.keras.losses.MSE(y_true=y_true, y_pred=y_pred)

    # Define The Metrics
    tr_loss = tf.keras.metrics.MeanSquaredError(name='tr_loss')
    va_loss = tf.keras.metrics.MeanSquaredError(name='va_loss')
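    # Note: these Keras metric objects accumulate MSE across batches; a typical
    # loop reads them with .result() and clears them with .reset_states() at
    # the start of each epoch (assumed usage, not shown in this snippet).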

    #---------------------
    @tf.function
    def train_step(X, Y_true):
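        # The listing is truncated at this point; the body below is a minimal
        # sketch of a typical tf.GradientTape training step, assumed rather
        # than taken from the original file, reusing the model, my_loss,
        # optimizer and tr_loss objects defined above.
        with tf.GradientTape() as tape:
            Y_pred = model(X, training=True)
            loss = my_loss(Y_true, Y_pred)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        tr_loss.update_state(Y_true, Y_pred)
        return loss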
Example #2
import sys

import tensorflow as tf

# conv_vae and loader are project-local modules this example depends on
import conv_vae
import loader

lr_rate = 1e-4
batch_size = 4

# Step 1: Create Model
model = conv_vae.CONV_VAE(image_size=image_size,
                          latent_dim=latent_dim,
                          filters=6)
model.build((None, image_size, image_size, 3))
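# build() materializes the model's variables for the given input shape, so
# summary() and load_weights() can be called before any data is passed through.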

# Step 2: Define Metrics
# print(model.summary())
# sys.exit()

if sys.argv[1] == "train":
    # Step 3: Load data
    X_train, Y_train, X_valid, Y_valid = loader.load_light(
        data_path, image_size, image_size, True, 0.8, True)

    # Step 4: Training
    # model.load_weights(model_name)

    # Define The Optimizer
    optimizer = tf.keras.optimizers.Adam(
        learning_rate=lr_rate)  #, beta_1 = 0.5)

    @tf.function
    def ae_loss(y_true, y_pred):
        # de_loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
        return tf.keras.losses.MSE(y_true=y_true, y_pred=y_pred)

    # Define The Metrics
    tr_loss = tf.keras.metrics.MeanSquaredError(name='tr_loss')
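
    # The listing is cut off at this point; what follows is a minimal sketch of
    # how the example could continue (validation metric plus a training step),
    # assumed rather than taken from the original file. In particular, whether
    # CONV_VAE registers its KL term through model.losses is an assumption
    # about the project-local class.
    va_loss = tf.keras.metrics.MeanSquaredError(name='va_loss')

    @tf.function
    def train_step(X, Y_true):
        with tf.GradientTape() as tape:
            Y_pred = model(X, training=True)
            rec_loss = tf.reduce_mean(ae_loss(Y_true, Y_pred))
            kl_loss = tf.add_n(model.losses) if model.losses else 0.0
            loss = rec_loss + kl_loss
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        tr_loss.update_state(Y_true, Y_pred)
        return loss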