def train_model_2(config: ConfigParser, model: tf.keras.Sequential, data: Data,
                  save_path: Path, checkpoint_path: Path) -> None:
    """Compile and fit *model* with SWA weight averaging, then save it.

    Args:
        config: parsed configuration; reads ``Model.version``,
            ``Model.learning_rate`` and ``Model.n_models``.
        model: Keras model to train; compiled in place.
        data: container exposing ``training_dataset`` and
            ``validation_dataset`` attributes.
        save_path: destination for the final saved model.
        checkpoint_path: directory receiving the per-epoch averaged
            checkpoints.
    """
    version = config['Model']['version']

    training_callbacks = [
        # Writes checkpoints containing the SWA-averaged weights.
        tfa.callbacks.AverageModelCheckpoint(
            filepath=str(checkpoint_path) + '/cp-{epoch:04d}.ckpt',
            update_weights=True),
        tf.keras.callbacks.TensorBoard(
            log_dir=f'logs/{version}_model_2',
            profile_batch='100, 110',
            histogram_freq=1,
            update_freq='batch'),
    ]

    base_optimizer = tf.keras.optimizers.SGD(
        learning_rate=float(config['Model']['learning_rate']))
    # 35 below obtained by inspecting the epoch at which convergence occurred
    # on the validation set with TensorBoard.
    swa_optimizer = tfa.optimizers.SWA(
        base_optimizer,
        start_averaging=35,
        average_period=int(config['Model']['n_models']))

    model.compile(
        optimizer=swa_optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
        metrics=['accuracy'])
    model.fit(data.training_dataset,
              epochs=1000,
              validation_data=data.validation_dataset,
              callbacks=training_callbacks)

    # Save the model
    model.save(save_path)
    # Remove the model from memory, since OOM might occur.
    del model
# --- Esempio n. 2 (example separator left over from a scraped source) ---
def train_model(model: tf.keras.Sequential):
    """Fit *model* for two epochs on the batches held by the global CMODEL.

    Args:
        model: current model

    Returns: model trained
    """
    # Training data comes from the module-level CMODEL configuration object.
    model.fit(
        x=CMODEL.input_batch,
        y=CMODEL.label_batch,
        batch_size=CMODEL.batch_size,
        epochs=2,
    )
    return model
# --- Esempio n. 3 (example separator left over from a scraped source) ---
def fit_model(
    tf_model: tf.keras.Sequential,
    training_data,
    validation_data,
    callbacks=None,
    settings=None,
):
    """Fit *tf_model* with balanced class weights and return the fit history.

    Args:
        tf_model: compiled Keras model to train.
        training_data: pair ``(x, y)`` where ``y`` is one-hot encoded
            (class index recovered via ``argmax`` on the last axis).
        validation_data: forwarded to ``tf_model.fit``.
        callbacks: optional list of Keras callbacks (was annotated
            ``: None`` with no default — now genuinely optional).
        settings: optional dict of extra keyword arguments for ``fit``
            (e.g. ``epochs``, ``batch_size``).

    Returns:
        The ``History`` object produced by ``tf_model.fit``.
    """
    # class_weight_val = np.ones(training_data[1].shape[-1])

    y = np.argmax(training_data[1], axis=-1)
    # Compute np.unique once instead of twice (it sorts the labels).
    classes = np.unique(y)
    class_weight_val = class_weight.compute_class_weight(
        class_weight="balanced", classes=classes, y=y)
    class_weights = dict(zip(classes, class_weight_val))

    print("Class Weights: ", end="")
    print(class_weights)

    rtn_history = tf_model.fit(
        x=training_data[0],
        y=training_data[1],
        validation_data=validation_data,
        callbacks=callbacks,
        class_weight=class_weights,
        # Bug fix: the original did **settings unconditionally, so omitting
        # `settings` raised "TypeError: ... argument after ** must be a
        # mapping, not NoneType".
        **(settings or {}),
    )

    return rtn_history
# --- Esempio n. 4 (example separator left over from a scraped source) ---
def train_epoch_plain(net: tf.keras.Sequential, x, y):
    """Run one plain (no callbacks, no validation) training epoch of *net*.

    Uses the module-level BATCH_SIZE constant; progress bar enabled.
    """
    net.fit(x, y, batch_size=BATCH_SIZE, epochs=1, verbose=1)