Code Example #1
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model

# CONFIG, DATA_DIR, MODEL_CKPT_FILENAME, run, paths_training, the two
# datasets and the create_base_cnn / create_head / get_optimizer /
# AzureLogCallback / create_tensorboard_callback helpers are module-level
# names defined elsewhere in the training script.


def create_and_fit_model():
    # Create the model.
    input_shape = (CONFIG.IMAGE_TARGET_HEIGHT, CONFIG.IMAGE_TARGET_WIDTH, 1)
    base_model = create_base_cnn(input_shape, dropout=True)
    head_input_shape = (128, )
    head_model1 = create_head(head_input_shape, dropout=True, name="height")
    head_model2 = create_head(head_input_shape, dropout=True, name="weight")
    model_input = layers.Input(shape=(CONFIG.IMAGE_TARGET_HEIGHT,
                                      CONFIG.IMAGE_TARGET_WIDTH, 1))
    # Both heads consume the shared features extracted by the base CNN.
    features = base_model(model_input)
    model_output1 = head_model1(features)
    model_output2 = head_model2(features)
    model = Model(inputs=model_input, outputs=[model_output1, model_output2])

    best_model_path = str(DATA_DIR / f'outputs/{MODEL_CKPT_FILENAME}')
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=best_model_path,
        monitor="val_loss",
        save_best_only=True,
        verbose=1)
    training_callbacks = [
        AzureLogCallback(run),
        create_tensorboard_callback(),
        checkpoint_callback,
    ]

    optimizer = get_optimizer(CONFIG.USE_ONE_CYCLE,
                              lr=CONFIG.LEARNING_RATE,
                              n_steps=len(paths_training) / CONFIG.BATCH_SIZE)

    # Compile the model.
    model.compile(optimizer=optimizer,
                  loss={
                      'height': 'mse',
                      'weight': 'mse'
                  },
                  loss_weights={
                      'height': CONFIG.HEIGHT_IMPORTANCE,
                      'weight': CONFIG.WEIGHT_IMPORTANCE
                  },
                  metrics={
                      'height': ["mae"],
                      'weight': ["mae"]
                  })

    # Train the model.
    model.fit(dataset_training.batch(CONFIG.BATCH_SIZE),
              validation_data=dataset_validation.batch(CONFIG.BATCH_SIZE),
              epochs=CONFIG.EPOCHS,
              callbacks=training_callbacks,
              verbose=2)
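
The examples on this page rely on project-level helpers that are defined elsewhere in the source files. As a rough illustration of what Example #1's two-output setup expects from create_head, here is a minimal sketch of such a regression head; the layer sizes and dropout rate are assumptions for illustration, not the project's actual implementation.

from tensorflow.keras import layers, models


def create_head(input_shape, dropout=False, name=None):
    # Sketch only: a small MLP mapping the 128-d feature vector to one
    # scalar (height or weight). The sub-model name must match the keys
    # used in compile()'s loss/metrics dicts ("height", "weight").
    head_input = layers.Input(shape=input_shape)
    x = layers.Dense(64, activation="relu")(head_input)
    if dropout:
        x = layers.Dropout(0.2)(x)  # assumed rate
    output = layers.Dense(1)(x)
    return models.Model(head_input, output, name=name)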
Code Example #2
File: train.py Project: Welthungerhilfe/cgm-ml
import tensorflow as tf
import wandb
from wandb.keras import WandbCallback

# CONFIG, DATA_DIR, MODEL_CKPT_FILENAME, run, paths_training, the two
# datasets and the create_cnn / get_optimizer / setup_wandb /
# AzureLogCallback / create_tensorboard_callback helpers are module-level
# names defined elsewhere in train.py.


def create_and_fit_model():
    # Create the model.
    input_shape = (CONFIG.IMAGE_TARGET_HEIGHT, CONFIG.IMAGE_TARGET_WIDTH,
                   NUM_INPUT_CHANNELS)
    model = create_cnn(input_shape, dropout=CONFIG.USE_DROPOUT)
    model.summary()

    best_model_path = str(DATA_DIR / f'outputs/{MODEL_CKPT_FILENAME}')
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=best_model_path,
        monitor="val_loss",
        save_best_only=True,
        verbose=1)

    dataset_batches = dataset_training.batch(CONFIG.BATCH_SIZE)

    training_callbacks = [
        AzureLogCallback(run),
        create_tensorboard_callback(),
        checkpoint_callback,
    ]

    # Optionally log to Weights & Biases. WandbCallback's log_gradients
    # option requires training_data, which is why the batched dataset is
    # created above.
    if getattr(CONFIG, 'USE_WANDB', False):
        setup_wandb()
        wandb.init(project="ml-project", entity="cgm-team")
        wandb.config.update(CONFIG)
        training_callbacks.append(
            WandbCallback(log_weights=True,
                          log_gradients=True,
                          training_data=dataset_batches))

    optimizer = get_optimizer(CONFIG.USE_ONE_CYCLE,
                              lr=CONFIG.LEARNING_RATE,
                              n_steps=len(paths_training) / CONFIG.BATCH_SIZE)

    # Compile the model.
    model.compile(optimizer=optimizer, loss="mse", metrics=["mae"])

    # Train the model.
    model.fit(dataset_batches,
              validation_data=dataset_validation.batch(CONFIG.BATCH_SIZE),
              epochs=CONFIG.EPOCHS,
              callbacks=training_callbacks,
              verbose=2)
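
All three examples obtain their optimizer from a get_optimizer(use_one_cycle, lr, n_steps) helper defined elsewhere in train.py. A minimal sketch of that signature, assuming Nadam (the optimizer the fine-tuning phase in Example #3 uses) and a cosine decay as a simple stand-in for a one-cycle learning-rate policy, could look like this:

import tensorflow as tf


def get_optimizer(use_one_cycle, lr, n_steps):
    # Sketch only. A real one-cycle policy ramps the learning rate up and
    # back down over n_steps; CosineDecay is used here as a stand-in.
    if use_one_cycle:
        schedule = tf.keras.optimizers.schedules.CosineDecay(
            initial_learning_rate=lr, decay_steps=int(n_steps))
        return tf.keras.optimizers.Nadam(learning_rate=schedule)
    return tf.keras.optimizers.Nadam(learning_rate=lr)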
Code Example #3
File: train.py Project: Welthungerhilfe/cgm-ml
import tensorflow as tf
from tensorflow.keras import callbacks, layers, models

# CONFIG, DATA_DIR, MODEL_CKPT_FILENAME, run, workspace, logger,
# paths_training, the two datasets and the get_base_model / create_head /
# get_optimizer / AzureLogCallback / create_tensorboard_callback helpers
# are module-level names defined elsewhere in train.py.


def create_and_fit_model():
    # Create the base model
    base_model = get_base_model(workspace, DATA_DIR)
    base_model.summary()
    assert base_model.output_shape == (None, 128)

    # Create the head
    head_input_shape = (128 * CONFIG.N_ARTIFACTS, )
    head_model = create_head(head_input_shape, dropout=CONFIG.USE_DROPOUT)

    # Implement artifact flow through the same model
    model_input = layers.Input(shape=(CONFIG.IMAGE_TARGET_HEIGHT,
                                      CONFIG.IMAGE_TARGET_WIDTH,
                                      CONFIG.N_ARTIFACTS))

    features_list = []
    # Slice out each artifact's channel and push it through the shared
    # base model, collecting one 128-d feature vector per artifact.
    for i in range(CONFIG.N_ARTIFACTS):
        features_part = model_input[:, :, :, i:i + 1]
        features_part = base_model(features_part)
        features_list.append(features_part)

    concatenation = tf.keras.layers.concatenate(features_list, axis=-1)
    assert concatenation.shape.as_list() == [None, 128 * CONFIG.N_ARTIFACTS]
    model_output = head_model(concatenation)

    model = models.Model(model_input, model_output)
    model.summary()

    best_model_path = str(DATA_DIR / f'outputs/{MODEL_CKPT_FILENAME}')
    checkpoint_callback = callbacks.ModelCheckpoint(filepath=best_model_path,
                                                    monitor="val_loss",
                                                    save_best_only=True,
                                                    verbose=1)
    training_callbacks = [
        AzureLogCallback(run),
        create_tensorboard_callback(),
        checkpoint_callback,
    ]

    optimizer = get_optimizer(CONFIG.USE_ONE_CYCLE,
                              lr=CONFIG.LEARNING_RATE,
                              n_steps=len(paths_training) / CONFIG.BATCH_SIZE)

    # Compile the model.
    model.compile(optimizer=optimizer, loss="mse", metrics=["mae"])

    # Train the model.
    model.fit(dataset_training.batch(CONFIG.BATCH_SIZE),
              validation_data=dataset_validation.batch(CONFIG.BATCH_SIZE),
              epochs=CONFIG.EPOCHS,
              callbacks=training_callbacks,
              verbose=2)

    if CONFIG.EPOCHS_TUNE:
        # Un-freeze the base model so its weights update during fine-tuning.
        for layer in base_model.layers:
            layer.trainable = True

        # Adjust learning rate
        optimizer = tf.keras.optimizers.Nadam(
            learning_rate=CONFIG.LEARNING_RATE_TUNE)
        model.compile(optimizer=optimizer, loss="mse", metrics=["mae"])

        logger.info('Start fine-tuning')
        model.fit(dataset_training.batch(CONFIG.BATCH_SIZE),
                  validation_data=dataset_validation.batch(CONFIG.BATCH_SIZE),
                  epochs=CONFIG.EPOCHS_TUNE,
                  callbacks=training_callbacks,
                  verbose=2)
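
Example #3 follows the standard transfer-learning recipe: first train the freshly initialized head against the frozen pre-trained base, then unfreeze the base, recompile with a much lower learning rate and fine-tune the whole network. A self-contained toy sketch of that two-phase pattern (all shapes, data and learning rates below are made up for illustration):

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models

# Toy stand-ins for the pre-trained base and the regression head.
base = models.Sequential([layers.Input(shape=(16,)),
                          layers.Dense(128, activation="relu")])
head = models.Sequential([layers.Dense(1)])

# Phase 1: freeze the base, train only the head.
base.trainable = False
model = models.Sequential([base, head])
model.compile(optimizer=tf.keras.optimizers.Nadam(1e-3), loss="mse")

x = np.random.rand(32, 16).astype("float32")
y = np.random.rand(32, 1).astype("float32")
model.fit(x, y, epochs=1, verbose=0)

# Phase 2: unfreeze and fine-tune. Recompiling is required for the
# trainable change to take effect; a much smaller learning rate avoids
# destroying the pre-trained base weights.
base.trainable = True
model.compile(optimizer=tf.keras.optimizers.Nadam(1e-5), loss="mse")
model.fit(x, y, epochs=1, verbose=0)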