# Example #1
# 0
# Build the detection loss for this dataset's class count.
loss = Total_loss(dataset_config.num_classes)

print("Backbone Network Version : EfficientNet{0} .".format(MODEL_NAME))

# Integer division: any partial trailing batch is dropped.
steps_per_epoch = dataset_config.number_train // BATCH_SIZE
validation_steps = dataset_config.number_test // BATCH_SIZE
# NOTE(review): these format strings have no separator before the number,
# and the values are step counts rather than sample counts.
print("Train batch samples{0}".format(steps_per_epoch))
print("Validation batch samples{0}".format(validation_steps))

# SGD with momentum was tried earlier; Adam is the current choice.
# optimizer = tf.keras.optimizers.SGD(learning_rate=base_lr, momentum=0.9)
optimizer = tf.keras.optimizers.Adam(learning_rate=base_lr)

model = model_build(TRAIN_MODE,
                    MODEL_NAME,
                    normalizations=normalize,
                    num_priors=num_priors,
                    image_size=IMAGE_SIZE,
                    backbone_trainable=True)

model.compile(optimizer=optimizer, loss=loss.total_loss)

# Restore trained weights before export; assumes CHECKPOINT_DIR ends with
# a path separator — TODO confirm.
model.load_weights(CHECKPOINT_DIR + WEIGHT_FILENAME + '.h5')
model.summary()
""" convert to tflite """
# Export as a TensorFlow SavedModel for the TFLite converter.
# Explicit keywords replace the original opaque positional booleans,
# which mapped to overwrite=True, include_optimizer=False, save_format='tf'.
model.save('./checkpoints/save_model',
           overwrite=True,
           include_optimizer=False,
           save_format='tf')

converter = tf.lite.TFLiteConverter.from_keras_model(model)
def prepare_for_prediction(file_path):
    """Read the image file at *file_path* as raw bytes.

    NOTE(review): the remainder of this function (presumably decode /
    resize steps) is cut off in this chunk — confirm against the full file.
    """
    img = tf.io.read_file(file_path)
# Example #2
# 0
    # Training callbacks (LR scheduling + checkpointing). NOTE(review): the
    # enclosing block's header is not visible in this chunk.
    callback = [reduce_lr, checkpoint]

# Integer division: any partial trailing batch is dropped.
steps_per_epoch = number_train // BATCH_SIZE
validation_steps = number_test // BATCH_SIZE
print("학습 배치 개수:", steps_per_epoch)  # "Number of training batches:"
print("검증 배치 개수:", validation_steps)  # "Number of validation batches:"

# Wrap the base optimizer for mixed-precision loss scaling.
# NOTE(review): the `loss_scale` keyword was removed in TF 2.4+; newer
# versions use LossScaleOptimizer(optimizer) (dynamic by default) —
# confirm the pinned TensorFlow version.
optimizer = mixed_precision.LossScaleOptimizer(optimizer, loss_scale='dynamic')
mirrored_strategy = tf.distribute.MirroredStrategy(
    cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
print("Number of devices: {}".format(mirrored_strategy.num_replicas_in_sync))

# NOTE(review): the optimizer is created OUTSIDE the strategy scope but used
# by compile() inside it; TF2 generally requires optimizer variables to be
# created under the same scope as the model — verify this runs as intended.
with mirrored_strategy.scope():

    # Build and compile the model under the distribution scope so its
    # variables are mirrored across all devices.
    model = model_build(TRAIN_MODE,
                        MODEL_NAME,
                        pretrained=BACKBONE_PRETRAINED,
                        image_size=IMAGE_SIZE,
                        backbone_trainable=True)
    model.compile(optimizer=optimizer,
                  loss=total_loss,
                  metrics=[precision, recall, cross_entropy, localization])

    #model.summary()
    # Train with explicit step caps — presumably because the input
    # datasets repeat indefinitely; TODO confirm.
    history = model.fit(training_dataset,
                        validation_data=validation_dataset,
                        steps_per_epoch=steps_per_epoch,
                        validation_steps=validation_steps,
                        epochs=EPOCHS,
                        callbacks=callback)