Example #1: SSIM autoencoder on the Cats vs. Dogs dataset
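The imports below are a sketch that assumes tftk's usual package layout for the dataset, model, optimizer, and callback helpers; adjust the module paths to match your installed version of tftk.

# NOTE: import paths are assumptions, not confirmed against a specific tftk release.
import tensorflow as tf

import tftk
from tftk.image.dataset import CatsVsDogs, MVTecAd, ImageDatasetUtil
from tftk.image.model import SSIMAutoEncoderModel
from tftk.optimizer import OptimizerBuilder
from tftk.callback import CallbackBuilder
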
# Register the training run, then enable mixed precision and suspend/resume support.
tftk.Context.init_context(training_name='ssim_catsdog_deep_autoencoder')
tftk.ENABLE_MIXED_PRECISION()
tftk.ENABLE_SUSPEND_RESUME_TRAINING()

IMAGE_SIZE = 128
EPOCHS = 80
BATCH_SIZE = 50

# Alternative: train on an MVTec AD category instead of Cats vs. Dogs.
# mvtec_ad, mvtec_len = MVTecAd.get_train_dataset("bottle")
# mvtec_ad = mvtec_ad.map(ImageDatasetUtil.resize(IMAGE_SIZE, IMAGE_SIZE))
# (train, train_len), (validation, validation_len) = ImageDatasetUtil.devide_train_validation(mvtec_ad, mvtec_len, 0.9)

# Crop each image to its largest square region, resize it, and hold out 10% for validation.
cats_vs_dogs, total_len = CatsVsDogs.get_train_dataset()
cats_vs_dogs = cats_vs_dogs.map(
    ImageDatasetUtil.map_max_square_crop_and_resize(IMAGE_SIZE, IMAGE_SIZE))
(train, train_len), (validation, validation_len) = \
    ImageDatasetUtil.devide_train_validation(cats_vs_dogs, total_len, 0.9)

train = train.map(ImageDatasetUtil.image_reguralization(),
                  num_parallel_calls=tf.data.experimental.AUTOTUNE
                  )  # .map(ImageDatasetUtil.resize(64,64))
validation_r = validation.map(ImageDatasetUtil.image_reguralization(),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE
                              )  # .map(ImageDatasetUtil.resize(64,64))
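
# SSIM-based autoencoder for 128x128 (IMAGE_SIZE) RGB inputs.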
model = SSIMAutoEncoderModel.get_model(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

optimizer = OptimizerBuilder.get_optimizer("rmsprop")
callback = CallbackBuilder.get_callbacks()
Example #2: Food-101 classification with ResNet50V2, RandAugment, and mixup
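As in the first example, the imports are a sketch assuming tftk's usual package layout; adjust the module paths to match your installed version.

# NOTE: import paths are assumptions, not confirmed against a specific tftk release.
import tensorflow as tf

import tftk
from tftk import Context
from tftk.image.dataset import Food101, ImageDatasetUtil
from tftk.image.augument import ImageAugument
from tftk.image.model import KerasResNet50V2
from tftk.optimizer import OptimizerBuilder
from tftk.callback import CallbackBuilder
from tftk.train.image import ImageTrain
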
if __name__ == '__main__':

    context = Context.init_context(
        TRAINING_BASE_DIR="tmp",
        TRAINING_NAME="food101"
    )

    tftk.ENABLE_MIXED_PRECISION()
    BATCH_SIZE = 64
    CLASS_NUM = 101
    IMAGE_SIZE = 224
    CHANNELS = 3
    EPOCHS = 100
    SHUFFLE_SIZE = 1000

    # Load the Food-101 train and validation splits along with their element counts.
    train, train_len = Food101.get_train_dataset()
    validation, validation_len = Food101.get_validation_dataset()

    AUTOTUNE = tf.data.experimental.AUTOTUNE

    # Training pipeline: square crop and resize, RandAugment, pixel normalization,
    # one-hot labels, then mixup.
    train = train.map(
        ImageDatasetUtil.map_max_square_crop_and_resize(IMAGE_SIZE, IMAGE_SIZE),
        num_parallel_calls=AUTOTUNE)
    train = train.map(ImageAugument.randaugment_map(1, 2))
    train = train.map(ImageDatasetUtil.image_reguralization(), num_parallel_calls=AUTOTUNE)
    train = train.map(ImageDatasetUtil.one_hot(CLASS_NUM), num_parallel_calls=AUTOTUNE)
    train = train.apply(ImageAugument.mixup_apply(200, 0.1))

    # Validation pipeline: same preprocessing, without augmentation or mixup.
    validation = validation.map(
        ImageDatasetUtil.map_max_square_crop_and_resize(IMAGE_SIZE, IMAGE_SIZE),
        num_parallel_calls=AUTOTUNE)
    validation = validation.map(ImageDatasetUtil.image_reguralization(), num_parallel_calls=AUTOTUNE)
    validation = validation.map(ImageDatasetUtil.one_hot(CLASS_NUM), num_parallel_calls=AUTOTUNE)

    # Build the optimizer, model, and callbacks, then launch classification training.
    optimizer = OptimizerBuilder.get_optimizer(name="rmsprop", lr=0.05)
    model = KerasResNet50V2.get_model(
        input_shape=(IMAGE_SIZE, IMAGE_SIZE, CHANNELS),
        classes=CLASS_NUM)  # resnest=True, resnet_c=True, resnet_d=True, mish=True
    callbacks = CallbackBuilder.get_callbacks(
        tensorboard=False, consine_annealing=False,
        reduce_lr_on_plateau=True, reduce_patience=5, reduce_factor=0.25,
        early_stopping_patience=8)
    ImageTrain.train_image_classification(
        train_data=train, train_size=train_len, batch_size=BATCH_SIZE,
        validation_data=validation, validation_size=validation_len,
        shuffle_size=SHUFFLE_SIZE, model=model, callbacks=callbacks,
        optimizer=optimizer, loss="categorical_crossentropy", max_epoch=EPOCHS)