コード例 #1 (Code example #1)
0
def train(trial: Trial):
    """Optuna objective: train a simple MNIST classifier for one trial.

    Args:
        trial: The Optuna trial object driving this hyper-parameter search
            step (supplies ``trial.number`` and the suggestion API).

    Returns:
        The last epoch's validation loss, which Optuna minimizes.
    """
    # NOTE(review): 'minist_optuna' / get_optuna_conext spellings are kept
    # byte-for-byte -- they are the project's runtime identifiers/API.
    context = Optuna.get_optuna_conext('minist_optuna', trial)
    print("New trial ", trial.number, "++++++++++++++++++++++++++++", context)
    ENABLE_SUSPEND_RESUME_TRAINING()

    print(context)
    # Register the 'lr' search dimension; the chosen value is read back
    # below via Optuna.get_value('lr', ...).
    Optuna.suggest_float(name='lr', low=1e-6, high=1e-2, log=True)

    # Renamed from `train`/`validation`: the original bound the dataset to
    # `train`, shadowing this function's own name.
    train_ds, train_len = Mnist.get_train_dataset()
    validation_ds, validation_len = Mnist.get_test_dataset()

    # Pixel regularization + one-hot labels for both splits.
    train_ds = train_ds.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))
    validation_ds = validation_ds.map(
        ImageDatasetUtil.image_reguralization()).map(
            ImageDatasetUtil.one_hot(CLASS_NUM))

    optimizer = OptimizerBuilder.get_optimizer(name="rmsprop",
                                               lr=Optuna.get_value(
                                                   'lr', default=0.1))
    model = SimpleClassificationModel.get_model(input_shape=(IMAGE_SIZE,
                                                             IMAGE_SIZE, 1),
                                                classes=CLASS_NUM)
    callbacks = CallbackBuilder.get_callbacks(tensorboard=True,
                                              reduce_lr_on_plateau=True,
                                              reduce_patience=5,
                                              reduce_factor=0.25,
                                              early_stopping_patience=16)
    history = TrainingExecutor.train_classification(
        train_data=train_ds,
        train_size=train_len,
        batch_size=BATCH_SIZE,
        validation_data=validation_ds,
        validation_size=validation_len,
        shuffle_size=SHUFFLE_SIZE,
        model=model,
        callbacks=callbacks,
        optimizer=optimizer,
        loss="categorical_crossentropy",
        max_epoch=EPOCHS)

    # Objective value: final-epoch validation loss.
    return history.history['val_loss'][-1]
コード例 #2 (Code example #2)
0
# Alternative dataset (MVTec AD "bottle"), kept for reference:
# mvtec_ad, mvtec_len = MVTecAd.get_train_dataset("bottle")
# mvtec_ad = mvtec_ad.map(ImageDatasetUtil.resize(IMAGE_SIZE, IMAGE_SIZE))
# (train, train_len), (validation, validation_len) = ImageDatasetUtil.devide_train_validation(mvtec_ad, mvtec_len, 0.9)

# Load Cats-vs-Dogs and crop/resize every image to a square of
# IMAGE_SIZE x IMAGE_SIZE.
cats_vs_dogs, total_len = CatsVsDogs.get_train_dataset()
cats_vs_dogs = cats_vs_dogs.map(
    ImageDatasetUtil.map_max_square_crop_and_resize(IMAGE_SIZE, IMAGE_SIZE))

# 90% train / 10% validation split.  The original bound the train length
# to `len`, shadowing the builtin -- renamed to train_len.
(train, train_len), (validation, validation_len) = \
    ImageDatasetUtil.devide_train_validation(cats_vs_dogs, total_len, 0.9)

# Pixel regularization; an optional resize step is kept for reference.
train = train.map(ImageDatasetUtil.image_reguralization(),
                  num_parallel_calls=tf.data.experimental.AUTOTUNE
                  )  # .map(ImageDatasetUtil.resize(64,64))
validation_r = validation.map(ImageDatasetUtil.image_reguralization(),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE
                              )  # .map(ImageDatasetUtil.resize(64,64))

# SSIM autoencoder over RGB input.
model = SSIMAutoEncoderModel.get_model(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

optimizer = OptimizerBuilder.get_optimizer("rmsprop")
callback = CallbackBuilder.get_callbacks()

# Shuffle-buffer size; named instead of the bare literal 100 (position
# matches shuffle_size in the library's classification trainer -- confirm).
SHUFFLE_SIZE = 100

loss = ssim_color_loss
ImageTrain.train_image_autoencoder(train, train_len, BATCH_SIZE, validation_r,
                                   validation_len, SHUFFLE_SIZE, model,
                                   callback, optimizer, loss, EPOCHS, False)

# model.load_weights(Context.get_model_path())
# Visualize reconstructions and per-image reconstruction error on the
# un-regularized validation set.
ImageTrain.show_autoencoder_results(model, validation, 15)
ImageTrain.calucurate_reconstruction_error(model, validation, 10)
コード例 #3 (Code example #3)
0
File: mnist.py — Project: kitfactory/colab
        TRAINING_NAME="20200519141141")  #   .TRAINING_NAME:})
    # NOTE(review): this excerpt starts mid-call (the line above closes a
    # call whose opening is outside this view) and is also cut off at the
    # bottom -- code left byte-identical; comments only.
    ENABLE_SUSPEND_RESUME_TRAIN()

    # Training hyper-parameters for the MNIST classification run.
    BATCH_SIZE = 500
    CLASS_NUM = 10
    IMAGE_SIZE = 28
    EPOCHS = 20
    SHUFFLE_SIZE = 1000

    # MNIST train/test splits with their sizes.
    train, train_len = Mnist.get_train_dataset()
    validation, validation_len = Mnist.get_test_dataset()
    # Pixel regularization + one-hot labels for both splits.
    train = train.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))
    validation = validation.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))
    optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
    model = SimpleClassificationModel.get_model(input_shape=(IMAGE_SIZE,
                                                             IMAGE_SIZE, 1),
                                                classes=CLASS_NUM)
    callbacks = CallbackBuilder.get_callbacks(tensorboard=False,
                                              reduce_lr_on_plateau=True,
                                              reduce_patience=3,
                                              reduce_factor=0.25,
                                              early_stopping_patience=5)
    # The call below continues past the end of this excerpt.
    ImageTrain.train_image_classification(train_data=train,
                                          train_size=train_len,
                                          batch_size=BATCH_SIZE,
                                          validation_data=validation,
                                          validation_size=validation_len,
                                          shuffle_size=SHUFFLE_SIZE,
                                          model=model,
コード例 #4 (Code example #4)
0
from tftk.image.model import ResNet50
from tftk.train import TrainingExecutor
from tftk.image.augument import ImageAugument
from tftk.callback import CallbackBuilder
from tftk.optimizer import OptimizerBuilder

# Training hyper-parameters for the Places365 run.
BATCH_SIZE = 24
CLASS_NUM = 365
IMAGE_SIZE = 224
SHUFFLE_SIZE = 10000

# Training data: crop/pad to IMAGE_SIZE, apply RandAugment(3, 10),
# regularize pixel values, and one-hot encode the labels.
train, train_len = Place365Small.get_train_dataset()
train = (train
         .map(ImageDatasetUtil.resize_with_crop_or_pad(IMAGE_SIZE, IMAGE_SIZE))
         .map(ImageAugument.randaugment_map(3, 10))
         .map(ImageDatasetUtil.image_reguralization())
         .map(ImageDatasetUtil.one_hot(CLASS_NUM)))

# Validation data: same preprocessing, but without augmentation.
validation, validation_len = Place365Small.get_validation_dataset()
validation = (validation
              .map(ImageDatasetUtil.resize_with_crop_or_pad(IMAGE_SIZE, IMAGE_SIZE))
              .map(ImageDatasetUtil.image_reguralization())
              .map(ImageDatasetUtil.one_hot(CLASS_NUM)))

# Optimizer
optimizer = OptimizerBuilder.get_optimizer(name="sgd", lr=0.01)
# Model
model = ResNet50.get_model(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                           classes=CLASS_NUM)
# Callbacks (paths and keyword spellings preserved as the project expects)
callbacks = CallbackBuilder.get_callbacks(tensorboard_log_dir="tmp\\log",
                                          save_weights="tmp\\weigths.hdf5",
                                          consine_annealing=False)

# Run training
TrainingExecutor.train_classification(train_data=train,
                                      train_size=train_len,
                                      batch_size=BATCH_SIZE,
                                      validation_data=validation,
                                      validation_size=validation_len,
                                      shuffle_size=SHUFFLE_SIZE,
                                      model=model,
                                      callbacks=callbacks,
                                      optimizer=optimizer,
                                      loss="categorical_crossentropy",
                                      max_epoch=50)
コード例 #5 (Code example #5)
0
File: test_efficientnet.py — Project: kitfactory/tftk
    # NOTE(review): this excerpt begins mid-function (`train`/`validation`
    # are defined above the visible lines) and is cut off at the bottom --
    # code left byte-identical; comments only.
    # Training pipeline: regularize pixels, one-hot the labels, then apply
    # mixup (args 200, 0.1 -- presumably batch/buffer size and alpha;
    # confirm against ImageAugument.mixup_apply).
    train = train.map(
        ImageDatasetUtil.image_reguralization(),
        num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
            ImageDatasetUtil.one_hot(CLASS_NUM),
            num_parallel_calls=tf.data.experimental.AUTOTUNE).apply(
                ImageAugument.mixup_apply(200, 0.1))
    # Validation pipeline: square-crop/resize, regularize, one-hot --
    # no mixup on validation data.
    validation = validation.map(
        ImageDatasetUtil.map_max_square_crop_and_resize(
            IMAGE_SIZE, IMAGE_SIZE),
        num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
            ImageDatasetUtil.image_reguralization(),
            num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
                ImageDatasetUtil.one_hot(CLASS_NUM),
                num_parallel_calls=tf.data.experimental.AUTOTUNE)

    # Default optimizer; EfficientNet-B2 with ImageNet weights.
    optimizer = OptimizerBuilder.get_optimizer()
    model = KerasEfficientNetB2.get_model(
        input_shape=(IMAGE_SIZE, IMAGE_SIZE, CHANNELS),
        classes=CLASS_NUM,
        weights="imagenet"
    )  # resnest=True,resnet_c=True,resnet_d=True,mish=True)
    callbacks = CallbackBuilder.get_callbacks(tensorboard=True,
                                              consine_annealing=False,
                                              reduce_lr_on_plateau=True,
                                              reduce_patience=6,
                                              reduce_factor=0.25,
                                              early_stopping_patience=10)
    # The call below continues past the end of this excerpt.
    ImageTrain.train_image_classification(train_data=train,
                                          train_size=train_len,
                                          batch_size=BATCH_SIZE,
                                          validation_data=validation,