def train(trial: Trial):
    # Each Optuna trial gets its own training context.
    context = Optuna.get_optuna_context('mnist_optuna', trial)
    print("New trial", trial.number, "++++++++++++++++++++++++++++", context)
    ENABLE_SUSPEND_RESUME_TRAINING()
    print(context)

    # Register the hyperparameter to search; read it back via Optuna.get_value().
    Optuna.suggest_float(name='lr', low=1e-6, high=1e-2, log=True)

    train, train_len = Mnist.get_train_dataset()
    validation, validation_len = Mnist.get_test_dataset()
    train = train.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))
    validation = validation.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))

    optimizer = OptimizerBuilder.get_optimizer(
        name="rmsprop", lr=Optuna.get_value('lr', default=0.1))
    model = SimpleClassificationModel.get_model(
        input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1), classes=CLASS_NUM)
    callbacks = CallbackBuilder.get_callbacks(
        tensorboard=True, reduce_lr_on_plateau=True, reduce_patience=5,
        reduce_factor=0.25, early_stopping_patience=16)

    history = TrainingExecutor.train_classification(
        train_data=train, train_size=train_len, batch_size=BATCH_SIZE,
        validation_data=validation, validation_size=validation_len,
        shuffle_size=SHUFFLE_SIZE, model=model, callbacks=callbacks,
        optimizer=optimizer, loss="categorical_crossentropy", max_epoch=EPOCHS)

    # Optuna minimizes the returned value: the final validation loss.
    return history.history['val_loss'][-1]
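# How this objective might be driven: tftk's Optuna wrapper may ship its own
# runner, so this is only a minimal sketch using the plain optuna API (which
# the Trial type above comes from). The study name and trial count here are
# illustrative assumptions, not tftk API.
import optuna

if __name__ == '__main__':
    study = optuna.create_study(study_name='mnist_optuna', direction='minimize')
    study.optimize(train, n_trials=20)
    print("best params:", study.best_params)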
tftk.Context.init_context(training_name='ssim_catsdog_deep_autoencoder')
tftk.ENABLE_MIXED_PRECISION()
tftk.ENABLE_SUSPEND_RESUME_TRAINING()

IMAGE_SIZE = 128
EPOCHS = 80
BATCH_SIZE = 50

# Alternative dataset: MVTec AD "bottle" images.
# mvtec_ad, mvtec_len = MVTecAd.get_train_dataset("bottle")
# mvtec_ad = mvtec_ad.map(ImageDatasetUtil.resize(IMAGE_SIZE, IMAGE_SIZE))
# (train, train_len), (validation, validation_len) = ImageDatasetUtil.devide_train_validation(mvtec_ad, mvtec_len, 0.9)

cats_vs_dogs, total_len = CatsVsDogs.get_train_dataset()
cats_vs_dogs = cats_vs_dogs.map(
    ImageDatasetUtil.map_max_square_crop_and_resize(IMAGE_SIZE, IMAGE_SIZE))
(train, train_len), (validation, validation_len) = ImageDatasetUtil.devide_train_validation(
    cats_vs_dogs, total_len, 0.9)

train = train.map(ImageDatasetUtil.image_reguralization(),
                  num_parallel_calls=tf.data.experimental.AUTOTUNE)  # .map(ImageDatasetUtil.resize(64, 64))
validation_r = validation.map(ImageDatasetUtil.image_reguralization(),
                              num_parallel_calls=tf.data.experimental.AUTOTUNE)  # .map(ImageDatasetUtil.resize(64, 64))

model = SSIMAutoEncoderModel.get_model(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
optimizer = OptimizerBuilder.get_optimizer("rmsprop")
callback = CallbackBuilder.get_callbacks()
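# The snippet stops before the actual training call. tftk presumably provides
# its own trainer for autoencoders; as a minimal sketch, plain Keras
# compile/fit with a DSSIM-style loss built from tf.image.ssim also works for
# the model above. The ssim_loss helper, the (image, image) pairing, and the
# assumption that dataset elements are dicts with an "image" key (as in the
# preview examples below) are all illustrative, not tftk API.
def ssim_loss(y_true, y_pred):
    # 1 - SSIM, averaged over the batch; inputs are [0, 1] float images.
    return 1.0 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, max_val=1.0))

autoencoder_train = train.map(lambda d: (d["image"], d["image"])).batch(BATCH_SIZE)
autoencoder_val = validation_r.map(lambda d: (d["image"], d["image"])).batch(BATCH_SIZE)

model.compile(optimizer=optimizer, loss=ssim_loss)
model.fit(autoencoder_train, validation_data=autoencoder_val,
          epochs=EPOCHS, callbacks=callback)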
if __name__ == '__main__':
    context = Context.init_context(TRAINING_NAME="20200519141141")
    ENABLE_SUSPEND_RESUME_TRAINING()

    BATCH_SIZE = 500
    CLASS_NUM = 10
    IMAGE_SIZE = 28
    EPOCHS = 20
    SHUFFLE_SIZE = 1000

    train, train_len = Mnist.get_train_dataset()
    validation, validation_len = Mnist.get_test_dataset()
    train = train.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))
    validation = validation.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))

    optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
    model = SimpleClassificationModel.get_model(
        input_shape=(IMAGE_SIZE, IMAGE_SIZE, 1), classes=CLASS_NUM)
    callbacks = CallbackBuilder.get_callbacks(
        tensorboard=False, reduce_lr_on_plateau=True, reduce_patience=3,
        reduce_factor=0.25, early_stopping_patience=5)

    # The original excerpt breaks off mid-call; the remaining keyword
    # arguments below follow the train_image_classification signature
    # defined later in this section.
    ImageTrain.train_image_classification(
        train_data=train, train_size=train_len, batch_size=BATCH_SIZE,
        validation_data=validation, validation_size=validation_len,
        shuffle_size=SHUFFLE_SIZE, model=model, callbacks=callbacks,
        optimizer=optimizer, loss="categorical_crossentropy", max_epoch=EPOCHS)
context = Context.init_context(TRAINING_BASE_DIR="tmp", TRAINING_NAME="food101")
tftk.USE_MIXED_PRECISION()

BATCH_SIZE = 64
CLASS_NUM = 101
IMAGE_SIZE = 224
CHANNELS = 3
EPOCHS = 100
SHUFFLE_SIZE = 1000

train, train_len = Food101.get_train_dataset()
validation, validation_len = Food101.get_validation_dataset()

# Training pipeline: crop/pad, RandAugment, normalize, one-hot, then mixup.
train = train.map(
    ImageDatasetUtil.resize_with_crop_or_pad(IMAGE_SIZE, IMAGE_SIZE),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
    ImageAugument.randaugment_map(1, 2))
train = train.map(
    ImageDatasetUtil.image_reguralization(),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
    ImageDatasetUtil.one_hot(CLASS_NUM),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).apply(
    ImageAugument.mixup_apply(200, 0.1))

# Validation pipeline: crop/pad, normalize, one-hot (no augmentation).
validation = validation.map(
    ImageDatasetUtil.resize_with_crop_or_pad(IMAGE_SIZE, IMAGE_SIZE),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
    ImageDatasetUtil.image_reguralization(),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
    ImageDatasetUtil.one_hot(CLASS_NUM),
    num_parallel_calls=tf.data.experimental.AUTOTUNE)
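# The excerpt ends after the input pipelines. A plausible continuation,
# mirroring the dogs-vs-cats example below, builds a model and hands
# everything to the trainer. The model choice and callback settings here are
# assumptions, not part of the original script.
optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
model = ResNetD18.get_model(input_shape=(IMAGE_SIZE, IMAGE_SIZE, CHANNELS),
                            classes=CLASS_NUM)
callbacks = CallbackBuilder.get_callbacks(tensorboard=True,
                                          reduce_lr_on_plateau=True,
                                          reduce_patience=5, reduce_factor=0.25,
                                          early_stopping_patience=8)
Trainer.train_classification(
    train_data=train, train_size=train_len, batch_size=BATCH_SIZE,
    validation_data=validation, validation_size=validation_len,
    shuffle_size=SHUFFLE_SIZE, model=model, callbacks=callbacks,
    optimizer=optimizer, loss="categorical_crossentropy", max_epoch=EPOCHS)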
tftk.USE_MIXED_PRECISION()

BATCH_SIZE = 10
# BATCH_SIZE = 48
CLASS_NUM = 2
IMAGE_SIZE = 150
EPOCHS = 100

train, train_len = ImageLabelFolderDataset.get_train_dataset(
    name="dogs-vs-cats", manual_dir="tmp")
validation, validation_len = ImageLabelFolderDataset.get_validation_dataset(
    name="dogs-vs-cats", manual_dir="tmp")

# Alternative: split the CatsVsDogs training set 90/10 instead.
# dataset, dataset_len = CatsVsDogs.get_train_dataset()
# dataset = dataset.map(ImageDatasetUtil.resize_with_crop_or_pad(IMAGE_SIZE, IMAGE_SIZE))
# (train, train_len), (validation, validation_len) = ImageDatasetUtil.devide_train_validation(dataset, dataset_len, 0.90)

train = train.map(
    ImageDatasetUtil.resize_with_crop_or_pad(IMAGE_SIZE, IMAGE_SIZE),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
    ImageAugument.randaugment_map(1, 2))
train = train.map(
    ImageDatasetUtil.image_reguralization(),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
    ImageDatasetUtil.one_hot(CLASS_NUM),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).apply(
    ImageAugument.mixup_apply(200, 0.1))
validation = validation.map(
    ImageDatasetUtil.resize_with_crop_or_pad(IMAGE_SIZE, IMAGE_SIZE),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
    ImageDatasetUtil.image_reguralization(),
    num_parallel_calls=tf.data.experimental.AUTOTUNE).map(
    ImageDatasetUtil.one_hot(CLASS_NUM),
    num_parallel_calls=tf.data.experimental.AUTOTUNE)

optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
model = ResNetD18.get_model(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                            classes=CLASS_NUM, resnest=True, resnet_c=True,
                            resnet_d=True, mish=True)
# model = SimpleClassificationModel.get_model(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), classes=CLASS_NUM)
callbacks = CallbackBuilder.get_callbacks(
    base_dir="tmp", tensorboard=True, save_weights=True,
    consine_annealing=False, reduce_lr_on_plateau=True, reduce_patience=5,
    reduce_factor=0.25, early_stopping_patience=8)
Trainer.train_classification(
    train_data=train, train_size=train_len, batch_size=BATCH_SIZE,
    validation_data=validation, validation_size=validation_len,
    shuffle_size=1000, model=model, callbacks=callbacks, optimizer=optimizer,
    loss="binary_crossentropy", max_epoch=EPOCHS)

# Leftover experiments with other datasets; the original left this block as
# an unterminated string literal, so it is kept commented out instead:
# train, train_len = RockPaperScissors.get_train_dataset()
# validation, validation_len = Place365Small.get_validation_dataset()
# train = train.map(ImageDatasetUtil.resize_with_crop_or_pad(IMAGE_SIZE, IMAGE_SIZE))
# validation = validation.map(ImageDatasetUtil.resize_with_crop_or_pad(IMAGE_SIZE, IMAGE_SIZE))
# dataset, dataset_len = MVTecAd.get_train_dataset(type="bottle")
def train_image_classification(
        cls,
        train_data: tf.data.Dataset, train_size: int, batch_size: int,
        validation_data: tf.data.Dataset, validation_size: int,
        shuffle_size: int,
        model: tf.keras.Model,
        callbacks: List[tf.keras.callbacks.Callback],
        optimizer: tf.keras.optimizers.Optimizer,
        loss: tf.keras.losses.Loss,
        max_epoch: int = 5,
        resume: bool = True):
    """Runs image-classification training.

    Parameters:
        train_data {tf.data.Dataset}: training dataset
        train_size {int}: number of training examples
        batch_size {int}: batch size used during training
        validation_data {tf.data.Dataset}: validation dataset
        validation_size {int}: number of validation examples
        shuffle_size {int}: shuffle buffer size used during training
        model {tf.keras.Model}: model to train

    Example:
        import tftk

        tftk.Context.init_context(
            TRAINING_NAME="example_training1",
            TRAINING_BASE_DIR="./tmp")
        tftk.ENABLE_SUSPEND_RESUME_TRAINING()
        tftk.USE_MIXED_PRECISION()
    """
    # Build the input pipelines: tuple conversion, repeat, shuffle, batch, prefetch.
    train_data = train_data.map(
        ImageDatasetUtil.dict_to_classification_tuple(),
        num_parallel_calls=tf.data.experimental.AUTOTUNE).repeat()
    if shuffle_size != 0:
        train_data = train_data.shuffle(shuffle_size)
    train_data = train_data.batch(batch_size).prefetch(
        tf.data.experimental.AUTOTUNE)
    validation_data = validation_data.map(
        ImageDatasetUtil.dict_to_classification_tuple(),
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    validation_data = validation_data.repeat().batch(batch_size).prefetch(
        tf.data.experimental.AUTOTUNE)

    model.compile(optimizer=optimizer, loss=loss, metrics=["acc"])
    model.summary()

    initial_epoch = 0
    exe = ResumeExecutor.get_instance()

    if IS_ON_COLABOLATORY_WITH_GOOGLE_DRIVE():
        Colaboratory.copy_resume_data_from_google_drive()
    else:
        print("google drive is not found.")

    if exe.is_resumable_training():
        print("This is resume training!!")
        exe.resume_model(model)
        initial_epoch, _, _, _ = exe.resume_values()
        initial_epoch += 1
        print("resuming epoch", initial_epoch, "max_epoch", max_epoch)
    elif exe.is_train_ended():
        print("Training is completed.")
        exit()

    steps_per_epoch = train_size // batch_size
    validation_steps = validation_size // batch_size
    history = model.fit(
        train_data, callbacks=callbacks, validation_data=validation_data,
        steps_per_epoch=steps_per_epoch, validation_steps=validation_steps,
        epochs=max_epoch, initial_epoch=initial_epoch)
    tf.keras.backend.clear_session()
    del optimizer, callbacks, model, train_data, validation_data
    return history
from tftk.image.dataset import Mnist
from tftk.image.dataset import ImageDatasetUtil
from tftk.image.augument import ImageAugument
from tftk.image.dataset import ImageLabelFolderDataset

if __name__ == '__main__':
    BATCH_SIZE = 100
    CLASS_NUM = 10
    IMAGE_SIZE = 28
    EPOCHS = 2
    SHUFFLE_SIZE = 1000

    train, train_len = ImageLabelFolderDataset.get_train_dataset(
        name="dogs-vs-cats", manual_dir="tmp")
    train = train.map(ImageDatasetUtil.resize_with_crop_or_pad(224, 224))

    # Preview a single augmentation; swap in one of the commented variants
    # below to compare their effects.
    for d in train.take(1):
        image = d["image"]
        autocontrast_image = ImageAugument.autocontrast(image)
        array = autocontrast_image.numpy()
        # equalize_image = ImageAugument.equalize(image)
        # array = equalize_image.numpy()
        # invert_image = ImageAugument.invert(image)
        # array = invert_image.numpy()
        # rotate_image = ImageAugument.rotate(image, 45)
        # array = rotate_image.numpy()
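# The loop above leaves the result in `array` without displaying it. A minimal
# way to look at it, assuming uint8 HWC image data as in the mixup preview
# below, is via Pillow:
from PIL import Image

im = Image.fromarray(array.astype("uint8"))
im.show()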
CLASS_NUM = 2  # dogs-vs-cats is a two-class dataset
IMAGE_SIZE = 150
IMAGE_CHANNELS = 3
EPOCHS = 100
BATCH_SIZE = 100

tftk.ENABLE_MIXED_PRECISION()
context = Context.init_context(TRAINING_NAME='DogsVsCats')

train, train_len = ImageLabelFolderDataset.get_train_dataset(
    name="dogs-vs-cats", manual_dir="tmp")
validation, validation_len = ImageLabelFolderDataset.get_validation_dataset(
    name="dogs-vs-cats", manual_dir="tmp")

# Training pipeline: square crop/resize, RandAugment, normalize, one-hot.
train = train.map(ImageDatasetUtil.map_max_square_crop_and_resize(
    IMAGE_SIZE, IMAGE_SIZE), num_parallel_calls=tf.data.experimental.AUTOTUNE)
train = train.map(ImageAugument.randaugment_map(2, 4),
                  num_parallel_calls=tf.data.experimental.AUTOTUNE)
train = train.map(ImageDatasetUtil.image_reguralization(),
                  num_parallel_calls=tf.data.experimental.AUTOTUNE)
train = train.map(ImageDatasetUtil.one_hot(CLASS_NUM),
                  num_parallel_calls=tf.data.experimental.AUTOTUNE)

# Validation pipeline: square crop/resize and normalize only.
validation = validation.map(
    ImageDatasetUtil.map_max_square_crop_and_resize(IMAGE_SIZE, IMAGE_SIZE),
    num_parallel_calls=tf.data.experimental.AUTOTUNE)
validation = validation.map(
    ImageDatasetUtil.image_reguralization(),
    num_parallel_calls=tf.data.experimental.AUTOTUNE)
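# The excerpt breaks off before one-hot encoding the validation set and
# starting training. A plausible continuation, following the other
# classification examples in this section (the model choice and callback
# settings are assumptions, not part of the original script):
validation = validation.map(ImageDatasetUtil.one_hot(CLASS_NUM),
                            num_parallel_calls=tf.data.experimental.AUTOTUNE)
optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
model = SimpleClassificationModel.get_model(
    input_shape=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_CHANNELS), classes=CLASS_NUM)
callbacks = CallbackBuilder.get_callbacks(tensorboard=True,
                                          reduce_lr_on_plateau=True,
                                          reduce_patience=5, reduce_factor=0.25,
                                          early_stopping_patience=8)
ImageTrain.train_image_classification(
    train_data=train, train_size=train_len, batch_size=BATCH_SIZE,
    validation_data=validation, validation_size=validation_len,
    shuffle_size=1000, model=model, callbacks=callbacks, optimizer=optimizer,
    loss="categorical_crossentropy", max_epoch=EPOCHS)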
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from PIL import Image

import tftk
from tftk.image.dataset import Mnist
from tftk.image.dataset import ImageDatasetUtil
from tftk.image.augument import ImageAugument
from tftk.image.dataset import ImageLabelFolderDataset

if __name__ == '__main__':
    BATCH_SIZE = 100
    CLASS_NUM = 10
    IMAGE_SIZE = 28
    EPOCHS = 2
    SHUFFLE_SIZE = 1000

    train, train_len = ImageLabelFolderDataset.get_train_dataset(
        name="dogs-vs-cats", manual_dir="tmp")
    train = train.map(ImageDatasetUtil.resize_with_crop_or_pad(224, 224)).map(
        ImageDatasetUtil.one_hot(2))
    train = train.map(ImageDatasetUtil.image_reguralization()).apply(
        ImageAugument.mixup_apply(mixup_size=100, alpha=0.2))

    # Inspect one mixed-up sample: scale back to [0, 255] and display it.
    for d in train.take(1):
        image = d["image"] * 255
        image = tf.cast(image, tf.uint8)
        print(image)
        image = image.numpy()
        y = d["label"]
        print("y", d["label"].numpy())
        im = Image.fromarray(image)
        im.show()