Example #1
0
    def get_optuna_conext(cls, training_name: str, trial: Trial) -> Context:
        """Prepare the start of an Optuna trial. Call this first inside an objective function.

        NOTE(review): "conext" in the method name is a typo for "context";
        kept as-is because callers depend on the current name.

        Parameters:
            training_name : name used to initialize the training context
            trial : the Optuna trial object being started

        Returns:
            Context with the Optuna flag set and the trial stored under
            Context.OPTUNA_TRIAL.
        """
        # NOTE(review): leftover debug print — consider removing or using logging.
        print("get_context", trial)
        # Keep the current trial on the class for later access.
        cls.trial = trial
        context = Context.init_context(training_name)
        context[Context.OPTUNA] = True
        context[Context.OPTUNA_TRIAL] = trial
        return context
Example #2
0
def pretext_dataset(dataset: tf.data.Dataset, start_label: int) -> tf.data.Dataset:
    """Return the subset of *dataset* whose 'label' feature is >= start_label.

    Parameters:
        dataset : tf.data.Dataset of dict elements with at least
            'image' and 'label' keys — assumed from the lambda below; confirm.
        start_label : smallest label value to keep.

    Returns:
        The filtered tf.data.Dataset.
    """
    filtered = dataset.filter(lambda data: data['label'] >= start_label)

    def supervised_transform(data):
        # Normalize uint8 pixels into float32 in [0, 1].
        # NOTE(review): this transform is never mapped onto `filtered`;
        # presumably unfinished — confirm intended use.
        image = data['image']
        image = tf.cast(image, tf.float32)
        image = image / 255.0
        # BUG FIX: the computed image was discarded (no return) in the original.
        return image

    def random_transform(image):
        # TODO: random augmentation not implemented.
        pass

    # BUG FIX: the function is annotated to return a tf.data.Dataset but the
    # original fell through and returned None.
    return filtered


if __name__ == '__main__':

    # NOTE(review): TRAINING_NAME='' is an empty placeholder — confirm the
    # intended training name before running.
    context = Context.init_context(TRAINING_NAME='')
    # ENABLE_SUSPEND_RESUME_TRAINING()

    # Training hyperparameters.
    BATCH_SIZE = 500      # samples per batch
    CLASS_NUM = 10        # presumably MNIST's 10 digit classes — confirm
    IMAGE_SIZE = 28       # square input size in pixels
    EPOCHS = 2
    SHUFFLE_SIZE = 1000   # shuffle buffer size for tf.data

    # if IS_SUSPEND_RESUME_TRAIN() == True and IS_ON_COLABOLATORY_WITH_GOOGLE_DRIVE()== True:
    
Example #3
0
File: mnist.py — Project: kitfactory/colab
from tftk.image.dataset import Mnist
from tftk.image.dataset import ImageDatasetUtil
from tftk.image.model.classification import SimpleClassificationModel
from tftk.callback import CallbackBuilder
from tftk.optimizer import OptimizerBuilder
from tftk import Context

from tftk.train.image import ImageTrain

from tftk import ENABLE_SUSPEND_RESUME_TRAIN, IS_SUSPEND_RESUME_TRAIN, ResumeExecutor

if __name__ == '__main__':

    # Training name looks like a timestamp (YYYYMMDDHHMMSS) — presumably so a
    # suspended run can be located and resumed; confirm.
    context = Context.init_context(
        TRAINING_NAME="20200519141141")  #   .TRAINING_NAME:})
    ENABLE_SUSPEND_RESUME_TRAIN()

    # Training hyperparameters.
    BATCH_SIZE = 500
    CLASS_NUM = 10        # MNIST has 10 digit classes
    IMAGE_SIZE = 28       # MNIST images are 28x28
    EPOCHS = 20
    SHUFFLE_SIZE = 1000

    # Load the MNIST splits along with their sample counts.
    train, train_len = Mnist.get_train_dataset()
    validation, validation_len = Mnist.get_test_dataset()
    # Normalize pixel values and one-hot encode the labels.
    train = train.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))
    validation = validation.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))
    optimizer = OptimizerBuilder.get_optimizer(name="rmsprop")
    # NOTE(review): snippet truncated here — the get_model() call is incomplete.
    model = SimpleClassificationModel.get_model(input_shape=(IMAGE_SIZE,
Example #4
0
from tftk.image.dataset import Mnist
from tftk.image.dataset import ImageDatasetUtil
from tftk.image.model.classification import SimpleClassificationModel
from tftk.callback import CallbackBuilder
from tftk.optimizer import OptimizerBuilder
from tftk import Context

from tftk.train.image import ImageTrain

from tftk import ENABLE_SUSPEND_RESUME_TRAINING, ResumeExecutor

if __name__ == '__main__':

    context = Context.init_context(TRAINING_NAME='mnist_y')
    # ENABLE_SUSPEND_RESUME_TRAINING()

    # Training hyperparameters.
    BATCH_SIZE = 500
    CLASS_NUM = 10        # presumably MNIST's 10 digit classes — confirm
    IMAGE_SIZE = 28
    EPOCHS = 2
    SHUFFLE_SIZE = 1000

    # if IS_SUSPEND_RESUME_TRAIN() == True and IS_ON_COLABOLATORY_WITH_GOOGLE_DRIVE()== True:

    # Load the MNIST splits along with their sample counts.
    train, train_len = Mnist.get_train_dataset()
    validation, validation_len = Mnist.get_test_dataset()

    # Normalize pixel values and one-hot encode the labels.
    train = train.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))
    validation = validation.map(ImageDatasetUtil.image_reguralization()).map(
        ImageDatasetUtil.one_hot(CLASS_NUM))
Example #5
0
File: callback.py — Project: kitfactory/tftk
    def get_callbacks(cls,
                      tensorboard: bool = True,
                      consine_annealing=False,
                      reduce_lr_on_plateau=True,
                      early_stopping=True,
                      monitor='val_loss',
                      **kwargs):
        """Build the commonly used set of Keras callbacks in one call.

        By default TensorBoard, ReduceLROnPlateau and EarlyStopping are
        selected. When tftk.ENABLE_SUSPEND_RESUME_TRAINING() is active, a
        SuspendCallback enabling suspend/resume is added as well. When the
        context runs under Optuna, a pruning callback reports `monitor` to
        the trial instead of EarlyStopping.

        Parameters:
            tensorboard : save TensorBoard logs under BaseDir/TrainingName.
                Default True.
            consine_annealing : control the learning rate with cosine
                annealing. Default False. (Name kept as-is for backward
                compatibility; "cosine_annealing" was intended.)
            reduce_lr_on_plateau : lower the learning rate with
                ReduceLROnPlateau when `monitor` stalls. Default True.
            early_stopping : stop training with EarlyStopping when `monitor`
                stalls. Default True. Ignored while running under Optuna.
            monitor : metric watched by the callbacks. Default 'val_loss'.

        Keyword Arguments:
            profile_batch : start/end batch for TensorBoard profiling;
                None disables profiling.
            annealing_epoch : total epochs for cosine annealing. Default 100.
            init_lr : initial learning rate for cosine annealing. Default 0.01.
            min_lr : minimum learning rate (cosine annealing /
                ReduceLROnPlateau). Default 1e-6.
            patience : ReduceLROnPlateau patience in epochs. Default 5.
            factor : ReduceLROnPlateau reduction factor. Default 0.25.
            early_stopping_patience : EarlyStopping patience in epochs.
                Default 8.

        Returns:
            List[tf.keras.callbacks.Callback] -- callbacks to pass to fit().

        Example:
            from tftk.callbacks import HandyCallback
            callbacks = HandyCallback.get_callbacks(early_stopping_patience=15)
        """
        context = Context.get_instance()
        # NOTE(review): base/name are unused below; the lookups are kept so a
        # context missing these keys still fails fast here.
        base = context[Context.TRAINING_BASE_DIR]
        name = context[Context.TRAINING_NAME]

        training_dir = Context.get_training_path()

        # Make sure the training directory exists before any callback writes to it.
        if not tf.io.gfile.exists(training_dir):
            tf.io.gfile.makedirs(training_dir)

        callbacks = []

        if tensorboard:
            tensorboard_log_dir = training_dir + os.path.sep + "log"
            profile_batch = kwargs.get("profile_batch", None)
            if profile_batch is not None:
                callbacks.append(
                    tf.keras.callbacks.TensorBoard(log_dir=tensorboard_log_dir,
                                                   profile_batch=profile_batch,
                                                   histogram_freq=1))
            else:
                callbacks.append(
                    tf.keras.callbacks.TensorBoard(
                        log_dir=tensorboard_log_dir))

        if consine_annealing:
            print("Callback-CosineAnnealing")
            annealing_epoch = kwargs.get("annealing_epoch", 100)
            init_lr = kwargs.get("init_lr", 0.01)
            min_lr = kwargs.get("min_lr", 1e-6)
            callbacks.append(
                CosineAnnealingScheduler(annealing_epoch,
                                         eta_max=init_lr,
                                         eta_min=min_lr))
            # Cosine annealing already drives the LR; don't also reduce on plateau.
            reduce_lr_on_plateau = False

        if reduce_lr_on_plateau:
            print("Callback-ReduceOnPlateau")
            patience = kwargs.get("patience", 5)
            factor = kwargs.get("factor", 0.25)
            min_lr = kwargs.get("min_lr", 1e-6)
            callbacks.append(
                tf.keras.callbacks.ReduceLROnPlateau(patience=patience,
                                                     factor=factor,
                                                     verbose=1,
                                                     min_lr=min_lr))

        # BUG FIX: the original condition was
        #   early_stopping == True & context.get(Context.OPTUNA, False) == False
        # `&` binds tighter than `==`, turning this into the chained comparison
        # `early_stopping == (True & optuna) == False`, which (with optuna
        # False) added EarlyStopping exactly when early_stopping was False.
        if early_stopping and not context.get(Context.OPTUNA, False):
            early_stopping_patience = kwargs.get("early_stopping_patience", 8)
            callbacks.append(
                tf.keras.callbacks.EarlyStopping(
                    monitor=monitor,
                    patience=early_stopping_patience,
                    verbose=1))

        if IS_SUSPEND_RESUME_TRAINING():
            print("Suspend Resume Callback")
            callbacks.append(SuspendCallback(monitor=monitor))

        if context.get(Context.OPTUNA, False):
            print("Using Optuna")
            # Report `monitor` to Optuna so unpromising trials can be pruned early.
            trial = context.get(Context.OPTUNA_TRIAL)
            callbacks.append(TFKerasPruningCallback(trial, monitor=monitor))

        return callbacks
Example #6
0
from tftk.image.dataset import Mnist
from tftk.image.dataset import ImageDatasetUtil
from tftk.image.model.classification import SimpleClassificationModel
from tftk.callback import CallbackBuilder
from tftk.optimizer import OptimizerBuilder
from tftk import Context

from tftk.train.image import ImageTrain

from tftk import ENABLE_SUSPEND_RESUME_TRAIN, IS_SUSPEND_RESUME_TRAIN, ResumeExecutor

if __name__ == '__main__':

    # Fixed base dir / run name so a suspended run can be located and resumed.
    context = Context.init_context(
        TRAINING_BASE_DIR="tmp",
        TRAINING_NAME="resume_test"
    )

    # Turn on suspend/resume support for this training run.
    ENABLE_SUSPEND_RESUME_TRAIN()

    # Training hyperparameters.
    BATCH_SIZE = 500
    CLASS_NUM = 10
    IMAGE_SIZE = 28
    EPOCHS = 3
    SHUFFLE_SIZE = 1000
    # NOTE(review): BASEDIR/TRAIN_NAME duplicate the values passed to
    # init_context above and appear unused in this snippet.
    BASEDIR = "./tmp"
    TRAIN_NAME = "resume_test"

    # if IS_SUSPEND_RESUME_TRAIN() == True and IS_ON_COLABOLATORY_WITH_GOOGLE_DRIVE()== True:
    
Example #7
0
File: food101.py — Project: kitfactory/colab
import os
# Suppress TensorFlow C++ log output (level 3 = errors only); set before the
# tftk/TensorFlow imports below so it takes effect.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import tftk
from tftk.image.dataset import Food1o1
from tftk.image.dataset import ImageDatasetUtil
from tftk.image.model import KerasResNet50V2
from tftk.train.image import ImageTrain
from tftk.image.augument import ImageAugument
from tftk.callback import CallbackBuilder
from tftk.optimizer import OptimizerBuilder
from tftk import Context

if __name__ == '__main__':

    context = Context.init_context(TRAINING_BASE_DIR="tmp",
                                   TRAINING_NAME="food101")

    # Mixed precision to cut memory/compute on supporting hardware.
    tftk.USE_MIXED_PRECISION()
    BATCH_SIZE = 64

    # Training hyperparameters.
    CLASS_NUM = 101       # Food-101 has 101 classes
    IMAGE_SIZE = 224
    CHANNELS = 3
    EPOCHS = 100
    SHUFFLE_SIZE = 1000

    # NOTE(review): the class is spelled "Food1o1" (letter 'o', not zero) in
    # the import — confirm it matches tftk's actual class name.
    train, train_len = Food1o1.get_train_dataset()
    validation, validation_len = Food1o1.get_validation_dataset()

    # NOTE(review): snippet truncated here — this map() call is incomplete.
    train = train.map(ImageDatasetUtil.resize_with_crop_or_pad(
        IMAGE_SIZE, IMAGE_SIZE),
Example #8
0
from tftk.image.model import SimpleClassificationModel
from tftk.train.image import ImageTrain
from tftk.callback import CallbackBuilder
from tftk.optimizer import OptimizerBuilder

if __name__ == '__main__':

    # Training hyperparameters.
    # NOTE(review): dogs-vs-cats is a 2-class problem; CLASS_NUM = 10 looks
    # inconsistent — confirm against the dataset/model actually used.
    CLASS_NUM = 10
    IMAGE_SIZE = 150
    IMAGE_CHANNELS = 3
    EPOCHS = 100
    BATCH_SIZE = 100

    # NOTE(review): other examples call tftk.USE_MIXED_PRECISION(); verify
    # ENABLE_MIXED_PRECISION exists. Also tftk / Context / tf /
    # ImageLabelFolderDataset / ImageDatasetUtil / ImageAugument are not
    # imported in this snippet — presumably trimmed from the original file.
    tftk.ENABLE_MIXED_PRECISION()

    context = Context.init_context(TRAINING_NAME='DogsVsCats')
    # Folder-per-label dataset; "manual_dir" points at the local copy.
    train, train_len = ImageLabelFolderDataset.get_train_dataset(
        name="dogs-vs-cats", manual_dir="tmp")
    validation, validation_len = ImageLabelFolderDataset.get_validation_dataset(
        name="dogs-vs-cats", manual_dir="tmp")

    # Pipeline: square crop/resize, augment (randaugment_map(2, 4)),
    # normalize, one-hot encode.
    train = train.map(ImageDatasetUtil.map_max_square_crop_and_resize(
        IMAGE_SIZE, IMAGE_SIZE),
                      num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train = train.map(ImageAugument.randaugment_map(2, 4),
                      num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train = train.map(ImageDatasetUtil.image_reguralization(),
                      num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train = train.map(ImageDatasetUtil.one_hot(CLASS_NUM),
                      num_parallel_calls=tf.data.experimental.AUTOTUNE)