Example No. 1
def train_classifier() -> tf.keras.Model:
    model = get_classification_model()
    model = compile_model(model)

    train, val, test = create_datasets()
    train = configure_dataset_for_performance(train, BATCH_SIZE)
    val = configure_dataset_for_performance(val, BATCH_SIZE)
    test = configure_dataset_for_performance(test, BATCH_SIZE)

    log_dir = "logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
    callbacks = [
        EarlyStopping(monitor="val_loss",
                      patience=5,
                      restore_best_weights=True),
        TerminateOnNaN(),
        TBCallback(log_dir=log_dir, histogram_freq=1),
        PlotLossesKeras(),
    ]
    fit_kwargs = dict(
        epochs=20,
        shuffle=True,
        callbacks=callbacks,
    )
    history = model.fit(train, validation_data=val, **fit_kwargs)

    plot_history(history.history)

    scores = model.evaluate(test)
    metric_dict = dict(zip(model.metrics_names, scores))
    print("Test metrics:", metric_dict)

    return model
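
Note: `configure_dataset_for_performance` is not shown in this example. A minimal sketch of what such a helper typically looks like, assuming a standard tf.data pipeline (the caching/prefetching choices here are assumptions, not the original implementation):

import tensorflow as tf

def configure_dataset_for_performance(ds: tf.data.Dataset, batch_size: int) -> tf.data.Dataset:
    # Cache decoded examples, batch them, and overlap input preparation with training.
    return ds.cache().batch(batch_size).prefetch(tf.data.AUTOTUNE)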
Example No. 2
    def model_fit(self):

        self.__model.fit(self.__x_train, self.__y_train,
                         epochs=self.epochs,
                         batch_size=self.batch_size,
                         validation_data=(self.__x_test, self.__y_test),
                         verbose=0,
                         callbacks=[EarlyStopping(monitor='val_loss', patience=5, verbose=1),
                                    ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=0),
                                    TerminateOnNaN()])
Example No. 3
def get_callbacks(model_name='tmp',
                  model_seed=666,
                  patience_LR=50,
                  patience_training=200):

    nan_terminate = TerminateOnNaN()
    ReduceLR = ReduceLROnPlateau(monitor='val_loss',
                                 factor=0.1,
                                 patience=patience_LR,
                                 verbose=1,
                                 mode='auto',
                                 min_delta=0.0001,
                                 cooldown=0,
                                 min_lr=0)
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=patience_training,
                               verbose=2,
                               mode='auto',
                               baseline=None,
                               restore_best_weights=True)

    csv_logger = CSVLogger(f'models/{model_name}/log_{model_seed}.csv',
                           append=True)

    checkpoint = ModelCheckpoint(
        f'models/{model_name}/{model_name}_{model_seed}',
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        mode='min',
        save_weights_only=True)

    return [early_stop, checkpoint, csv_logger, nan_terminate, ReduceLR]
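
A hedged usage sketch for the factory above: `model`, `train_ds` and `val_ds` are placeholders, and the `models/{model_name}/` directory must exist before CSVLogger and ModelCheckpoint can write into it.

import os

model_name = 'tmp'
os.makedirs(f'models/{model_name}', exist_ok=True)  # required by the CSVLogger/ModelCheckpoint paths above
model.fit(train_ds,
          validation_data=val_ds,
          epochs=1000,
          callbacks=get_callbacks(model_name=model_name, model_seed=666))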
Example No. 4
def get_callbacks(save_path: Union[os.PathLike, str],
                  model_name: str = "baseline"):
    return [
        EarlyStopping(
            monitor=MONITOR_METRIC,
            patience=10,
            restore_best_weights=True,
        ),
        CSVLogger(os.path.join(save_path, "training.log"),
                  separator=',',
                  append=False),
        ReduceLROnPlateau(
            monitor=MONITOR_METRIC,
            factor=0.1,
            patience=5,
            verbose=1,
            mode='min',
        ),
        ModelCheckpoint(
            filepath=os.path.join(save_path,
                                  f"{model_name}" + ".{epoch:02d}.hdf5"),
            save_best_only=True,
            monitor=MONITOR_METRIC,
            mode="min",
        ),
        TensorBoard(log_dir=os.path.join(save_path, "logs")),
        TerminateOnNaN()
    ]
Example No. 5
def generate_clf(X_train,
                 y_train,
                 X_test,
                 y_test,
                 theme_base,
                 epochs,
                 batch,
                 seed,
                 dropout):
    clf = pred_models.NewTownClassifier(theme_base=theme_base, seed=seed, dropout=dropout)
    clf.compile(optimizer='adam',
                loss=losses.BinaryCrossentropy(),
                metrics=['binary_accuracy'])
    # prepare path and check for previously trained model
    dir_path = pathlib.Path(weights_path / f'predictive_weights/{clf.theme}')
    if not dir_path.exists():
        # prepare callbacks
        callbacks = [
            TensorBoard(
                log_dir=str(
                    logs_path / f'{datetime.now().strftime("%Hh%Mm%Ss")}_{clf.theme}'),
                histogram_freq=1,
                write_graph=True,
                write_images=True,
                update_freq='epoch',
                profile_batch=2,
                embeddings_freq=0,
                embeddings_metadata=None),
            ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.1,
                patience=5,
                verbose=1,
                mode='auto',
                min_delta=0.0001,
                cooldown=0,
                min_lr=0),
            TerminateOnNaN(),
            ModelCheckpoint(
                str(dir_path / 'weights'),
                monitor='val_loss',
                verbose=1,
                save_best_only=True,
                save_weights_only=True,
                mode='auto',
                save_freq='epoch')
        ]
        # train
        clf.fit(x=X_train,
                y=y_train,
                batch_size=batch,
                epochs=epochs,
                verbose=1,
                validation_data=(X_test, y_test),
                shuffle=True,
                callbacks=callbacks)
    else:
        clf.load_weights(str(dir_path / 'weights'))
    #
    return clf
Example No. 6
    def train(self):
        LR = self.config['train']['learning_rate']
        BATCH_SIZE = self.config['train']['batch_size']
        NUM_EPOCHS = self.config['train']['num_epochs']
        METRICS = self.config['train']['metrics']
        _CALLBACKS = self.config['train']['callbacks']

        self.model.compile(loss=self.jaccard_loss,
                           optimizer=optimizers.Adam(LR),
                           metrics=METRICS)
        train_data, val_data = self._create_data()

        CALLBACKS = [] if not _CALLBACKS \
            else [EarlyStopping(patience=10),
                  CSVLogger('log.csv'),
                  TerminateOnNaN(),
                  ReduceLROnPlateau(),
                  ModelCheckpoint('chpts/w.{epoch:02d}.h5')]

        self.model.fit(train_data,
                       validation_data=val_data,
                       epochs=NUM_EPOCHS,
                       steps_per_epoch=BATCH_SIZE,
                       validation_steps=4,
                       callbacks=CALLBACKS)
Example No. 7
    def fit(self,
            x,
            y,
            ox=None,
            oy=None,
            batch_size=None,
            n_shuffle=5,
            epochs=None,
            mi_eps=0.00001):
        ''' '''
        n = x.shape[0]
        batch_size = get_default_parameter(
            'batch_size') if batch_size is None else batch_size
        steps_per_epoch = n // batch_size
        steps_per_epoch = min(max(steps_per_epoch, 100), 1000)

        z_gen = PFSBatchGenerator(x, y, ox=ox, oy=oy, batch_size=batch_size,
                                  steps_per_epoch=steps_per_epoch, n_shuffle=n_shuffle)
        epochs = get_default_parameter('epochs') if epochs is None else epochs
        self.model.fit(z_gen, epochs=epochs, batch_size=batch_size, steps_per_epoch=steps_per_epoch,
                       callbacks=[EarlyStopping(patience=3, monitor='loss'), TerminateOnNaN()])
        self.mutual_information = -self.model.evaluate(z_gen)
        w = self.model.w_layer.get_weights()[0]

        if self.mutual_information < mi_eps:
            # Retrain to avoid MI collapse to 0.
            batch_size = 2 * batch_size
            z_gen = PFSBatchGenerator(x, y, ox=ox, oy=oy, batch_size=batch_size,
                                      steps_per_epoch=steps_per_epoch, n_shuffle=n_shuffle)
            self.model.fit(z_gen, epochs=epochs, batch_size=batch_size, steps_per_epoch=steps_per_epoch,
                           callbacks=[EarlyStopping(patience=3, monitor='loss'), TerminateOnNaN()])
            self.mutual_information = -self.model.evaluate(z_gen)
            w = self.model.w_layer.get_weights()[0]

        # The feature direction should be normal
        w = w.flatten()
        w = w / np.sqrt(np.dot(w, w))

        # The principal feature should point in the same direction as the target (i.e. <y, w^Tx> = cov(y, w^Tx) > 0)
        corr_sign = np.sign(np.corrcoef(y, np.dot(x, w))[0, 1])
        w = corr_sign * w

        self.feature_direction = w
        self.fx = self.model.fx(tf.constant(z_gen.z)).numpy()
        if self.expand_y:
            self.gy = self.model.gy(tf.constant(z_gen.z)).numpy()
Example No. 8
 def fit(self, z, batch_size=10000, steps_per_epoch=1000, epochs=None):
     ''' '''
     z_gen = CopulaBatchGenerator(z,
                                  batch_size=batch_size,
                                  steps_per_epoch=steps_per_epoch)
     epochs = get_default_parameter('epochs') if epochs is None else epochs
     self.model.fit(z_gen, epochs=epochs, batch_size=batch_size, steps_per_epoch=steps_per_epoch,
                    callbacks=[EarlyStopping(patience=3, monitor='loss'), TerminateOnNaN()])
     self.copula_entropy = self.model.evaluate(z_gen)
Example No. 9
    def objective(self, params):
        """
        Objective function to optimize.

        :param params: hyperparameters for the optimizer
        :return: negated maximum validation accuracy (so it can be minimized)
        :rtype: float
        """
        # get instances
        dataset = Datasets.get(self.dataset_name)
        model = Models.get(self.model_name, dataset=dataset)
        optimizer = Optimizers.get(self.optimizer_name, params=params)

        # configure hyperdash experiment
        hd_exp = HyperdashExperiment(
            f'{self.dataset_name}',
            api_key_getter=lambda: self.config['hyperdash']['api_key'])
        hd_exp.param('dataset_name', self.dataset_name)
        hd_exp.param('model_name', self.model_name)
        hd_exp.param('optimizer_name', self.optimizer_name)

        for k, v in params.items():
            hd_exp.param(k, v)

        # set callbacks
        callbacks = [
            Hyperdash(['accuracy', 'loss', 'val_accuracy', 'val_loss'],
                      hd_exp),
            EarlyStopping('val_accuracy',
                          patience=10,
                          min_delta=0.01,
                          verbose=1),
            TerminateOnNaN()
        ]

        # get data
        (x_train, y_train), *_ = dataset.get_batch()

        # start learning
        model.compile(loss=self.loss,
                      optimizer=optimizer,
                      metrics=['accuracy'])
        history = model.fit(x_train,
                            y_train,
                            batch_size=self.batch_size,
                            epochs=self.epochs,
                            callbacks=callbacks,
                            validation_split=0.2,
                            verbose=2)

        # stop hyperdash experiment
        hd_exp.end()

        # return maximum validation accuracy
        val_accuracy = np.array(history.history['val_accuracy'])
        return max(val_accuracy) * (-1)
Example No. 10
def main(args):
    w, h = args.model_input_size.split('x')
    input_shape = np.asarray([h, w, 3], dtype=int)
    checkpoint = ModelCheckpoint(str(WEIGHTS_OUTPUT_PATH),
                                 monitor='val_loss',
                                 verbose=1,
                                 save_weights_only=True,
                                 save_best_only=True)
    logging = TensorBoard(log_dir=str(LOGS_DIR))
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=25,
                                  verbose=1,
                                  cooldown=0,
                                  min_lr=1e-7)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=300,
                                   verbose=1)
    terminate_on_nan = TerminateOnNaN()
    callbacks = [
        logging, checkpoint, reduce_lr, early_stopping, terminate_on_nan
    ]

    optimizer = Adam(lr=args.learning_rate)
    model = build_simple_conv_net(input_shape)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    initial_epoch = args.init_epoch
    epochs = args.total_epochs - initial_epoch
    assert epochs >= 1

    train_datagen = ImageDataGenerator(rescale=1 / 255)
    test_datagen = ImageDataGenerator(rescale=1 / 255)

    train_generator = train_datagen.flow_from_directory(
        './data/training/',
        target_size=input_shape[:2],
        batch_size=args.batch_size,
        class_mode='binary')

    validation_generator = test_datagen.flow_from_directory(
        './data/validation/',
        target_size=input_shape[:2],
        batch_size=args.batch_size,
        class_mode='binary')

    history = model.fit(train_generator,
                        epochs=epochs,
                        validation_data=validation_generator,
                        callbacks=callbacks)

    model.save(MODEL_OUTPUT_PATH)
Example No. 11
def callbacks(save_path: str, depth: int) -> List:
    """Keras callbacks which include ModelCheckpoint, CSVLogger, TensorBoard, LearningRateScheduler, TerminateOnNaN
    
    Parameters
    ----------
    save_path: str
        local directory to save model weights
    depth : int
        Depth of ResNet model

    Returns
    -------
    List
        List of all callbacks
    """
    existsfolder(save_path)

    model_checkpoint = ModelCheckpoint(
        filepath=f"{save_path}/" + f"ResNet{depth}" +
        "-epoch:{epoch:02d}-val_acc:{val_accuracy:.2f}.hdf5",
        save_best_only=True,
        save_weights_only=False,
        verbose=1)

    existsfolder('./assets/logs')

    csv_logger = CSVLogger(filename=f"./assets/logs/logs-{now}.csv",
                           append=True)

    def lr_schedule(epoch):
        if epoch < 10:
            return 0.003
        elif epoch < 50:
            return 0.0003
        else:
            return 0.00003

    lr_reduce = ReduceLROnPlateau(monitor='val_loss',
                                  factor=math.sqrt(0.1),
                                  patience=5,
                                  min_lr=3e-6,
                                  verbose=1)

    lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1)

    early = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=15)

    terminate_on_nan = TerminateOnNaN()

    callbacks_list = [
        csv_logger, lr_scheduler, lr_reduce, early, model_checkpoint,
        terminate_on_nan
    ]
    return callbacks_list
Example No. 12
def get_callbacks(output_folder, job_config, fold, val_loss, start_time,
                  fold_name, loss, metrics, optimizers):

    log_folder = os.path.join(output_folder, "logs")
    os.makedirs(log_folder, exist_ok=True)

    lr_reducer = SavableReduceLROnPlateau(
        os.path.join(output_folder, "lr_reducer.json"),
        factor=job_config["LR_REDUCTION_FACTOR"],
        cooldown=job_config["PATIENCE"],
        patience=job_config["PATIENCE"],
        min_lr=job_config["MIN_LR"],
        monitor='val_loss',
        mode='min',
        min_delta=0,
        verbose=2)

    model_autosave = SubModelCheckpoint(filepath=os.path.join(
        output_folder, "{epoch:04d}-{val_loss:.6f}.h5"),
                                        submodel=fold_name,
                                        save_best_only=False,
                                        save_weights_only=True)
    model_autosave.best = val_loss

    early_stopping = SavableEarlyStopping(os.path.join(output_folder,
                                                       "early_stopping.json"),
                                          patience=job_config["PATIENCE"] * 3,
                                          verbose=2,
                                          monitor='val_loss',
                                          mode='min')

    tensorboard = TensorBoard(log_dir=os.path.join(log_folder, "tenboard"),
                              profile_batch=0)

    logger = CSVLogger(os.path.join(log_folder, 'train.csv'),
                       separator=',',
                       append=True)

    time_limit = EarlyStoppingByTime(limit_seconds=int(
        os.environ.get("LIMIT_SECONDS", -1)),
                                     start_time=start_time,
                                     verbose=0)

    optimizer_saver = OptimizerSaver(os.path.join(output_folder,
                                                  "optimizer.pkl"),
                                     loss=loss,
                                     metrics=metrics)

    return [
        optimizer_saver, lr_reducer,
        TerminateOnNaN(), early_stopping, logger, tensorboard, model_autosave,
        time_limit
    ]
Example No. 13
    def get_callbacks():
        callbacks = []
        terminate_on_nan = TerminateOnNaN()
        callbacks.append(terminate_on_nan)

        early_stopping = EarlyStopping(monitor='loss',
                                       patience=3,
                                       verbose=1,
                                       mode='auto')
        callbacks.append(early_stopping)

        return callbacks
Example No. 14
    def __init__(self, output_dir, key):

        # Variables to hold the description of the experiment
        self.config_description = "This is the template config file."

        # System dependent variable
        self._workers = 1
        self._multiprocessing = False
        self._gpus = 1
        self._displayer = MNISTDisplayer()

        # Variables for comet.ml
        self._project_name = "my_project"
        self._workspace = "my_workspace"
        self.output_dir = join(output_dir, "{}_{}_{}".format(self.workspace, self.project_name, key))

        # Network variables
        self.num_classes = 10
        self.img_size = (28, 28)
        self._weights = None
        self._network = MNISTExample(self.num_classes)

        # Training variables
        self._epochs = 5
        self._batch_size = 128
        self._steps_per_epoch = 60000 // 128
        self._optimizer = Adadelta()
        self._loss = categorical_crossentropy
        self._metrics = ['accuracy']

        self._callbacks = []

        self.early_stopping_params = {"monitor":'val_loss', "min_delta":0, "patience":7}
        self.reduce_lr_on_plateau_params = {"monitor":'val_loss', "factor":0.1, "patience":5}

        self.tensorboard = TensorBoard(join(self.output_dir, "checkpoints/logs"))
        self.terminate_on_nan = TerminateOnNaN()
        self.early_stopping = EarlyStopping(**self.early_stopping_params)
        self.reduce_lr_on_plateau = ReduceLROnPlateau(**self.reduce_lr_on_plateau_params)
        self.model_checkpoint = ModelCheckpoint(
            filepath=join(self.output_dir, "checkpoints",
                          "cp-{epoch:04d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.ckpt"),
            verbose=1,
            save_best_only=True,
            save_weights_only=True)

        self._callbacks = [self.tensorboard, self.terminate_on_nan, self.early_stopping,
                           self.reduce_lr_on_plateau, self.model_checkpoint]

        # Creating the training and validation generator (you may want to move these to the prepare functions)
        train_data, validation_data = mnist.load_data()
        self._train_generator = MNISTGenerator(train_data, self.batch_size)
        self._validation_generator = MNISTGenerator(validation_data, self.batch_size)
        # Dummy test for example
        self._test_generator = MNISTGenerator(validation_data, self.batch_size)

        self._evaluator = None
        self._displayer = MNISTDisplayer()
Example No. 15
 def _fit_model(self, X, y, **kwargs):
     self.model.fit(
         x=X,
         y=y,
         epochs=self._epochs,
         callbacks=[
             EarlyStopping(patience=self._early_stop,
                           restore_best_weights=True),
             TerminateOnNaN(),
         ],
         validation_split=self._validation_split,
         **kwargs,
     )
Example No. 16
 def train_arcloss(self):
     self.model.fit(self.dataset_train,
                    validation_data=self.dataset_eval,
                    epochs=cfg.train_epochs,
                    callbacks=[
                        self.tensorboard_callback, self.checkpoint_callback,
                        TerminateOnNaN(),
                        EarlyStopping(patience=5, restore_best_weights=True)
                    ])
     self.model.base_model.save_weights('./weights/{}_arcface'.format(
         cfg.backbone))
     self.model.base_model.save('./saved_model/{}_arcface'.format(
         cfg.backbone))
Example No. 17
 def _fit_model(self, X, y, **kwargs):
     self.model.fit(
         self._generate_batches(X, y),
         steps_per_epoch=len(X),
         epochs=self._epochs,
         callbacks=[
             EarlyStopping(
                 patience=self._early_stop,
                 restore_best_weights=True,
                 monitor="accuracy",
             ),
             TerminateOnNaN(),
         ],
         **kwargs,
     )
Example No. 18
 def model_fit(self):
     self.__model.fit_generator(self.generator,
                                validation_data=self.validation_data,
                                epochs=self.epochs,
                                verbose=0,
                                callbacks=[
                                    EarlyStopping(monitor='val_loss',
                                                  patience=5,
                                                  verbose=1),
                                    ReduceLROnPlateau(monitor='val_loss',
                                                      factor=0.1,
                                                      patience=5,
                                                      verbose=0),
                                    TerminateOnNaN()
                                ])
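
`fit_generator` is deprecated in recent TensorFlow/Keras releases; `Model.fit` accepts generators and `keras.utils.Sequence` objects directly. An equivalent call (a sketch that keeps the attribute names used above) would be:

     self.__model.fit(self.generator,
                      validation_data=self.validation_data,
                      epochs=self.epochs,
                      verbose=0,
                      callbacks=[EarlyStopping(monitor='val_loss', patience=5, verbose=1),
                                 ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=0),
                                 TerminateOnNaN()])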
Example No. 19
    def prepare_model(self, model, optimizer, loss):
        """
         Called by `prepare_for_training` function.
         Parameters
        ----------
        model : Seg object.

        optimizer : obj or None
            Instance of a `Keras Optimizer <https://keras.io/optimizers/>`_ to be used for training.
            If ``None`` (default), uses ``Adam`` with the learning rate specified in ``config``.
        loss: `loss_seg`
            computes Cross-Entropy between the class targets and predicted outputs

        Returns
        ----------
        `Keras Callbacks <https://keras.io/callbacks/>`_ to be used for training.

        """

        from tensorflow.keras.optimizers import Optimizer
        isinstance(optimizer, Optimizer) or _raise(ValueError())

        if self.config.train_loss == 'seg':
            loss_standard = eval('loss_seg(relative_weights=%s)' %
                                 self.config.relative_weights)
            _metrics = [loss_standard]
        elif self.config.train_loss == 'denoiseg':
            loss_standard = eval(
                'loss_denoiseg(alpha={}, relative_weights={})'.format(
                    self.config.denoiseg_alpha, self.config.relative_weights))
            seg_metric = eval(
                'denoiseg_seg_loss(weight={}, relative_weights={})'.format(
                    1 - self.config.denoiseg_alpha,
                    self.config.relative_weights))
            denoise_metric = eval('denoiseg_denoise_loss(weight={})'.format(
                self.config.denoiseg_alpha))
            _metrics = [loss_standard, seg_metric, denoise_metric]
        else:
            _raise(ValueError('Unknown Loss!'))

        callbacks = [TerminateOnNaN()]

        # compile model
        model.compile(optimizer=optimizer,
                      loss=loss_standard,
                      metrics=_metrics)

        return callbacks
Example No. 20
def get_callbacks():
    callbacks = []
    callbacks.append(TerminateOnNaN())
    callbacks.append(ResultKeeper("results.bin"))
    callbacks.append(CustomModelCheckpoint('checkpoints'))
    callbacks.append(CustomTensorBoard('tensorboard'))
    callbacks.append(
        CustomEarlyStopping(mini_targets={
            5: 200,
            10: 100
        },
                            monitor="val_loss",
                            patience=3))
    #lr_decay = lambda epoch, lr: lr / np.power(.1, epoch)
    #callbacks.append(LearningRateScheduler(lr_decay, verbose= 1))
    return callbacks
Example No. 21
def setup_callbacks(cfg, callbacks, validation):
    ckpt_cfg = cfg.ModelCheckpoint
    es_cfg = cfg.EarlyStopping
    tb_cfg = cfg.TensorBoard

    if not validation:
        if ckpt_cfg.enabled and ckpt_cfg.monitor.startswith("val_"):
            ckpt_cfg.enabled = False
            ckpt_cfg.monitor = ckpt_cfg.monitor[4:]
        if es_cfg.enabled and es_cfg.monitor.startswith("val_"):
            es_cfg.enabled = False
            es_cfg.monitor = es_cfg.monitor[4:]

    if es_cfg.enabled:
        es_callback = EarlyStopping(
            monitor=es_cfg.monitor,
            patience=es_cfg.patience,
            mode=es_cfg.mode,
            verbose=es_cfg.verbose,
            baseline=es_cfg.baseline,
            restore_best_weights=es_cfg.restore_best_weights)
        callbacks.append(es_callback)

    if ckpt_cfg.enabled:
        if not ckpt_cfg.path.endswith(gg.file_ext()):
            ckpt_cfg.path += gg.file_ext()
        makedirs_from_filepath(ckpt_cfg.path)
        mc_callback = ModelCheckpoint(
            ckpt_cfg.path,
            monitor=ckpt_cfg.monitor,
            save_best_only=ckpt_cfg.save_best_only,
            save_weights_only=ckpt_cfg.save_weights_only,
            verbose=ckpt_cfg.verbose)
        callbacks.append(mc_callback)

    if cfg.TerminateOnNaN.enabled:
        callbacks.append(TerminateOnNaN())

    if tb_cfg.enabled:
        callbacks.append(
            tf.keras.callbacks.TensorBoard(
                tb_cfg.log_dir,
                write_graph=tb_cfg.write_graph,
                update_freq=tb_cfg.update_freq,
                histogram_freq=tb_cfg.histogram_freq,
                write_images=tb_cfg.write_images))
    return cfg, callbacks
Example No. 22
    def prepare_model(self, model, optimizer, loss, metrics=('mse', 'mae')):
        """ TODO """

        from tensorflow.keras.optimizers import Optimizer
        isinstance(optimizer, Optimizer) or _raise(ValueError())

        if loss == 'mse':
            loss_standard = eval('loss_mse()')
        elif loss == 'mae':
            loss_standard = eval('loss_mae()')

        _metrics = [eval('loss_%s()' % m) for m in metrics]
        callbacks = [TerminateOnNaN()]

        # compile model
        model.compile(optimizer=optimizer, loss=loss_standard, metrics=_metrics)

        return callbacks
Example No. 23
    def train_model(self, model_type, load=True, epochs=0):

        data_handler = self.harmonic_data_set.set_type(model_type)

        if model_type == 0:
            m_params = args.h_parser.parse_args()
            model_loc = self.model_path + '/harmonic_model.h5'
        else:
            m_params = args.a_parser.parse_args()
            model_loc = self.model_path + '/aperiodic_model.h5'

        if epochs == 0:
            epochs = m_params.epochs

        if load and not os.path.isfile(model_loc):
            print('Cannot find model :' + model_loc +
                  "\n Creating new model...")
            load = False

        if load:
            model = load_model(model_loc)
            print('Successfully loaded :' + model_loc +
                  "\n Continuing training...")
        else:
            model = self.build_model(data_handler, m_params)

            adam_optimizer = Adam(learning_rate=m_params.learn_rate)
            model.compile(optimizer=adam_optimizer, loss=network_loss)

        if not os.path.isdir(self.model_path):
            os.mkdir(self.model_path)

        lr_schedule = LearningRateScheduler(self.lr_scheduler)
        nan_terminator = TerminateOnNaN()
        checkpoint = ModelCheckpoint(model_loc,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min')

        model.fit(data_handler,
                  epochs=epochs,
                  callbacks=[nan_terminator, lr_schedule, checkpoint])
Example No. 24
def train(varBatchSize, varEpochs, model, PSIData, DSIData, PQIData, DQIData):
	#Switch to correct directory
	checkpoint = ModelCheckpoint(filepath='' + 'WEIGHTS-{epoch:02d}.hdf5', monitor='loss', verbose=0, save_best_only=True, save_weights_only=False, mode='auto', period=1)
	#Switch to correct directory
	saver = ModelCheckpoint(filepath='' + 'Latest.hdf5', monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
	#Switch to correct directory
	csvlogger = CSVLogger("")
	terminator = TerminateOnNaN()
	
	reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=1, min_lr=0.0001, verbose=1)

	model.fit(x={'Pickup_Spatial_Input': PSIData[0], 'Dropoff_Spatial_Input': DSIData[0], 'Pickup_Quantity_Input' : PQIData[0], 'Dropoff_Quantity_Input' : DQIData[0]},
			  y = {'Pickup_Spatial_Output': PSIData[1], 'Dropoff_Spatial_Output': DSIData[1], 'Pickup_Quantity_Output' : PQIData[1], 'Dropoff_Quantity_Output' : DQIData[1]},
			  batch_size = varBatchSize, 
			  epochs=varEpochs, 
			  verbose=1,
			  validation_data=({'Pickup_Spatial_Input': PSIData[2], 'Dropoff_Spatial_Input': DSIData[2], 'Pickup_Quantity_Input' : PQIData[2], 'Dropoff_Quantity_Input' : DQIData[2]}, 
			  	{'Pickup_Spatial_Output': PSIData[3], 'Dropoff_Spatial_Output': DSIData[3], 'Pickup_Quantity_Output' : PQIData[3], 'Dropoff_Quantity_Output' : DQIData[3]}), 
		      callbacks=[reduce_lr, terminator, csvlogger, checkpoint, saver], shuffle=False)
Example No. 25
def fitModel(schema, n_cls, dgen_opt, datagen, shot, X_train, X_valid, y_train,
             y_valid, aug_flag):
    callbacks = [EarlyStopping(patience=PATIENCE), TerminateOnNaN()]
    #  TensorBoard(log_dir='./logs/tboard', histogram_freq=1, write_grads=True)]
    if 'Original' == datagen:
        if aug_flag:
            datagen = ImageDataGenerator(**dgen_opt)
            datagen.fit(X_train)
            traingen = datagen.flow(X_train,
                                    to_categorical(y_train, n_cls),
                                    batch_size=BATCHSIZE)
            validgen = (X_valid, to_categorical(y_valid, n_cls))
        else:
            history = schema.model.fit(
                X_train,
                to_categorical(y_train, n_cls),
                **FITOPTS,
                callbacks=callbacks,
                validation_data=(X_valid, to_categorical(y_valid, n_cls)))
            history.history.update({'params': history.params})
            history.history.update({'epoch': history.epoch})
            return history.history
    else:
        if aug_flag:
            traingen = datagen[0](X_train, y_train, n_cls, dgen_opt, BATCHSIZE)
            validgen = datagen[1](X_valid, y_valid, n_cls, BATCHSIZE)
        else:
            traingen = datagen(X_train, y_train, n_cls, BATCHSIZE)
            validgen = datagen(X_valid, y_valid, n_cls, BATCHSIZE)

    _STEPS_PER = dict(STEPS_PER)
    if shot is None:
        _STEPS_PER['steps_per_epoch'] = len(y_train) // BATCHSIZE
        _STEPS_PER['validation_steps'] = len(y_valid) // BATCHSIZE
    history = schema.model.fit_generator(traingen,
                                         validation_data=validgen,
                                         callbacks=callbacks,
                                         **FITGENOPTS,
                                         **_STEPS_PER)
    history.history.update({'params': history.params})
    history.history.update({'epoch': history.epoch})
    return history.history
Example No. 26
    def _fit_model(self, X, y, **kwargs):
        self.preprocessor.fit(X)
        batch_size = 64
        validation_size = min(int(0.25 * len(X)), 10 * batch_size)

        Xtrain, Xvalid = X[:-validation_size], X[-validation_size:]
        ytrain, yvalid = y[:-validation_size], y[-validation_size:]

        self.model.fit(
            self.preprocessor.flow(Xtrain, ytrain, batch_size=batch_size),
            steps_per_epoch=len(Xtrain) // batch_size,
            epochs=self._epochs,
            callbacks=[
                EarlyStopping(patience=self._early_stop,
                              restore_best_weights=True),
                TerminateOnNaN(),
            ],
            validation_data=(Xvalid, yvalid),
            **kwargs,
        )
Example No. 27
    def train(self,
              epochs: int,
              monitor_metric='val_acc',
              patience: int = 5,
              steps_per_epoch: Union[int, str] = 'auto',
              validation_steps: Union[int, str] = 'auto',
              log_dir: str = 'logs',
              use_multiprocessing: bool = False):
        pprint(locals())
        log_dir = Path(log_dir).joinpath(
            datetime.now().replace(microsecond=0).isoformat())
        model_path = Path(log_dir).joinpath('checkpoints').joinpath(
            'best-model.h5py')
        model_path = str(model_path)

        if steps_per_epoch == 'auto':
            steps_per_epoch = self.nb_train_samples // self.batch_size
        if validation_steps == 'auto':
            validation_steps = self.nb_valid_samples // self.batch_size

        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['acc'])
        history = self.model.fit_generator(
            self.train_dataset.as_numpy_iterator(),
            steps_per_epoch=steps_per_epoch,
            validation_data=self.valid_dataset.as_numpy_iterator(),
            validation_steps=validation_steps,
            epochs=epochs,
            use_multiprocessing=use_multiprocessing,
            workers=os.cpu_count() - 1,
            callbacks=[
                TerminateOnNaN(),
                TensorBoard(log_dir=log_dir),
                ModelCheckpoint(model_path,
                                monitor=monitor_metric,
                                verbose=1,
                                save_best_only=True),
                EarlyStopping(monitor=monitor_metric, patience=patience),
            ])
        return history.history
Example No. 28
def prepare_model(model, optimizer, loss, metrics=('mse','mae'),
                  loss_bg_thresh=0, loss_bg_decay=0.06, Y=None):
    """ TODO """

    from keras.optimizers import Optimizer
    isinstance(optimizer,Optimizer) or _raise(ValueError())


    loss_standard   = eval('loss_%s()'%loss)
    _metrics        = [eval('loss_%s()'%m) for m in metrics]
    callbacks       = [TerminateOnNaN()]

    # checks
    assert 0 <= loss_bg_thresh <= 1
    assert loss_bg_thresh == 0 or Y is not None
    if loss == 'laplace':
        assert K.image_data_format() == "channels_last", "TODO"
        assert model.output.shape.as_list()[-1] >= 2 and model.output.shape.as_list()[-1] % 2 == 0

    # loss
    if loss_bg_thresh == 0:
        _loss = loss_standard
    else:
        freq = np.mean(Y > loss_bg_thresh)
        # print("class frequency:", freq)
        alpha = K.variable(1.0)
        loss_per_pixel = eval('loss_{loss}(mean=False)'.format(loss=loss))
        _loss = loss_thresh_weighted_decay(loss_per_pixel, loss_bg_thresh,
                                           0.5 / (0.1 + (1 - freq)),
                                           0.5 / (0.1 +      freq),
                                           alpha)
        callbacks.append(ParameterDecayCallback(alpha, loss_bg_decay, name='alpha'))
        if not loss in metrics:
            _metrics.append(loss_standard)


    # compile model
    model.compile(optimizer=optimizer, loss=_loss, metrics=_metrics)

    return callbacks
Example No. 29
    def _setup_callbacks(
        message: Text,
        t0: int,
        enable_tensorboard: bool = True,
        tensorboard_logdir: Optional[Text] = None
    ) -> Tuple[List[Callback], Text]:
        """Setups the callbacks to monitor model estimation.

    Args:
      t0: specifies the number of days between the occurrence of the first
        infected case (patient zero) and the first observed case.
      message: optionally pass a prefix string in the filenames of training
        weights (in the format of hdf5 file). We will generate a lot of such
        files in the training process.
      enable_tensorboard: whether or not use tensorboard to monitor training.
      tensorboard_logdir: xxx.

    Returns:
      callbacks : TYPE DESCRIPTION.
      min_loss_filepath: TYPE DESCRIPTION.

    """
        min_loss_filepath = os.path.join(tempfile.gettempdir(),
                                         message + str(t0) + "min_loss.hdf5")
        callbacks = []
        callbacks.append(
            ModelCheckpoint(min_loss_filepath,
                            monitor="loss",
                            save_best_only=True,
                            save_weights_only=False))
        callbacks.append(TerminateOnNaN())
        if enable_tensorboard:
            if not os.path.isdir(tensorboard_logdir):
                os.makedirs(tensorboard_logdir)
            logdir = os.path.join(
                tensorboard_logdir,
                f"job{t0}_" + datetime.now().strftime("%Y%m%d-%H%M%S"))
            callbacks.append(TensorBoard(log_dir=logdir))
        return callbacks, min_loss_filepath
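
A possible way to consume the returned values (a sketch; `model`, `x_train`, `y_train`, the epoch count, and calling `_setup_callbacks` as if it were a staticmethod are all assumptions): train with the callbacks, then reload the lowest-loss checkpoint written by ModelCheckpoint.

callbacks, min_loss_filepath = _setup_callbacks(message="run", t0=14, enable_tensorboard=False)
model.fit(x_train, y_train, epochs=100, callbacks=callbacks)
model.load_weights(min_loss_filepath)  # restore the weights with the lowest training loss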
Example No. 30
def train_model():
    optimizer = Adam(lr=LEARNING_RATE)
    if OPTIMIZER == 'sgd':
        optimizer = SGD(lr=LEARNING_RATE, momentum=MOMENTUM)

    if LOSS_FUNC == 'triplet':
        train_gen, val_gen = prepare_datagen_triplet()
        model = triplet_model(IMAGE_SHAPE, NUM_FEATURES)
        model.compile(optimizer=optimizer, loss=identity_loss)
    else:
        train_gen, val_gen = prepare_datagen_custom_loss()
        model = construct_model(train_gen.num_classes, NUM_FEATURES)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
    if LOAD_MODEL:
        load_pretrained(model)
    model.summary()

    callbacks = [
        ModelCheckpoint(CHECKPOINT_FOLDER + 'model.hdf5',
                        verbose=1,
                        save_best_only=True),
        CSVLogger(CHECKPOINT_FOLDER + 'log.csv'),
        TerminateOnNaN()
    ]

    if SCHEDULER == 'CosineAnnealing':
        callbacks.append(
            CosineAnnealingScheduler(T_max=EPOCHS,
                                     eta_max=LEARNING_RATE,
                                     eta_min=MIN_LEARNING_RATE,
                                     verbose=1))

    model.fit(train_gen,
              validation_data=val_gen,
              epochs=EPOCHS,
              callbacks=callbacks,
              verbose=1)