Example #1
    def fit_model(self, X_train, X_val, y_train, y_val, class_weights, is_frozen_layers: bool) -> None:
        """
        Fit the CNN model and plot the training evolution.
        Originally written as a group for the common pipeline. Later amended by Adam Jaamour.
        :param X_train: training inputs
        :param X_val: validation inputs
        :param y_train: training labels
        :param y_val: validation labels
        :param class_weights: dict containing class weights
        :param is_frozen_layers: boolean specifying whether layers are frozen or not
        :return: None
        """
        if is_frozen_layers:
            max_epochs = config.max_epoch_frozen
            patience = int(config.max_epoch_frozen / 10)
        else:
            max_epochs = config.max_epoch_unfrozen
            patience = int(config.max_epoch_unfrozen / 10)

        if config.dataset == "mini-MIAS":
            self.history = self._model.fit(
                x=X_train,
                y=y_train,
                # class_weight=class_weights,
                batch_size=config.batch_size,
                steps_per_epoch=len(X_train) // config.batch_size,
                validation_data=(X_val, y_val),
                validation_steps=len(X_val) // config.batch_size,
                epochs=max_epochs,
                callbacks=[
                    EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True),
                    ReduceLROnPlateau(patience=int(patience / 2))
                ]
            )
        elif config.dataset == "mini-MIAS-binary":
            self.history = self._model.fit(
                x=X_train,
                y=y_train,
                batch_size=config.batch_size,
                steps_per_epoch=len(X_train) // config.batch_size,
                validation_data=(X_val, y_val),
                validation_steps=len(X_val) // config.batch_size,
                epochs=max_epochs,
                callbacks=[
                    EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True),
                    ReduceLROnPlateau(patience=int(patience / 2))
                ]
            )
        elif config.dataset == "CBIS-DDSM":
            self.history = self._model.fit(
                x=X_train,
                validation_data=X_val,
                class_weight=class_weights,
                epochs=max_epochs,
                callbacks=[
                    EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True),
                    ReduceLROnPlateau(patience=int(patience / 2))
                ]
            )
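As a concrete illustration of the patience arithmetic above (the epoch budget is made up for the example, not taken from the source): with config.max_epoch_frozen = 150, EarlyStopping waits int(150 / 10) = 15 stagnant epochs before stopping, and ReduceLROnPlateau waits int(15 / 2) = 7 epochs before lowering the learning rate.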
Example #2
def get_callbacks(file_name=''):
    # Suffix the checkpoint and log paths with the given file name; fall back to
    # plain defaults so both names are always defined.
    suffix = '_' + file_name if file_name != '' else ''
    path_checkpoint = 'checkpoint_keras' + suffix
    log_dir = 'logs' + suffix

    callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
                                          monitor='val_loss',
                                          verbose=1,
                                          save_weights_only=False,
                                          save_best_only=True,
                                          mode='auto',
                                          period=1)
    callback_early_stopping = EarlyStopping(monitor='val_loss',
                                            patience=5,
                                            verbose=1)
    callback_tensorboard = TensorBoard(log_dir=log_dir,
                                       histogram_freq=0,
                                       write_graph=False)
    callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                           factor=0.1,
                                           min_lr=1e-4,
                                           patience=3,
                                           verbose=1)

    # Note: callback_early_stopping is created above but is not included in the
    # returned list, so early stopping is effectively disabled here.
    callbacks = [callback_checkpoint, callback_tensorboard, callback_reduce_lr]

    return callbacks
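A minimal usage sketch (the compiled model and the training arrays are hypothetical, not part of the source): the returned list goes straight into fit, and the checkpoint callback keeps the best model seen on val_loss.

callbacks = get_callbacks(file_name='my_experiment')
model.fit(x_train, y_train,
          validation_split=0.2,
          epochs=50,
          callbacks=callbacks)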
Example #3
    def fit(self, model: GlowModel, dp: DataProcessor):
        tc = self.config.training
        model.dump_model_internal()
        self.compile(model)
        steps_per_epoch = tc.steps_per_epoch or dp.image_count // tc.batch_size

        callbacks = [
            SamplingCallback(self.config, model),
            TensorBoard(
                str(self.config.resource.tensorboard_dir),
                batch_size=tc.batch_size,
                write_graph=True,
                # histogram_freq=5, write_grads=True
            ),
            ReduceLROnPlateau(monitor='loss',
                              factor=tc.lr_decay,
                              patience=tc.lr_patience,
                              verbose=1,
                              # NOTE: reuses the patience value as min_lr; a dedicated
                              # minimum-LR setting was probably intended here.
                              min_lr=tc.lr_patience),
        ]
        try:
            model.encoder.fit_generator(self.generator_for_fit(dp),
                                        epochs=tc.epochs,
                                        steps_per_epoch=steps_per_epoch,
                                        callbacks=callbacks,
                                        verbose=1)
        except InvalidArgumentError as e:
            model.dump_model_internal()
            raise e
Example #4
    def train(self, x, y, batch_size, epochs, verbose):
        def exp_decay(epoch):
            initial_lrate = self.learning_rate
            k = 0.1
            lrate = initial_lrate * np.exp(-k * epoch)
            return lrate
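        # For example, with self.learning_rate = 0.001 (an illustrative value, not from
        # the source) and k = 0.1, exp_decay returns about 0.001 * exp(-1) ≈ 3.7e-4 at
        # epoch 10 and 0.001 * exp(-2) ≈ 1.4e-4 at epoch 20.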

        callbacks = [
            EarlyStopping(patience=20,
                          monitor='val_loss',
                          restore_best_weights=True),
            LearningRateScheduler(exp_decay, verbose=0),
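            # NOTE: LearningRateScheduler re-applies exp_decay at the start of every
            # epoch, so any reduction made by ReduceLROnPlateau below is overwritten
            # on the following epoch.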
            ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=10,
                              verbose=0),
            TensorBoard(log_dir=self.model_dir),
            TerminateOnNaN()
        ]
        history = self.ed_model.fit(x=x,
                                    y=y,
                                    batch_size=batch_size,
                                    epochs=epochs,
                                    callbacks=callbacks,
                                    shuffle=False,
                                    validation_split=0.2,
                                    verbose=verbose)
        return history.history
Example #5
    def execute(self):
        result_file = os.path.join(self.result_dir, "train_result_{}.txt".format(self.task_index))
        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        with tf.Session(self.server.target, config=config) as sess:
            K.set_session(sess)
            if self.go_on:
                self.restore_model()
            tb_callback = TensorBoard(log_dir=self.log_dir, write_grads=True, write_images=True)
            ckpt_callback = ModelCheckpoint(self.checkpoint_path,
                                            monitor='loss',
                                            save_weights_only=True)
            reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=3, verbose=1)
            early_stopping = EarlyStopping(monitor='loss', min_delta=0, patience=10, verbose=1)

            # add callbacks to save model checkpoint and tensorboard events (on worker:0 only)
            callbacks = [tb_callback, ckpt_callback] if self.task_index == 0 else []

            callbacks += [reduce_lr, early_stopping]
            # try:
            his = self.model.fit_generator(self.generate_rdd_data(),
                                           steps_per_epoch=self.steps_per_epoch,
                                           # validation_data=self.val_generate_data(val_data),
                                           # validation_steps=max(1, self.val_num // self.batch_size),
                                           epochs=self.epochs + self.initial_epoch,
                                           initial_epoch=self.initial_epoch,
                                           workers=0,
                                           callbacks=callbacks)
            logger.debug("{}-{}".format(self.task_index, his.history))
            ModelDir.write_result(result_file, self.get_results(his), self.go_on)
            # except Exception as e:
            #     logger.debug(str(e))
            self.save_model()
            self.tf_feed.terminate()
Example #6
    def train(self):
        state_model = StateModel(self.config)

        if self.config.train.new_model:
            state_model.build()
        else:
            state_model.load_model()
            self.config.train.vae.lr *= 0.01

        state_model.compile()

        self.memory = FileMemory(self.config)
        self.memory.forget_past()
        all_episode_list = list(self.memory.episodes())
        generator = self.episode_generator(all_episode_list)
        vae = self.config.train.vae
        callbacks = [
            ReduceLROnPlateau(factor=vae.lr_decay_factor,
                              patience=vae.lr_patience,
                              min_lr=vae.lr_min,
                              monitor='loss',
                              verbose=1),
            EarlyStopping(monitor="loss",
                          patience=vae.early_stopping_patience),
        ]
        state_model.model.training_model.fit_generator(
            generator,
            steps_per_epoch=vae.steps_per_epoch,
            epochs=vae.epochs,
            callbacks=callbacks)
        state_model.save_model()
Example #7
def gen_model(param_object):
    x_train, y_train = load_data(flag=0)
    _G = generator(param_object.gen_type,
                   param_object.filter_kernal[param_object.filter_kernal_no],
                   param_object.input_shape, 'Adam', mean_squared_error,
                   tf.zeros(shape=param_object.inflow_shape,
                            dtype=tf.float32)).generator_model()
    opti_trick = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.2,
                                   patience=100,
                                   verbose=1)
    ckpt = ModelCheckpoint(ckpt_path,
                           monitor='val_loss',
                           verbose=1,
                           save_best_only=True,
                           save_weights_only=True,
                           period=1)
    _G.fit(x_train,
           y_train,
           epochs=param_object.epochs,
           batch_size=param_object.batch_size,
           callbacks=[opti_trick, ckpt],
           validation_split=0.2,
           shuffle=True)
    return _G
Example #8
def try_train_model_nn(model_name, fold):
    with utils.timeit_context('load data'):
        X, y, video_ids = load_train_data(model_name, fold)

    print(X.shape, y.shape)
    model = model_nn(input_size=X.shape[1])
    model.compile(optimizer=Adam(lr=1e-3),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.25,
                                                        random_state=42)
    batch_size = 64

    model.fit(
        X_train,
        y_train,
        batch_size=batch_size,
        epochs=128,
        verbose=1,
        validation_data=[X_test, y_test],
        callbacks=[ReduceLROnPlateau(factor=0.2, verbose=True, min_lr=1e-6)])

    prediction = model.predict(X_test)

    print(y_test.shape, prediction.shape)
    print(metrics.pri_matrix_loss(y_test, prediction))
    print(metrics.pri_matrix_loss(y_test, np.clip(prediction, 0.001, 0.999)))
    delta = prediction - y_test
    print(np.min(delta), np.max(delta), np.mean(np.abs(delta)),
          np.sum(np.abs(delta) > 0.5))
Example #9
    def set_callbacks(self) -> list:
        """

        Returns
        -------

        """
        monitor = 'val_loss'

        es = EarlyStopping(monitor=monitor,
                           patience=3,
                           verbose=1,
                           restore_best_weights=True)

        # NOTE: with EarlyStopping patience=3 and ReduceLROnPlateau patience=5 on the
        # same monitor, training will usually stop before the LR reduction triggers.
        rop = ReduceLROnPlateau(monitor=monitor, patience=5, verbose=1)

        checkpoint = ModelCheckpoint(filepath=self.default_path,
                                     monitor=monitor,
                                     verbose=1,
                                     save_best_only=True,
                                     save_weights_only=False,
                                     load_weights_on_restart=False)

        logs_dir = os.path.join(DirConf.LOG_DIR, 'experiment')
        if not os.path.exists(logs_dir):
            os.mkdir(logs_dir)

        tb = TensorBoard(log_dir=logs_dir, write_graph=True)

        return [es, rop, checkpoint, tb]
Example #10
def main(net, epochs, batch_size):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train, x_test = x_train.astype('float32') / 255, x_test.astype(
        'float32') / 255
    mean = np.mean(x_train, axis=0)
    x_train -= mean
    x_test -= mean

    datagen = ImageDataGenerator(
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
    )
    datagen.fit(x_train)

    model = make_resnet(net)
    model.summary()

    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        validation_data=(x_test, y_test),
                        epochs=epochs,
                        callbacks=[
                            ReduceLROnPlateau(verbose=1, patience=20),
                            TensorBoard(observer.dir)
                        ])
Example #11
def main():
    feature, label = load_data(train_data_path)

    # Split the dataset into training and validation sets:
    # keep 10% for validation and 90% for training.
    # The stratify argument keeps the split evenly balanced over the labels.
    feature_train, feature_test_set, label_train, label_val = train_test_split(
        feature, label, test_size=0.1, stratify=label)
    ## DNN
    dnn_model = get_dnn_model()
    history = dnn_model.fit(feature_train,
                            label_train,
                            batch_size=100,
                            epochs=8,
                            validation_data=(feature_test_set, label_val),
                            verbose=1)

    evaluate_model(dnn_model, feature_train, feature_test_set, label_train,
                   label_val, history)

    ## CNN
    feature = feature.values.reshape(-1, 28, 28, 1)

    feature_train, feature_test_set, label_train, label_val = train_test_split(
        feature, label, test_size=0.1, stratify=label)

    cnn_model = get_cnn_model()
    history = cnn_model.fit(feature_train,
                            label_train,
                            batch_size=100,
                            epochs=8,
                            validation_data=(feature_test_set, label_val),
                            verbose=1)

    evaluate_model(cnn_model, feature_train, feature_test_set, label_train,
                   label_val, history)

    # Hyperparameters optimization (tuning)

    hpt_model = get_hyperparam_model()
    learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                                patience=3,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=0.00001)
    feature_train, feature_test_set, label_train, label_val = train_test_split(
        feature, label, test_size=0.1, stratify=label)
    data_generator = get_images(feature_train)

    history = hpt_model.fit(data_generator.flow(feature_train,
                                                label_train,
                                                batch_size=100),
                            epochs=3,
                            validation_data=(feature_test_set, label_val),
                            verbose=2,
                            callbacks=[learning_rate_reduction])

    evaluate_model(hpt_model, feature_train, feature_test_set, label_train,
                   label_val, history)
Example #12
    def train(self, train_gen, val_gen,
              saved_model_path, epochs=100, steps=100, train_split=0.8,
              verbose=1, min_delta=.0005, patience=5, use_early_stop=True):
        """
        train_gen: generator that yields an array of images an array of

        """

        # checkpoint to save model after each epoch
        save_best = ModelCheckpoint(saved_model_path,
                                    monitor='val_loss',
                                    verbose=verbose,
                                    save_best_only=True,
                                    mode='auto')

        # stop training if the validation error stops improving.
        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=min_delta,
                                   patience=patience,
                                   verbose=verbose,
                                   mode='auto')

        # reduce the learning rate if validation error not longer improved
        reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                      factor=0.2,
                                      patience=patience-2,
                                      min_lr=0.001,
                                      verbose=verbose,
                                      mode='auto')

        tensorboard = TensorBoard(log_dir='/home/wangbin/mycar/logs',
                                  histogram_freq=0,
                                  batch_size=32,
                                  write_graph=True,
                                  write_grads=False,
                                  write_images=True)
#                                  embeddings_freq=0,
#                                  embeddings_layer_names=None,
#                                  embeddings_metadata=None,
#                                  embeddings_data=None,
#                                  update_freq='epoch')

        callbacks_list = [save_best]

        if use_early_stop:
            callbacks_list.append(early_stop)

        callbacks_list.append(reduce_lr)
        callbacks_list.append(tensorboard)

        hist = self.model.fit_generator(
            train_gen,
            steps_per_epoch=steps,
            epochs=epochs,
            verbose=verbose,
            validation_data=val_gen,
            callbacks=callbacks_list,
            validation_steps=steps * (1.0 - train_split) / train_split)
        return hist
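As a worked example of the validation_steps expression above (numbers are illustrative): with steps=100 and train_split=0.8, validation_steps = 100 * (1.0 - 0.8) / 0.8 = 25, i.e. one quarter as many validation batches as training batches per epoch.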
Example #13
def run_train_test(test_name, gray, augmentation, pic_shape, if_transfer,
                   epoch, batch_size, model_type, optimizer, data):
    data_string = f"pic_shape{pic_shape}_aug={augmentation}_gray={gray}"

    info_str = f"{test_name}/optimizer={optimizer}_data={data[1]}_epoch={epoch}_batch={batch_size}_model={model_type}_data_string={data_string}"
    logdir = "../../logs/" + info_str
    tensorboard_callback = callbacks.TensorBoard(log_dir=logdir)

    train_gen, test_gen, class_weights = data[0]

    train_gen = DataGenerator(train_gen.x_data, train_gen.y_data,
                              train_gen.class_number)
    test_gen = DataGenerator(test_gen.x_data, test_gen.y_data,
                             test_gen.class_number)

    train_gen.set_parameters(batch_size, augmentation)
    test_gen.set_parameters(batch_size, False)
    print("class weights ", class_weights)

    if if_transfer:
        model = transfer_model(model_type, pic_shape, train_gen.class_number)
    else:
        model = my_model(model_type, pic_shape)
        model.add(layers.Dense(train_gen.class_number, activation="softmax"))

    if data[1] == "two":
        metrics = MetricsTwo(test_gen, class_weights)
    else:
        metrics = MetricsAll(test_gen, class_weights)

    # NOTE: the my_model branch above ends in a softmax layer, so from_logits=True
    # below is questionable for that path.
    model.compile(optimizer=optimizer,
                  loss=losses.CategoricalCrossentropy(from_logits=True),
                  metrics=["accuracy"])

    learning_rate_reduction = ReduceLROnPlateau(monitor='loss',
                                                patience=3,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=0.00001)
    if augmentation:
        class_weights = None

    model.fit(
        train_gen,
        epochs=epoch,
        batch_size=batch_size,
        validation_data=test_gen,
        class_weight=class_weights,
        callbacks=[tensorboard_callback, metrics, learning_rate_reduction])

    pred = model.predict(test_gen.x_data)
    pred_classes = np.argmax(pred, axis=1)
    true_classes = test_gen.y_data
    plot_confusion_matrix(true_classes, pred_classes, data[1],
                          "../../diagrams/conf/" + info_str + ".png")
    add_tb_info(metrics, epoch, data[1], logdir)
    if data[1] == "two":
        plot_roc(test_gen.y_data, pred[:, 1],
                 "../../diagrams/roc/" + info_str + ".png")
Example #14
def get_callbacks(weight_path: str, history_path: str) -> List[Callback]:
    """
    Retorna a lista callbacks do modelo
    Args:
    -----
        weight_path: Caminho para salvar os checkpoints
    Returns:
    --------
        (list of keras.callbacks): lista dos callbacks
    """
    # Save the model weights so the best ones can be reloaded
    # if the monitored metric stops improving
    check_params = {
        "monitor": "val_loss",
        "verbose": 1,
        "mode": "min",
        "save_best_only": True,
        "save_weights_only": True,
    }
    checkpoint = ModelCheckpoint(weight_path, **check_params)

    # Reduce the learning rate when the monitored metric plateaus
    reduce_params = {
        "factor": 0.5,
        "patience": 3,
        "verbose": 1,
        "mode": "max",
        "min_delta": 1e-3,
        "cooldown": 2,
        "min_lr": 1e-8,
    }
    reduce_lr = ReduceLROnPlateau(monitor="val_f1", **reduce_params)

    # Stop training when the monitored metric stops improving
    stop_params = {"mode": "max", "restore_best_weights": True, "patience": 40}
    early_stop = EarlyStopping(monitor="val_f1", **stop_params)
    # Terminate training if any weight becomes NaN (not a number)
    terminate = TerminateOnNaN()

    # Enables visualisation in TensorBoard
    # tensorboard = TensorBoard(log_dir="./logs")

    # Log the data generated during training to a CSV file
    if history_path is not None:
        csv_logger = CSVLogger(history_path, append=True)
        # List to be passed to the fit function
        callbacks = [checkpoint, early_stop, reduce_lr, terminate, csv_logger]
    else:
        # List to be passed to the fit function
        # callbacks = [
        #     checkpoint,
        #     early_stop,
        #     reduce_lr,
        #     terminate
        # ]
        callbacks = [checkpoint, reduce_lr, terminate]
    # callbacks = [checkpoint, early_stop, reduce_lr, terminate]
    return callbacks
Example #15
def learning_rate_setting():
    if MODEL_WIDTH > 800:
        reduce_lr = ReduceLROnPlateau(monitor=MONITOR,
                                      patience=5,
                                      factor=0.5,
                                      verbose=1,
                                      mode='auto',
                                      cooldown=5,
                                      min_lr=0.00001)
    else:
        reduce_lr = ReduceLROnPlateau(monitor=MONITOR,
                                      patience=5,
                                      factor=0.7,
                                      verbose=1,
                                      mode='auto',
                                      cooldown=10,
                                      min_lr=0.00001)
    return reduce_lr
Example #16
def train_vUnet(dataset, lr, epochs, trial_num, model_path=None):
    # get the data
    data_getter = DataPreparer(im_path, mask_path, batch_size=batch_size)
    train_generator, val_generator = data_getter.main()
    num_train = data_getter.num_train
    num_val = data_getter.num_val

    # get model
    if not model_path:
        model_flag = 'vUnet'
        model = new_Unet(model_flag)
        for i in range(18):  # first 18 layers of pre-trained model
            model.layers[i].trainable = False

        model.summary()
        print('dataset: ', dataset)
        print('num of training data: ', num_train)
        print('num of validation data: ', num_val)

        # compile
        model.compile(
            optimizer=Adam(lr),
            loss=['binary_crossentropy', 'binary_crossentropy'],  # mask, edge
            metrics=[dice_coef],
            loss_weights=[0.998, 0.002])
    else:
        model = load_model(model_path, custom_objects={'dice_coef': dice_coef})

    # callbacks
    reduce_lr = ReduceLROnPlateau(monitor='loss',
                                  factor=0.5,
                                  patience=5,
                                  verbose=1,
                                  min_lr=1e-6)

    model_name = "results/model/human_muscle/vUnet_{0}_{1}.hdf5".format(
        dataset, trial_num)
    model_checkpoint = ModelCheckpoint(model_name,
                                       monitor='val_loss',
                                       save_best_only=True,
                                       verbose=1)
    tensorboard = TensorBoard(log_dir='./log/' + dataset + '/' + trial_num,
                              write_graph=False,
                              write_grads=False,
                              histogram_freq=15,
                              batch_size=batch_size)
    # train
    hist = model.fit_generator(
        train_generator,
        epochs=epochs,
        steps_per_epoch=num_train // batch_size,
        verbose=2,
        callbacks=[reduce_lr, tensorboard, model_checkpoint],
        validation_data=val_generator,
        validation_steps=num_val // batch_size)
    return
Example #17
def full_training(model, train_data, val_data, epochs1, epochs2):
    if config.pretrained == "imagenet":
        for i in range(140):
            model.layers[i].trainable = False
    else:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True

    model.compile(optimizer=Adam(lr=0.0001),
                  loss=dual_loss_weighted,
                  metrics=[BinaryAccuracy()])

    hist_1 = model.fit(x=train_data,
                       validation_data=val_data,
                       epochs=epochs1,
                       callbacks=[
                           EarlyStopping(monitor='val_loss',
                                         patience=10,
                                         restore_best_weights=True),
                           ReduceLROnPlateau(patience=4)
                       ])

    # Train a second time with a smaller learning rate and with all layers unfrozen
    # (train over fewer epochs to prevent over-fitting).

    for i in range(len(model.layers)):
        model.layers[i].trainable = True

    model.compile(optimizer=Adam(lr=0.00001),
                  loss=dual_loss_weighted,
                  metrics=[BinaryAccuracy()])

    hist_2 = model.fit(x=train_data,
                       validation_data=val_data,
                       epochs=epochs2,
                       callbacks=[
                           EarlyStopping(monitor='val_loss',
                                         patience=10,
                                         restore_best_weights=True),
                           ReduceLROnPlateau(patience=6)
                       ])

    return model
Example #18
    def train(self, save_dir, result_dir, checkpoint_dir, log_dir):
        result_file = os.path.join(result_dir, "train_result.txt")
        train_set = self.train_set
        config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        with tf.compat.v1.Session(config=config) as sess:
            # K.set_session(sess)
            if self.go_on:
                self.restore_model(checkpoint_dir)
            tb_callback = TensorBoard(log_dir=log_dir, write_images=True)
            checkpoint_file = os.path.join(checkpoint_dir,
                                           self.name + '_checkpoint_{epoch}')
            ckpt_callback = ModelCheckpoint(
                checkpoint_file,
                # monitor='loss',
                save_weights_only=True)
            reduce_lr = ReduceLROnPlateau(monitor='loss',
                                          factor=0.1,
                                          patience=3,
                                          verbose=1)
            early_stopping = EarlyStopping(monitor='loss',
                                           min_delta=0,
                                           patience=10,
                                           verbose=1)

            # add callbacks to save model checkpoint and tensorboard events (on worker:0 only)
            callbacks = [tb_callback, ckpt_callback]
            # callbacks = []

            self.model.compile(optimizer=Adam(lr=1e-4),
                               loss={
                                   'yolo_loss': lambda y_true, y_pred: y_pred
                               })
            # print('Unfreeze all of the layers.')
            callbacks.extend([reduce_lr, early_stopping])
            steps_per_epoch = len(train_set) // self.batch_size
            # note that more GPU memory is required after unfreezing the body
            # try:
            his = self.model.fit_generator(
                self.train_generate_data(train_set),
                steps_per_epoch=steps_per_epoch,
                # validation_data=self.val_generate_data(val_data),
                # validation_steps=max(1, self.val_num // self.batch_size),
                epochs=self.initial_epoch + 1,
                initial_epoch=self.initial_epoch,
                workers=1,
                callbacks=callbacks)
            logger.debug(str(his.history))
            # except Exception as e:
            #     logger.debug(str(e))
            # logger.debug('end')
            save_model_path = os.path.join(save_dir, 'model.h5')
            self.model.save(save_model_path)
            ModelDir.write_result(result_file, self.get_results(his))
Example #19
def get_callbacks():
    earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=10)
    lr_reduction = ReduceLROnPlateau(monitor='val_loss',
                                     patience=3,
                                     verbose=1,
                                     factor=0.5,
                                     min_lr=1e-5)
    csv_log = CSVLogger(base_path + 'log.csv')
    # tensorboard = TensorBoard(log_dir='log(logs)')
    callbacks = [earlystop, lr_reduction, csv_log]
    return callbacks
Example #20
def train_temporal(batch_size=6, epoch=1000):
    raw_path = '/media/zxl/other/pjh/datasetsss/CASME_II_TIM/'
    flowimg_path = '/media/zxl/other/pjh/datasetsss/CASME_II_TIM_opticflow_image/'
    strainimg_path = '/media/zxl/other/pjh/datasetsss/CASME_II_TIM_opticalstrain_image/'
    best_model_file = "./models/VGG_16_5_channels_temporal.h5"

    train_data_list_path = './train_list.txt'
    f1 = open(train_data_list_path, 'r')
    train_data_list = f1.readlines()
    steps_per_epoch = int(ceil(len(train_data_list) * 1.0 / batch_size))

    test_data_list_path = './test_list.txt'
    f2 = open(test_data_list_path, 'r')
    test_data_list = f2.readlines()
    validation_steps = int(ceil(len(test_data_list) * 1.0 / batch_size))

    vgg_model = load_model('./models/VGG_16_5_channels.h5')
    model = Model(inputs=vgg_model.input, outputs=vgg_model.layers[35].output)
    model.predict_on_batch(np.zeros(
        (10, 224, 224, 5)))  #https://www.jianshu.com/p/c84ae0527a3f
    best_model = ModelCheckpoint(best_model_file,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                                  factor=0.5,
                                  patience=3,
                                  verbose=1,
                                  min_lr=0.00001)
    temporal_model = temporal_module(data_dim=4096)
    temporal_model.compile(loss='categorical_crossentropy',
                           optimizer=Adam(lr=0.00001, decay=0.000001),
                           metrics=["accuracy"])
    temporal_model.fit_generator(
        generator_batch_feature(model,
                                raw_path,
                                flowimg_path,
                                strainimg_path,
                                train_data_list,
                                batch_size=batch_size),
        steps_per_epoch=steps_per_epoch,
        epochs=epoch,
        verbose=1,
        validation_data=generator_batch_feature(model,
                                                raw_path,
                                                flowimg_path,
                                                strainimg_path,
                                                test_data_list,
                                                batch_size=batch_size),
        validation_steps=validation_steps,
        class_weight=None,
        callbacks=[best_model, reduce_lr])
    f1.close()
    f2.close()
Example #21
def get_callbacks(model_filename, patience_stopping=5, patience_lr=10):
    early_stopping = EarlyStopping(monitor="val_loss", patience=patience_stopping, verbose=1)
    mcp_save = ModelCheckpoint(model_filename, save_best_only=True, monitor="val_loss", mode="min")
    reduce_lr_loss = ReduceLROnPlateau(
        monitor="loss",
        factor=0.1,
        patience=patience_lr,
        verbose=1,
        epsilon=1e-4,  # legacy Keras name for what is now min_delta
        mode="min",
    )
    return [early_stopping, mcp_save, reduce_lr_loss]
Example #22
def trainModel(data, model_path):
    X = data['X']
    y = data['y']
    (X_train, X_val, y_train, y_val) = train_test_split(X, y, test_size = 0.3, random_state=SEED)

    print('Building model using C-RNN architecture...')

    n_features = numFeatures
    input_shape = (None, n_features)
    model_input = Input(input_shape, name='input')
    layer = model_input
    for i in range(N_LAYERS):
        layer = Conv1D(
                filters=CONV_FILTER_COUNT,
                kernel_size=FILTER_LENGTH,
                name='convolution_' + str(i + 1)
                )(layer)
        layer = BatchNormalization(momentum=0.9)(layer)
        layer = Activation('relu')(layer)
        layer = MaxPooling1D(2)(layer)
        # layer = Dropout(0.5)(layer)

    layer = TimeDistributed(Dense(len(GENRES)))(layer)
    time_distributed_merge_layer = Lambda(
                function=lambda x: K.mean(x, axis=1),
                output_shape=lambda shape: (shape[0],) + shape[2:],
                name='output_merged'
                )
    layer = time_distributed_merge_layer(layer)
    layer = Activation('softmax', name='output_realtime')(layer)
    model_output = layer
    model = Model(model_input, model_output)
    opt = Adam(lr=0.001)
    model.compile(
            loss='categorical_crossentropy',
            optimizer=opt,
            metrics=['accuracy']
            )

    print('Starting training...')
    model.fit(
            X_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
            validation_data=(X_val, y_val), verbose=1, callbacks=[
            ModelCheckpoint(
                    model_path, save_best_only=True, monitor='val_acc', verbose=1
                    ),
            ReduceLROnPlateau(
                    monitor='val_acc', factor=0.5, patience=10, min_delta=0.01,
                    verbose=1
                    )
            ]
            )
    return model
Example #23
def train_spatial(batch_size=32, epoch=1000):

    raw_path = '/media/zxl/other/pjh/datasetsss/CASME_II_TIM/'
    flowimg_path = '/media/zxl/other/pjh/datasetsss/CASME_II_TIM_opticflow_image/'
    strainimg_path = '/media/zxl/other/pjh/datasetsss/CASME_II_TIM_opticalstrain_image/'
    best_model_file = "./models/VGG_16_5_channels.h5"

    train_data_list_path = './train_list.txt'
    f1 = open(train_data_list_path, 'r')
    train_data_list = f1.readlines()
    steps_per_epoch = int(ceil(len(train_data_list) * 1.0 / batch_size))

    test_data_list_path = './test_list.txt'
    f2 = open(test_data_list_path, 'r')
    test_data_list = f2.readlines()
    validation_steps = int(ceil(len(test_data_list) * 1.0 / batch_size))
    # X = generator_batch_5c(raw_path, flowimg_path, strainimg_path, data_list)
    vgg_model = VGG_16_5_channels()
    vgg_model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=0.00001, decay=0.000001),
                      metrics=["accuracy"])
    best_model = ModelCheckpoint(best_model_file,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_acc',
                                  factor=0.5,
                                  patience=3,
                                  verbose=1,
                                  min_lr=0.00001)
    vgg_model.fit_generator(generator_batch_5c(raw_path,
                                               flowimg_path,
                                               strainimg_path,
                                               train_data_list,
                                               batch_size=batch_size),
                            steps_per_epoch=steps_per_epoch,
                            epochs=epoch,
                            verbose=1,
                            validation_data=generator_batch_5c(
                                raw_path,
                                flowimg_path,
                                strainimg_path,
                                test_data_list,
                                batch_size=batch_size),
                            validation_steps=validation_steps,
                            class_weight=None,
                            callbacks=[best_model, reduce_lr],
                            max_queue_size=80,
                            workers=8,
                            use_multiprocessing=False)
    f1.close()
    f2.close()
Example #24
def get_callbacks() -> List[Callback]:
    """
        Retorna a lista callbacks do modelo
        Args:
        -----
            weight_path: Caminho para salvar os checkpoints
        Returns:
        --------
            (list of keras.callbacks): lista dos callbacks
    """
    # Save the model weights so the best ones can be reloaded
    # if the monitored metric stops improving
    check_params = {
        'monitor': 'val_loss',
        'verbose': 1,
        'mode': 'min',
        'save_best_only': True,
        'save_weights_only': True
    }
    checkpoint = ModelCheckpoint('./checkpoints/', **check_params)

    # Reduce the learning rate when the monitored metric plateaus
    reduce_params = {
        'factor': 0.5,
        'patience': 3,
        'verbose': 1,
        'mode': 'min',
        'min_delta': 1e-3,
        'cooldown': 2,
        'min_lr': 1e-8
    }
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', **reduce_params)

    # Stop training when the monitored metric stops improving
    stop_params = {'mode': 'min', 'restore_best_weights': True, 'patience': 40}
    # NOTE: 'val_f1' is normally maximised; mode='min' here may be unintended.
    early_stop = EarlyStopping(monitor='val_f1', **stop_params)

    # Terminate training if any weight becomes NaN (not a number)
    terminate = TerminateOnNaN()

    # Enables visualisation in TensorBoard
    # tensorboard = TensorBoard(log_dir="./logs")

    # Log the data generated during training to a CSV file
    # csv_logger = CSVLogger('./logs/trainig.log', append=True)

    # List to be passed to the fit function
    # callbacks = [checkpoint, early_stop, reduce_lr, terminate, tensorboard, csv_logger]
    callbacks = [checkpoint, early_stop, reduce_lr, terminate]
    return callbacks
Example #25
 def __init__(self):
     """
     Set callbacks on init
     """
     tensorboard = tf.keras.callbacks.TensorBoard(
         log_dir=self.tensorboard_log)
     early_stopping = EarlyStopping(patience=15, verbose=1)
     reduce_lr_on_plateau = ReduceLROnPlateau(factor=.4,
                                              patience=7,
                                              verbose=1)
     timing = TimingCallback()
     self.callbacks = [
         tensorboard, early_stopping, reduce_lr_on_plateau, timing
     ]
Example #26
def resnet_attention_train_predict(
    row,
    PCA_num,
    model_number,
    modelfile,
    m,
    x_train,
    y_train,
    x_test,
    y_test,
    Epochs,
    Batch_size,
):

    # Redundant self-assignment (the original assigned to y_train twice, which
    # looks like a typo for x_train); kept as a no-op.
    x_train, y_train, x_test, y_test = x_train, y_train, x_test, y_test

    model = resnet_v1(input_shape=(row, PCA_num + 1, m),
                      depth=20,
                      num_classes=2)

    model.compile(optimizer="Adam",
                  loss=keras.losses.binary_crossentropy,
                  metrics=['accuracy'])

    lr_scheduler = LearningRateScheduler(lr_schedule)
    lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                                   cooldown=0,
                                   patience=5,
                                   min_lr=0.5e-6)

    checkpoint = ModelCheckpoint(
        filepath=modelfile, monitor='val_loss', verbose=0,
        save_best_only=True)  #,save_weights_only=True)

    cbs = [checkpoint, lr_reducer, lr_scheduler]

    history = model.fit(
        x_train,
        y_train,
        batch_size=Batch_size,
        epochs=Epochs,
        verbose=0,
        #validation_split=0.1,
        validation_data=[x_test, y_test],
        shuffle=False,
        callbacks=cbs)  #callbacks=cbs
    return history

    del model  # NOTE: unreachable; this line follows the return statement above
Example #27
 def __init__(self,
              alpha=0.031157,
              delta=0.13907,
              epsilon_decay=0.99997,
              eta=0.044575,
              gamma=0.013082,
              learning_rate=0.050023):
     super().__init__("DQN")
     self.action_size = State.ACTION_SIZE
     self.state_size = State.STATE_SIZE
     self.memory_rl = PrioritizedReplayBuffer(2000000)
     self.memory_sl = SupervisedMemory(2000000)
     self.batch_size = 512
     self.model_update_frequency = 10
     self.model_save_frequency = 100
     self.alpha = alpha  # Pred opt: 0.7
     self.delta = delta  # Pred opt: 0.5
     self.epsilon = 1
     self.epsilon_min = 0.001
     self.epsilon_decay = epsilon_decay  # Pred opt: 0.99999
     self.gamma = gamma  # 0 # 0.029559  # Pred opt: 0.01
     self.learning_rate = learning_rate  # Pred opt: 0.1
     self.learning_rate_sl = 0.005
     self.eta = eta  # Pred opt: 0.1
     self.number_of_episodes = 1000
     self.reduce_lr = ReduceLROnPlateau(monitor='loss',
                                        factor=0.1,
                                        patience=5,
                                        min_lr=0)
     self.policy_network = self.build_model(
         self.learning_rate, 'linear',
         Huber(reduction=Reduction.SUM, delta=self.delta))
     self.target_network = self.build_model(
         self.learning_rate, 'linear',
         Huber(reduction=Reduction.SUM, delta=self.delta))
     self.target_network.set_weights(self.policy_network.get_weights())
     self.supervised_learning_network = self.build_model(
         self.learning_rate_sl, 'softmax',
         tf.keras.losses.sparse_categorical_crossentropy)
     self.total_rewards_p1 = []
     self.total_rewards_p2 = []
     self.losses = []
     self.steps = 0
     self.p2 = AgentPlaceholder()
     self.rounds = 0
     self.n_batches = 0
     self.save_model = True
     self.env = UnoEnvironment(False)
Example #28
def create_model(x_train, y_train, x_val, y_val, n_step, n_features):
    callbacks_list = [
        EarlyStopping(
            monitor='val_loss',
            patience=5,
        ),
        ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.3,
            patience=2,
        )
    ]

    a = x_train.shape[0]
    b = x_val.shape[0]
    bigger = a if a > b else b
    # NOTE: scanning only up to sqrt(bigger) can miss the true greatest common
    # divisor (e.g. a=100, b=50 yields 10 instead of 50); math.gcd(a, b) is exact.
    gcd = 1
    for i in range(1, int(bigger**0.5) + 1):
        if (a % i == 0) and (b % i == 0) and (gcd < i):
            gcd = i

    model = Sequential()
    model.add(
        LSTM({{choice([64, 128, 256])}},
             return_sequences=True,
             batch_input_shape=(gcd, n_step, n_features),
             stateful=True))
    #if {{choice(['add', 'no'])}} == 'add':
    #    model.add(LSTM({{choice([64, 128, 256])}}, stateful=True, return_sequences=True))
    model.add(Dense(n_features))
    model.compile(optimizer='adam', loss='mae', metrics=['acc'])

    for epoch_idx in range(200):
        hist = model.fit(x_train,
                         y_train,
                         epochs=1,
                         validation_data=(x_val, y_val),
                         callbacks=callbacks_list,
                         shuffle=False,
                         verbose=0,
                         batch_size=gcd)
        model.reset_states()

    return {
        'loss': np.amin(hist.history['val_loss']),
        'status': STATUS_OK,
        'model': model
    }
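A minimal alternative sketch for the common-divisor step above (reusing the same a and b; math.gcd is in the standard library and exact, avoiding the square-root cutoff noted in the comment):

import math

gcd = math.gcd(a, b)  # exact greatest common divisor of the two sample counts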
Example #29
def get_callbacks():
    es = EarlyStopping(monitor='val_auc',
                       min_delta=0.001,
                       patience=2,
                       verbose=1,
                       mode='max',
                       baseline=None,
                       restore_best_weights=True)

    rlr = ReduceLROnPlateau(monitor='val_auc',
                            factor=0.5,
                            patience=3,
                            min_lr=1e-6,
                            mode='max',
                            verbose=1)
    return [es, rlr]
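A usage sketch (the model, data and epoch count are hypothetical): monitoring 'val_auc' only works if the model was compiled with an AUC metric named 'auc', for example:

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[tf.keras.metrics.AUC(name='auc')])  # exposes 'auc' / 'val_auc'
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=30,
          callbacks=get_callbacks())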
Example #30
def init_callbacks(monitor: str, lr_patience: int, lr_decay: float,
                   lr_min: float, early_stopping_patience: int, verbosity: int,
                   weights_path: str, selective_learning: bool) -> list:
    """
    Initializes callbacks for the training procedure.

    :param monitor: the metric to monitor.
    :param lr_patience: the number of epochs to wait before decaying the learning rate. Set it to 0 to ignore decaying.
    :param lr_decay: the decay of the learning rate.
    :param lr_min: the minimum learning rate to be reached.
    :param early_stopping_patience: the number of epochs to wait before early stopping.
    :param verbosity: the verbosity of the callbacks.
    :param weights_path: path to the weights to be saved. Pass None, in order to not save the best model.
    :param selective_learning: whether the selective_learning framework is being used.
    :return: the callbacks list.
    """
    callbacks = []

    if not selective_learning and weights_path is not None:
        callbacks.append(
            ModelCheckpoint(weights_path,
                            monitor,
                            save_weights_only=True,
                            save_best_only=True))

    # NOTE: the docstring says lr_patience=0 disables decaying, so enabling the
    # callback when lr_patience == 0 (as below) looks inverted.
    if lr_decay > 0 or lr_patience == 0:
        learning_rate_reduction = ReduceLROnPlateau(monitor=monitor,
                                                    patience=lr_patience,
                                                    verbose=verbosity,
                                                    factor=lr_decay,
                                                    min_lr=lr_min)
        callbacks.append(learning_rate_reduction)

    if early_stopping_patience > 0:
        early_stopping = EarlyStopping(monitor=monitor,
                                       patience=early_stopping_patience,
                                       min_delta=.0002,
                                       mode='max',
                                       restore_best_weights=True,
                                       verbose=verbosity)
        callbacks.append(early_stopping)

    if selective_learning:
        callbacks.append(_SelectiveLearningCallback())

    return callbacks
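An illustrative call (every argument value below is made up for the example, as are the model and its data; none of it comes from the source project):

callbacks = init_callbacks(monitor='val_acc',
                           lr_patience=5,
                           lr_decay=0.5,
                           lr_min=1e-6,
                           early_stopping_patience=15,
                           verbosity=1,
                           weights_path='best_weights.h5',
                           selective_learning=False)
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=100,
          callbacks=callbacks)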