Example #1
def get_callbacks(WEIGHTS_FPATH, LOG_FPATH, monitor):
    callbacks = [
        ModelCheckpoint(WEIGHTS_FPATH,
                        monitor=monitor,
                        save_best_only=True,
                        save_weights_only=True,
                        mode='auto'),
        EarlyStopping(monitor=monitor, patience=3),

        #LearningRateScheduler(anneal_lr),
        #LearningRateTracker(),
        ReduceLROnPlateau(monitor=monitor,
                          factor=0.2,
                          patience=2,
                          min_lr=1e-7,
                          mode='auto'),
        CSVLogger(LOG_FPATH, separator=' ', append=True),
    ]
    return callbacks
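
A minimal usage sketch for the function above (the toy model, data, and file names are assumptions, not part of the original; the callback classes are presumed imported from tensorflow.keras.callbacks):

import numpy as np
from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer='adam', loss='mse')

x, y = np.random.rand(64, 8), np.random.rand(64, 1)
callbacks = get_callbacks('weights.h5', 'train_log.csv', monitor='val_loss')

# validation_split provides the val_loss that the callbacks monitor
model.fit(x, y, validation_split=0.2, epochs=10, callbacks=callbacks)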
Example #2
    def train(self,
              train_gen,
              val_gen,
              saved_model_path,
              epochs=100,
              steps=100,
              train_split=0.8,
              verbose=1,
              min_delta=.0005,
              patience=5,
              use_early_stop=True):
        """
        train_gen: generator that yields an array of images an array of
        """

        # checkpoint to save model after each epoch
        save_best = ModelCheckpoint(saved_model_path,
                                    monitor='val_loss',
                                    verbose=verbose,
                                    save_best_only=True,
                                    mode='min')

        # stop training if the validation error stops improving.
        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=min_delta,
                                   patience=patience,
                                   verbose=verbose,
                                   mode='auto')

        callbacks_list = [save_best]

        if use_early_stop:
            callbacks_list.append(early_stop)

        hist = self.model.fit_generator(train_gen,
                                        steps_per_epoch=steps,
                                        epochs=epochs,
                                        verbose=1,
                                        validation_data=val_gen,
                                        callbacks=callbacks_list,
                                        validation_steps=steps *
                                        (1.0 - train_split) / train_split)
        return hist
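
The validation_steps argument above is derived from the train/validation ratio rather than hard-coded: with an 80/20 split the validation set is (1 - 0.8)/0.8 = 0.25 times the size of the training set, so 100 training steps imply 25 validation steps per epoch. A quick standalone check of the arithmetic:

steps, train_split = 100, 0.8
validation_steps = steps * (1.0 - train_split) / train_split
print(validation_steps)  # 25.0 validation batches per 100 training batches

Note that the expression yields a float; Example #9 below wraps the same formula in int(), which is the safer form for a step count.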
Example #3
File: DEM.py  Project: ifuding/TC
    def train(self, train_part_df, validate_part_df, num_fold = 0):
        """
        Keras Training
        """
        print("-----DNN training-----")

        DNN_Train_Data = self.DNN_DataSet(train_part_df, neg_aug = self.neg_aug)
        DNN_validate_Data = self.DNN_DataSet(validate_part_df)
        scores_list = []
        callbacks = [
        EarlyStopping(monitor='val_loss', patience=self.patience, verbose=0),
        AccuracyEvaluation(validation_data=DNN_validate_Data, interval=1,
                            cand_class_id_emb_attr = self.cand_class_id_emb_attr,
                            eval_df = validate_part_df,
                            model_type = self.model_type,
                            class_id_dict = self.class_id_dict,
                            class_to_id = self.class_to_id,
                            scores = scores_list,
                            TTA = self.TTA,
                            img_model = self.img_flat_model,
                            flags = self.flags,
                            only_emb = self.only_emb)
        ]
        if self.model_type == 'DEM_BC_AUG':
            datagen = MixedImageDataGenerator(
                    rotation_range=self.rotation_range,
                    shear_range = self.shear_range,
                    zoom_range=self.zoom_range,
                    horizontal_flip=self.horizontal_flip)
            datagen.fit(DNN_Train_Data[0])
            h = self.model.fit_generator(
                    datagen.flow((DNN_Train_Data[0], DNN_Train_Data[1:]), None, batch_size=self.batch_size), 
                    validation_data=(DNN_validate_Data, None), steps_per_epoch = DNN_Train_Data[0].shape[0]//self.batch_size,
                    epochs=self.epochs, shuffle=True, verbose = self.verbose, workers=2, use_multiprocessing=False, 
                    callbacks=callbacks)
        else:
            h = self.model.fit(DNN_Train_Data,  validation_data = (DNN_validate_Data, None),
                        epochs=self.epochs, batch_size = self.batch_size, shuffle=True, verbose = self.verbose, callbacks=callbacks)
        score_df = pd.DataFrame(scores_list, columns = self.class_id_dict.keys())
        score_df.index.name = 'Epoch'
        score_df['Fold'] = num_fold
        self.scores.append(score_df)
        return self.model
Example #4
    def callback_func(self, B2_dir):
        # Seek a minimum of the validation loss; verbose=1 reports the epoch at which training stopped
        es = EarlyStopping(monitor='val_loss',
                           mode='min',
                           verbose=1,
                           patience=100)

        # Save best model using checkpoint
        model_path = os.path.join(B2_dir, 'ResNet.h5')
        mcp = ModelCheckpoint(os.path.normcase(model_path),
                              monitor='val_loss',
                              mode='min',
                              verbose=1,
                              save_best_only=True)

        # Define callback function in a list
        callback_list = [es, mcp]

        return callback_list, model_path
Example #5
def train_baseline_cnn(emb_layer, x_train, y_train, x_val, y_val, opt):
    model = CNN(embedding_layer=emb_layer,
                num_words=opt.transfer_n_words,
                embedding_dim=opt.baseline_embed_dim,
                filter_sizes=opt.cnn_filter_shapes,
                feature_maps=opt.filter_sizes,
                max_seq_length=opt.baseline_sent_len,
                dropout_rate=opt.baseline_drop_out_ratio,
                hidden_units=200,
                nb_classes=2).build_model()

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'])

    #     y_train = y_train.reshape(-1, 1)
    #     model = build_model(emb_layer, opt)
    print(model.summary())
    tb_call_back = TensorBoard(log_dir=f'{opt.tbpath}/baseline_cnn_{time()}',
                               histogram_freq=1,
                               write_graph=True,
                               write_images=True)

    checkpoint = ModelCheckpoint("baseline_cnn.h5",
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=False,
                                 mode='auto',
                                 period=1)
    early_stopping = EarlyStopping(monitor='val_loss', patience=2)
    history = model.fit(x_train,
                        y_train,
                        epochs=opt.baseline_epochs,
                        batch_size=opt.baseline_batchsize,
                        verbose=1,
                        validation_data=(x_val, y_val),
                        callbacks=[early_stopping, tb_call_back, checkpoint])

    with open("CNN_train_baseline_history.txt", "w") as f:
        print(history.history, file=f)
    return model
Example #6
def train(model: Sequential, train_x, train_y, epochs, test_x, test_y,
          model_file):
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    print('running for %d epochs.' % epochs)
    save_model = ModelCheckpoint(model_file)
    stop_model = EarlyStopping(min_delta=0.001, patience=10)

    model.fit(x=train_x,
              y=train_y,
              shuffle=True,
              batch_size=60,
              epochs=epochs,
              validation_data=(test_x, test_y),
              callbacks=[save_model, stop_model])
    print("Done training, Now evaluating.")
    loss, acc = model.evaluate(x=test_x, y=test_y)

    print("Final loss: %3.2f Final accuracy: %3.2f" % (loss, acc))
Example #7
def train(model: Sequential, epochs, train_x, train_y, test_x, test_y):
    model.compile(optimizer=RMSprop(),
                  loss=losses.mean_squared_error,
                  metrics=['accuracy'])

    print('running for %d epochs.' % epochs)
    save_model = ModelCheckpoint(MODEL_NAME)
    stop_model = EarlyStopping(min_delta=0.0002, patience=10)
    print("start training")
    model.fit(x=train_x,
              y=train_y,
              shuffle=True,
              batch_size=32,
              epochs=epochs,
              validation_data=(test_x, test_y),
              callbacks=[save_model, stop_model])
    print("Done training, Now evaluating.")

    loss, acc = model.evaluate(x=test_x, y=test_y)
    print("Final loss: %3.2f Final accuracy: %3.2f" % (loss, acc))
Example #8
    def train(self):
        # Build the model using the high-level helper below
        model = self.build_model()
        model.compile(loss='categorical_crossentropy',
              # optimizer=RMSprop(lr=0.001),
              optimizer = Adam(lr=self.learn_rate),
            #   optimizer=tf.train.AdamOptimizer(learning_rate=self.learn_rate),
              metrics=['accuracy'])

        callbacks = [EarlyStopping(
            monitor='val_loss', patience=2)]

        # All images will be rescaled by 1./255
        train_datagen = ImageDataGenerator(rescale=1./255)
        test_datagen = ImageDataGenerator(rescale=1./255)

        # Flow training images in batches of BATCH_SIZE using the train_datagen generator
        train_generator = train_datagen.flow_from_directory(
                self.train_path,  # This is the source directory for training images
                target_size=(DIVIDE_IMAGE_HEIGHT,DIVIDE_IMAGE_WEIGHT),  
                color_mode='grayscale',
                batch_size=BATCH_SIZE,
                # Labels are one-hot encoded to match the categorical_crossentropy loss
                class_mode='categorical')

        # Flow validation images in batches of BATCH_SIZE using the test_datagen generator
        validation_generator = test_datagen.flow_from_directory(
                self.valid_path,
                target_size=(DIVIDE_IMAGE_HEIGHT,DIVIDE_IMAGE_WEIGHT),
                color_mode='grayscale',
                batch_size=BATCH_SIZE,
                class_mode='categorical')

        history = model.fit_generator(
            train_generator,
            epochs=1000,
            callbacks=callbacks,
            validation_data=validation_generator,
            verbose=2)
        self.model = model
        model.save(self.model_path)
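
Both generators above use flow_from_directory, which infers the class labels from subdirectory names. The expected layout for self.train_path and self.valid_path (directory and file names here are illustrative, not from the original project):

# Illustrative directory layout expected by flow_from_directory:
#
#   train/
#       class_a/    img_001.png, img_002.png, ...
#       class_b/    img_101.png, ...
#   valid/
#       class_a/    ...
#       class_b/    ...
#
# Each subdirectory becomes one class, and with class_mode='categorical'
# the generator yields one-hot label vectors ordered alphabetically by
# class directory name.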
Example #9
    def train(self, train_gen, val_gen,
              saved_model_path, epochs=100, steps=100, train_split=0.8,
              verbose=1, min_delta=.0005, patience=5, use_early_stop=True):
        """
        train_gen: generator that yields an array of images an array of

        """

        # checkpoint to save model after each epoch
        save_best = ModelCheckpoint(saved_model_path,
                                    monitor='val_loss',
                                    verbose=verbose,
                                    save_best_only=True,
                                    mode='min')

        # stop training if the validation error stops improving.
        early_stop = EarlyStopping(monitor='val_loss',
                                   min_delta=min_delta,
                                   patience=patience,
                                   verbose=verbose,
                                   mode='auto')
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=2, min_lr=0.001)

        callbacks_list = [save_best]
        callbacks_list.append(reduce_lr)

        if use_early_stop:
            callbacks_list.append(early_stop)
        start = datetime.datetime.now()   
        hist = self.model.fit_generator(
            train_gen,
            steps_per_epoch=steps,
            epochs=epochs,
            verbose=1,
            validation_data=val_gen,
            callbacks=callbacks_list,
            validation_steps=int(steps * (1.0 - train_split) / train_split))
        end = datetime.datetime.now()
        print('TRAIN TIME:',end-start)
        return hist
Example #10
def partial_training(model, train_data, val_data, epochs):
    for i in range(len(model.layers)):
        model.layers[i].trainable = True

    l_r = 0.00005

    model.compile(optimizer=Adam(lr=l_r),
                  loss=dual_loss_weighted,
                  metrics=[BinaryAccuracy()])

    hist_1 = model.fit(x=train_data,
                       validation_data=val_data,
                       epochs=epochs,
                       callbacks=[
                           EarlyStopping(monitor='val_loss',
                                         patience=10,
                                         restore_best_weights=True),
                           ReduceLROnPlateau(patience=4)
                       ])

    return model
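
partial_training unfreezes every layer, which suggests it is the second phase of a standard two-phase fine-tune. A minimal sketch of a plausible first phase (hypothetical; the original's head-training code and its custom dual_loss_weighted loss are not shown, so binary_crossentropy stands in):

from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import BinaryAccuracy

def head_training(model, train_data, val_data, epochs, num_frozen):
    # Freeze the first `num_frozen` layers (the pretrained backbone),
    # leaving only the head trainable.
    for layer in model.layers[:num_frozen]:
        layer.trainable = False

    # 'binary_crossentropy' stands in for the original's custom
    # dual_loss_weighted, which is not shown in this snippet.
    model.compile(optimizer=Adam(lr=1e-3),
                  loss='binary_crossentropy',
                  metrics=[BinaryAccuracy()])
    model.fit(x=train_data, validation_data=val_data, epochs=epochs)
    return model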
Example #11
    def train(self):
        start = time.time()
        early_stop = EarlyStopping(monitor='val_loss', patience=self.early_stop_patience, restore_best_weights=True)
        tensorboard = TensorBoard(log_dir=f"{os.environ['WORKSPACE']}/logs/{self.seq_info}__{self.get_model_info_str()}__{datetime.now().timestamp()}")

        # Train model
        self.training_history = self.model.fit(
            self.train_x, self.train_y,
            batch_size=self.batch_size,
            epochs=self.max_epochs,
            validation_data=(self.validation_x, self.validation_y),
            callbacks=[tensorboard, early_stop],
            shuffle=True
        )

        # Score model
        self.score = self.model.evaluate(self.validation_x, self.validation_y, verbose=0)
        self.score = {out: self.score[i] for i, out in enumerate(self.model.metrics_names)}
        print('Scores:', self.score)
        end = time.time()
        self.train_time = end - start
Example #12
def retrainModel():
    loadDataset()
    shuffleAndSplitDataset()
    dataAugmentation()
    convert2NPArray()

    cb = EarlyStopping(monitor='acc', min_delta=0.005, patience=0)
    model = load_model('classifier/model.keras')
    # Early-stopping variant (note: cb above is only used in this commented-out call):
    # model.fit(images_train, labels_train, batch_size=5, epochs=15, verbose=1, validation_split=0.1, callbacks=[cb])
    model.fit(images_train,
              labels_train,
              batch_size=5,
              epochs=15,
              verbose=1,
              validation_split=0.1)
    model.save('classifier/model.keras')
    # Model evaluation
    result = model.evaluate(images_test, labels_test, verbose=0)
    print('Testing set accuracy:', result[1])

    testModel()
Example #13
    def train_autoencoder(self, epochs):
        """Function to train autoencoder

        :param epochs: Number of epochs
        :type epochs: int
        """
        self.logger.info("Training autoencoder")
        callbacks = [
            EarlyStopping(monitor='loss', patience=10),
            ModelCheckpoint('autoencoder_weights.hdf5',
                            monitor='loss',
                            save_best_only=True,
                            verbose=1)
        ]

        generator = self._get_generator(self.dataset_dir,
                                        self.batch_size,
                                        data_type=1)
        self.autoencoder.fit(
            generator, epochs=epochs,
            callbacks=callbacks)  # TODO: Add callback to save logs
Example #14
def get_call_back(lr, batch_size):
    """
    定义call back
    :return:
    """
    text = '{}-{}-{}'.format(cfg.base_model_name, batch_size, lr)
    checkpoint = ModelCheckpoint(filepath='/tmp/ssd-' + text + '.{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 save_freq='epoch'
                                 )
    reduce = ReduceLROnPlateau(monitor='val_loss', factor=0.2, min_delta=2e-3,
                               patience=10, min_lr=1e-5)
    stop = EarlyStopping(monitor='val_loss', patience=20)
    # scheduler = LearningRateScheduler(lr_schedule(epochs, lr))

    log = TensorBoard(log_dir='log-{}-{}'.format(text,
                                                 time.strftime("%Y%m%d", time.localtime())))
    return [checkpoint, reduce, stop, log]
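
The {epoch:03d} placeholder in the checkpoint filepath is expanded by ModelCheckpoint itself, which formats the path with the epoch number and the entries of the current logs dict. A standalone sketch of that expansion (the concrete model name and metric value are made up for illustration):

filepath = '/tmp/ssd-resnet50-32-0.001.{epoch:03d}.h5'  # illustrative value of the template
logs = {'val_loss': 0.1234}                             # illustrative logs dict
print(filepath.format(epoch=7, **logs))                 # /tmp/ssd-resnet50-32-0.001.007.h5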
Example #15
    def train(self,
              x_train,
              y_train,
              learning_rate,
              epochs,
              batch_size,
              tb_logs_dir=None,
              verbose=False):
        early_stopping_callback = EarlyStopping(monitor="val_loss",
                                                patience=25)
        callbacks = [early_stopping_callback]

        if bool(tb_logs_dir):
            date_time = datetime.now().strftime('%Y-%m-%d-%H%M%S')
            log_name = os.path.join(tb_logs_dir,
                                    "{}_{}".format("transfer_net", date_time))
            # defining callbacks for training
            tensorboard_callback = TensorBoard(log_dir=log_name,
                                               write_graph=True,
                                               write_images=True)
            callbacks += [tensorboard_callback]

        x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                          y_train,
                                                          train_size=0.8,
                                                          random_state=1)
        y_train = tf.keras.utils.to_categorical(y_train,
                                                self.num_output_classes)
        y_val = tf.keras.utils.to_categorical(y_val, self.num_output_classes)

        self.model.compile(loss="categorical_crossentropy",
                           optimizer=keras.optimizers.Adam(lr=learning_rate),
                           metrics=['accuracy', f1_score])
        self.hist = self.model.fit(x_train,
                                   y_train,
                                   epochs=epochs,
                                   batch_size=batch_size,
                                   verbose=verbose,
                                   callbacks=callbacks,
                                   validation_data=(x_val, y_val))
Example #16
    def fit(self, train_data, train_label, validation_data, validation_label,
            batch_size, nb_epochs):
        # TODO exploit 'sample_weight'
        # TODO implement resumed training with 'initial_epoch'
        # TODO add documentation

        callbacks = []

        # define checkpoints
        if self.logdir is not None:
            # create checkpoint callback
            checkpoint_path = os.path.join(self.logdir, "cp-{epoch}.ckpt")
            cp_callback = ModelCheckpoint(filepath=checkpoint_path, verbose=1)
            callbacks.append(cp_callback)

        # TODO debug early stopping
        # define early stopping
        early_stop = EarlyStopping(monitor="val_categorical_accuracy",
                                   min_delta=0,
                                   patience=5,
                                   verbose=2)
        callbacks.append(early_stop)

        # fit model
        self.history = self.model.fit(x=train_data,
                                      y=train_label,
                                      batch_size=batch_size,
                                      epochs=nb_epochs,
                                      verbose=2,
                                      callbacks=callbacks,
                                      validation_data=(validation_data,
                                                       validation_label),
                                      shuffle=True,
                                      sample_weight=None,
                                      initial_epoch=0)

        # update model attribute
        self.trained = True

        return
Example #17
def train(model, train_x, train_y, epochs, test_x, test_y, model_file):
    model.compile(loss='categorical_crossentropy',
                  optimizer='Adadelta',
                  metrics=['accuracy'])

    print("Running for {0} epochs.".format(epochs))

    savemodel = ModelCheckpoint(model_file)
    stopmodel = EarlyStopping(min_delta=0.001, patience=10)

    model.fit(x=train_x,
              y=train_y,
              shuffle=True,
              batch_size=256,
              epochs=epochs,
              validation_data=(test_x, test_y),
              callbacks=[savemodel, stopmodel])

    print("Done training. Now evaluating.")
    loss, acc = model.evaluate(x=test_x, y=test_y)

    print("Final loss:{0} Final accuracy:{1}".format(loss, acc))
Example #18
def usingCnnModel(training_data, training_labels, val_data, val_labels):
    """
    This is using the CNN model and setting it up.
    Args:
        training_data(numpy arrays):    This is the numpy array of the training data.
        training_labels(numpy arrays):  This is the numpy array of the training labels.
        val_data(numpy arrays):         This is the numpy array of the validation data.
        val_labels(numpy arrays):       This is the numpy array of the validation labels.
    Returns:
        history(history):               This is the history of the classifier.
        classifier(sequential):         This is the cnn model classifier fitted to the training data and labels.
    """
    model_checkpoint = ModelCheckpoint(
        filepath=os.path.abspath('best_weights.h5'),
        monitor=monitor_model_checkpoint,
        save_best_only=True)

    early_stopping = EarlyStopping(
        monitor=monitor_early_stopping,
        patience=patience_num)  # original patience =3

    classifier = buildClassifier()
    callbacks_array = []
    if use_early_stopping:
        callbacks_array.append(early_stopping)
    if use_model_checkpoint:
        callbacks_array.append(model_checkpoint)

    print(len(training_data))
    history = classifier.fit(
        training_data,
        training_labels,
        epochs=epochs,
        validation_data=(val_data, val_labels),
        callbacks=callbacks_array,
        batch_size=batch_size
        # steps_per_epoch=int(len(training_data) / batch_size),
    )
    return history, classifier
Example #19
    def __setup_callbacks(self) -> List:
        """
        Sets up the callbacks for training
        :return: the early stopping schedule, tensorboard data and the checkpointer
        """

        # Create a folder for the model log of the current experiment
        weights_log_path = os.path.join(self.__current_experiment_path,
                                        'weights')

        # Set up the callback to save the best weights after each epoch
        checkpointer = ModelCheckpoint(
            filepath=os.path.join(weights_log_path,
                                  'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
            verbose=0,
            save_best_only=True,
            save_weights_only=True,
            monitor='val_loss',
            mode='min')

        # Set up Tensorboard
        tensorboard = TensorBoard(
            log_dir=os.path.join(self.__current_experiment_path, 'tensorboard'),
            write_graph=True,
            histogram_freq=0,
            write_grads=True,
            write_images=False,
            batch_size=self._params.batch_size,
            update_freq=self._params.batch_size)

        # Set up early stopping to interrupt the training if val_loss has not decreased after n epochs
        early_stopping = EarlyStopping(monitor='val_loss',
                                       patience=25,
                                       mode='min')

        csv_logger = CSVLogger(os.path.join(self.__current_experiment_path,
                                            "training.csv"),
                               append=True)

        return [early_stopping, tensorboard, checkpointer, csv_logger]
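
Because the checkpointer saves weights only, restoring the best epoch later means rebuilding the architecture first and then loading the matching file. A hypothetical restore step (build_model and the concrete file name are assumptions, not part of the original class; weights_log_path is the same directory used above):

import os

model = build_model()  # hypothetical builder that recreates the exact architecture
model.load_weights(os.path.join(weights_log_path, 'weights.07-0.42.hdf5'))  # illustrative file name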
Example #20
    def train(self,
              model_path: str,
              train_data: 'BatchSequence',
              train_steps: int,
              batch_size: int,
              validation_data: 'BatchSequence',
              validation_steps: int,
              epochs: int,
              verbose: int = 1,
              min_delta: float = .0005,
              patience: int = 5) -> tf.keras.callbacks.History:
        """
        trains the model
        """
        model = self._get_train_model()
        self.compile()

        callbacks = [
            EarlyStopping(monitor='val_loss',
                          patience=patience,
                          min_delta=min_delta),
            ModelCheckpoint(monitor='val_loss',
                            filepath=model_path,
                            save_best_only=True,
                            verbose=verbose)]

        history: tf.keras.callbacks.History = model.fit(
            x=train_data,
            steps_per_epoch=train_steps,
            batch_size=batch_size,
            callbacks=callbacks,
            validation_data=validation_data,
            validation_steps=validation_steps,
            epochs=epochs,
            verbose=verbose,
            workers=1,
            use_multiprocessing=False
        )
        return history
Example #21
    def __init__(self,
                 epochs=5,
                 batch_size=16,
                 lr=1e-5,
                 optimizer='sgd',
                 callbacks=EarlyStopping(patience=3),
                 metrics=['accuracy'],
                 input_shape=(128, 128, 3),
                 n_output=3,
                 loss='mean_squared_error'):

        self.epochs = epochs
        self.batch_size = batch_size
        self.lr = lr
        self.callbacks = callbacks
        self.metrics = metrics
        self.input_shape = input_shape
        self.n_output = n_output
        self.optimizer = optimizer
        self.loss = loss

        self.instantiate_model()
Example #22
def get_agent(name_of_model="model-3_200-50-3"):
    if False:  # TODO: check whether a saved model already exists
        # Note: load_model expects the SavedModel directory, not the .pb file itself
        return keras.models.load_model('./saved_model.pb')
    else:
        x_train = np.asarray(pickle.load(open("X.p", "rb")))
        print(x_train.shape)
        y_train = np.asarray(pickle.load(open("Y.p", "rb")))
        nb_class = 3
        y_train = keras.utils.to_categorical(y_train, nb_class)
        model = keras.Sequential()
        model.add(layers.Dense(200, input_dim=68 * 40, activation='sigmoid'))
        model.add(layers.Dense(50, activation='sigmoid'))
        model.add(layers.Dense(3, activation='softmax'))
        model.summary()
        model.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])

        from tensorflow.python.keras.callbacks import EarlyStopping
        # stop training when val_accuracy has not improved by min_delta within the patience window
        ourCallback = EarlyStopping(monitor='val_accuracy',
                                    min_delta=0.0001,
                                    patience=20,
                                    verbose=0,
                                    mode='auto',
                                    baseline=None,
                                    restore_best_weights=False)
        model.fit(x_train,
                  y_train,
                  epochs=100,
                  batch_size=128,
                  validation_split=0.2,
                  callbacks=[ourCallback])

        # model.fit(x_train, y_train, epochs=100, validation_split=0.33)
        #model.fit(x_train, y_train, epochs=100)
        model.save(".")
        return model
Example #23
def train_keras_model(model, input_data, labels, dev_input_data, dev_labels,
                      batch_size, epochs, steps_per_epochs, validation_steps,
                      weights_filename):
    '''
    Trains the compiled singletask model.
    :param model: the singletask model, implemented and compiled
    :param input_data: arrays of arrays of token ids
    :param labels: arrays of arrays of token ids
    :param dev_input_data: arrays of arrays of token ids from the dev set
    :param dev_labels: arrays of arrays of token ids from the dev set
    :param batch_size: size of each batch
    :param epochs: number of epochs to train the model
    :param steps_per_epochs: number of steps per epoch during training
    :param validation_steps: number of validation steps per epoch
    :param weights_filename: filepath where the model weights are saved
    :return: the training statistics once training is complete.
    '''
    early_stopping = EarlyStopping(monitor="val_loss", patience=2)
    checkpointer = ModelCheckpoint(filepath="drive/My Drive/" +
                                   weights_filename + ".hdf5",
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=True,
                                   mode='min')
    cbk = [early_stopping, checkpointer]

    print("\nStarting training...")
    stats = model.fit_generator(batch_generator(input_data, labels,
                                                batch_size),
                                steps_per_epoch=steps_per_epochs,
                                epochs=epochs,
                                callbacks=cbk,
                                verbose=1,
                                validation_data=batch_generator(
                                    dev_input_data, dev_labels, batch_size),
                                validation_steps=validation_steps)
    print("Training complete.\n")
    return stats
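
batch_generator is referenced but not defined in this example. A minimal stand-in with the interface fit_generator expects here, i.e. an endless stream of (inputs, labels) batches (this is an assumption about the helper, not its actual code):

import numpy as np

def batch_generator(inputs, targets, batch_size):
    # Hypothetical stand-in: yields random (x, y) batches forever, as
    # fit_generator requires for both the training and validation streams.
    inputs, targets = np.asarray(inputs), np.asarray(targets)
    while True:
        idx = np.random.randint(0, len(inputs), size=batch_size)
        yield inputs[idx], targets[idx]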
Example #24
    def train(self) -> Sequential:
        dataset = self.dataset

        normalizer = preprocessing.Normalization()
        normalizer.adapt(np.array(dataset.x_train))

        model = tf.keras.models.Sequential(
            [normalizer, layers.Dense(units=1)],
            name="{}_tf_model".format(self.target_feature))

        model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.1),
                      loss='mean_absolute_error')
        model.build(dataset.x_train.shape)

        callbacks = [
            EarlyStopping(
                monitor="val_loss",
                min_delta=0.001,
                patience=2,
                verbose=1,
            )
        ]

        history = model.fit(dataset.x_train,
                            dataset.y_train,
                            epochs=200,
                            validation_data=(dataset.x_valid, dataset.y_valid),
                            verbose=0,
                            callbacks=callbacks)
        plotter.plot_loss(self.target_feature, history)

        metrics = model.evaluate(dataset.x_test, dataset.y_test)
        print("{} x_test loss: {}".format(self.target_feature, metrics))

        return model
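
plotter.plot_loss is an external helper not shown here; a hypothetical matplotlib equivalent consistent with how it is called above:

import matplotlib.pyplot as plt

def plot_loss(target_feature, history):
    # Hypothetical stand-in for plotter.plot_loss: plots training vs.
    # validation loss curves from a Keras History object.
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.xlabel('epoch')
    plt.ylabel('mean absolute error')
    plt.title(target_feature)
    plt.legend()
    plt.show()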
Example #25
    def train(self,
              train_data,
              train_labels,
              valid_data,
              valid_labels,
              batch_size=32,
              epochs=30):
        '''
        Trains the model. We use early stopping and save only the best model to disk.

        :param train_data: training data. 4-D tensor expected:
                           (num_samples, 3-D shape of the pre-trained network's last layer output)
        :param train_labels: training labels. 2-D tensor expected:
                           (num_samples, num_dog_breeds)
        :param valid_data: validation data. 4-D tensor expected:
                           (num_samples, 3-D shape of the pre-trained network's last layer output)
        :param valid_labels: validation labels. 2-D tensor expected:
                           (num_samples, num_dog_breeds)
        :param batch_size: batch size. Default is 32
        :param epochs: number of epochs. Default is 30
        '''
        checkpointer = ModelCheckpoint(monitor='val_acc',
                                       filepath=self.best_model_filename,
                                       verbose=1,
                                       save_best_only=True,
                                       mode='max')
        early_stopper = EarlyStopping(monitor='val_acc',
                                      min_delta=0.001,
                                      patience=4,
                                      mode='max')

        self.model.fit(train_data,
                       train_labels,
                       validation_data=(valid_data, valid_labels),
                       epochs=epochs,
                       verbose=1,
                       batch_size=batch_size,
                       callbacks=[checkpointer, early_stopper])
Example #26
    def setup_callables(self):
        monitor = "val_dice_coef"
        # Setup callback to save best weights after each epoch
        checkpointer = ModelCheckpoint(
            filepath=os.path.join(self.model_dir,
                                  'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
            verbose=0,
            save_best_only=True,
            save_weights_only=True,
            monitor=monitor,
            mode='max')
        # setup callback to register training history
        csv_logger = CSVLogger(os.path.join(self.log_dir, 'log.csv'),
                               append=True,
                               separator=';')

        # setup logger to catch warnings and info messages
        set_logger(os.path.join(self.log_dir, 'train_val.log'))

        # setup callback to retrieve tensorboard info
        tensorboard = TensorBoard(log_dir=self.log_dir,
                                  write_graph=True,
                                  histogram_freq=0)

        # setup early stopping to stop training if val_dice_coef has not improved for 5 epochs
        early_stopping = EarlyStopping(monitor=monitor,
                                       patience=5,
                                       mode='max',
                                       verbose=0)
        lr_reducer = ReduceLROnPlateau(monitor=monitor,
                                       factor=0.05,
                                       cooldown=0,
                                       patience=5,
                                       verbose=0,
                                       mode='max')

        return [
            checkpointer, csv_logger, tensorboard, early_stopping, lr_reducer
        ]
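
The monitored val_dice_coef implies a custom Dice metric was passed to model.compile. The project's implementation is not shown; a common formulation that would produce this metric name (an assumption):

from tensorflow.keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Sørensen-Dice coefficient; higher is better, hence mode='max' above.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)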
Example #27
    def train_model(self,
                    model,
                    train_images,
                    train_labels,
                    val_images,
                    val_labels):
        """
        """
        es = EarlyStopping(monitor='loss',
                           mode='min',  # loss is minimized, so 'min' rather than 'max'
                           verbose=1,
                           patience=cfg.patience)
        # model checkpoint
        mc = ModelCheckpoint(super().get_path_to_save(),
                             monitor='acc',
                             mode='max',
                             save_best_only=True,
                             verbose=1)
        # TensorBoard
        logdir = os.path.join(cfg.pfad_zu_logs, cfg.keras_model_name)
        tb = TensorBoard(log_dir=logdir,
                         histogram_freq=0,
                         write_graph=True,
                         write_images=False)

        callbacks = [es, mc, tb]
        self.compile_model(model)
        print("______________Anfang des Trainings____________________")
        history = model.fit(train_images,
                            train_labels,
                            validation_data=(val_images, val_labels),
                            epochs=self.__Num_epochs,
                            batch_size=self.__batch_size,
                            verbose=1,
                            # note: validation_data above takes precedence over validation_split
                            validation_split=self.__validation_split,
                            callbacks=callbacks)
        print("training finished")
        return history
Example #28
def run_loss(args):
    data = args['data']

    # For each run we want to get a new random balance
    data.process()
    # split, train, test
    dense_out = len(data.labels[0])
    # split for all models
    X_train_, X_test_, Y_train, Y_test = train_test_split(data.text, data.labels,
                                                          test_size=0.20, random_state=42)

    print(args)

    # Prep data for the LSTM model
    # This currently will train the tokenizer on all text (unbalanced and train/test)
    # It would be nice to replace this with a pretrained embedding on larger text

    tokenizer = Tokenizer(num_words=int(args['max_features']), split=' ')
    tokenizer.fit_on_texts(data.all_text)
    X_train = tokenizer.texts_to_sequences(X_train_)
    X_train = pad_sequences(X_train, maxlen=max_len)
    X_test = tokenizer.texts_to_sequences(X_test_)
    X_test = pad_sequences(X_test, maxlen=max_len)

    # Train the LSTM model
    lstm_model = simple_lstm(int(args['max_features']), dense_out, X_train.shape[1],
                             int(args['embed_dim']), int(args['lstm_out']), args['dropout'])

    if args['epochs'] == 0:
        args['epochs'] = 1

    es = EarlyStopping(monitor='val_acc', min_delta=0, patience=6, verbose=0, mode='max')
    model_hist = lstm_model.fit(X_train, Y_train, epochs=args['epochs'], batch_size=batch_size,
                                verbose=1, validation_data=(X_test, Y_test), callbacks=[es])
    lstm_acc = model_hist.history['val_acc'][-1]
    print("LSTM model accuracy ", lstm_acc)
    # The objective is minimized, so to maximize accuracy we return its complement
    return 1 - lstm_acc
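
Returning 1 - lstm_acc makes run_loss a minimization objective, which matches the calling convention of hyperparameter optimizers such as hyperopt. A sketch of how it might be driven (the search-space bounds are illustrative assumptions; data is the dataset object the original passes in args):

from hyperopt import fmin, hp, tpe

space = {
    'data': data,  # the prepared dataset object run_loss expects (assumed available)
    'max_features': hp.quniform('max_features', 1000, 20000, 1000),
    'embed_dim': hp.quniform('embed_dim', 32, 256, 32),
    'lstm_out': hp.quniform('lstm_out', 32, 256, 32),
    'dropout': hp.uniform('dropout', 0.1, 0.5),
    'epochs': 5,
}
best = fmin(fn=run_loss, space=space, algo=tpe.suggest, max_evals=25)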
Example #29
    def train(self, npt_exp):
        directory = "muscle-formation-diff/data/images/train"
        if not path.exists(directory):
            directory = "../../data/images/train"
        steps = len(listdir(directory)) // self.batch_size

        callbacks = NeptuneCallback(neptune_experiment=npt_exp,
                                    n_batch=steps,
                                    images=self.x_test[:20],
                                    img_size=self.img_size)

        tf.config.experimental_run_functions_eagerly(True)
        # self.vae.add_loss(self.kl_reconstruction_loss)
        self.vae.compile(optimizer='adam',
                         loss=self.vae_loss_function,
                         metrics=['accuracy'])

        train_generator, validation_generator, test_generator = get_generators(
        )

        history = self.vae.fit(
            train_generator,
            steps_per_epoch=len(train_generator),
            validation_data=validation_generator,
            validation_steps=len(validation_generator),
            epochs=PARAMS['n_epochs'],
            shuffle=PARAMS['shuffle'],
            callbacks=[
                callbacks,
                LambdaCallback(
                    on_epoch_end=lambda epoch, logs: log_data(logs)),
                EarlyStopping(patience=PARAMS['early_stopping'],
                              monitor='loss',
                              restore_best_weights=True)
            ])
        # self.vae.compile(optimizer=self.optimizer, loss=self.loss_func)

        return self.vae, history
Example #30
def main():
    window_size = 100
    scaler = MinMaxScaler(feature_range=(0, 1))
    bp_to_validate, bp_original, trainX, trainY = prepare_dataset(
        scaler, window_size)

    callback_early_stopping = EarlyStopping(
        monitor='loss', patience=10, verbose=1)
    callbacks = [callback_early_stopping]

    model = Sequential()
    model.add(LSTM(1, input_shape=(1, window_size),
                   activation='linear', return_sequences=True))
    model.add(LSTM(512, activation='sigmoid'))
    model.add(Dense(1, activation='linear'))
    model.compile(loss='mean_squared_error', optimizer='adam')
    # Note: steps_per_epoch and batch_size generally should not both be set for array inputs
    model.fit(trainX, trainY, steps_per_epoch=62, epochs=100, batch_size=16,
              verbose=1, callbacks=callbacks)
    print(model.summary())
    model.save('rnn_model.h5')

    bp_validated = model.predict(bp_to_validate)
    draw_results(bp_original, bp_validated, scaler)
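
prepare_dataset is not shown; the LSTM's input_shape of (1, window_size) suggests it builds sliding windows over the scaled series, each sample being a single timestep carrying window_size features. A hypothetical sketch of that windowing (an assumption about the missing helper):

import numpy as np

def make_windows(series, window_size):
    # Hypothetical windowing consistent with input_shape=(1, window_size):
    # predict the value that follows each window of `window_size` points.
    x, y = [], []
    for i in range(len(series) - window_size):
        x.append(series[i:i + window_size])
        y.append(series[i + window_size])
    x = np.array(x).reshape(-1, 1, window_size)
    return x, np.array(y)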