예제 #1
0
    def train(self, batch_size, epochs, xtrain, ytrain, validation_data=None):
        """Run a custom per-batch training loop over (xtrain, ytrain).

        Args:
            batch_size: minibatch size for the shuffled training dataset.
            epochs: number of full passes over the training data.
            xtrain, ytrain: array-likes; cast to float32 before batching.
            validation_data: optional (x, y) pair evaluated once per epoch.

        Returns:
            (train_loss, valid_loss): per-epoch mean training losses and
            per-epoch validation losses (valid_loss is empty when no
            validation_data is given).
        """
        train_dataset = tf.data.Dataset.from_tensor_slices(
            (xtrain.astype(np.float32), ytrain.astype(np.float32)))
        train_dataset = train_dataset.shuffle(
            buffer_size=xtrain.shape[0],
            reshuffle_each_iteration=True).batch(batch_size)
        # File-local scheduler: (optimizer, factor, patience, min_lr).
        red_lr = ReduceLROnPlateau(self.optimizer, 0.8, 10, 1e-5)
        train_loss = []
        valid_loss = []
        epoch_loss_avg = tf.keras.metrics.Mean()
        for i in range(epochs):
            epoch_loss_avg.reset_states()
            for x, y in train_dataset:
                loss = self.train_step(self.model, x, y)
                epoch_loss_avg.update_state(loss)
            # .numpy() stores a plain float (consistent with the other train()
            # variants in this file) instead of a live EagerTensor.
            train_loss.append(epoch_loss_avg.result().numpy())
            red_lr.on_epoch_end(train_loss[-1], i)
            if validation_data:
                # NOTE(review): training=True at validation time looks
                # deliberate (e.g. MC dropout / anchored ensembles) — confirm
                # before changing it to False.
                yvalid = self.model(validation_data[0], training=True)
                valid_loss.append(
                    np.mean(self.loss_fn(yvalid, validation_data[1]).numpy()))
                print("Step {} loss {} valid_loss {}".format(
                    i, epoch_loss_avg.result(), valid_loss[-1]))
            else:
                print("Step {} loss {}".format(i, epoch_loss_avg.result()))

        return train_loss, valid_loss
예제 #2
0
    def train(self,
              model,
              batch_size,
              epochs,
              xtrain,
              ytrain,
              indx,
              validation_data=None):
        """Fit ``model`` with the optimizer at ``self.optimizers[indx]``.

        Tracks per-epoch means of the total, MSE, and anchor loss terms and
        steps the file-local ReduceLROnPlateau on the total training loss.

        Returns:
            ([train_loss, mse_loss, anchor_loss], valid_loss) — lists of
            per-epoch mean losses; valid_loss is empty without
            validation_data.
        """
        features = xtrain.astype(np.float32)
        targets = ytrain.astype(np.float32)
        dataset = tf.data.Dataset.from_tensor_slices((features, targets))
        dataset = dataset.shuffle(
            buffer_size=xtrain.shape[0],
            reshuffle_each_iteration=True).batch(batch_size)
        scheduler = ReduceLROnPlateau(self.optimizers[indx], 0.8, 10, 1e-5)

        # Validation arrays are cast in place, matching the training dtype.
        if validation_data:
            validation_data[0] = validation_data[0].astype(np.float32)
            validation_data[1] = validation_data[1].astype(np.float32)

        train_loss, mse_loss, anchor_loss = [], [], []
        valid_loss = []
        total_avg = tf.keras.metrics.Mean()
        mse_avg = tf.keras.metrics.Mean()
        anchor_avg = tf.keras.metrics.Mean()

        for epoch in range(epochs):
            for metric in (total_avg, mse_avg, anchor_avg):
                metric.reset_states()
            for xb, yb in dataset:
                batch_total, batch_mse, batch_anchor = self.train_step(
                    model, xb, yb, self.optimizers[indx], indx)
                total_avg.update_state(batch_total)
                mse_avg.update_state(batch_mse)
                anchor_avg.update_state(batch_anchor)
            train_loss.append(total_avg.result().numpy())
            mse_loss.append(mse_avg.result().numpy())
            anchor_loss.append(anchor_avg.result().numpy())
            scheduler.on_epoch_end(train_loss[-1], epoch)

            if not validation_data:
                print("Step {} loss {}".format(epoch, total_avg.result()))
            else:
                per_point = self.loss_fn(model, validation_data[0],
                                         validation_data[1], False)
                valid_loss.append(np.mean(per_point.numpy()))
                print("Step {} loss {} valid_loss {}".format(
                    epoch, total_avg.result(), valid_loss[epoch]))

        return [train_loss, mse_loss, anchor_loss], valid_loss
예제 #3
0
    def train(self,
              batch_size,
              epochs,
              xtrain,
              ytrain,
              kl_weight=1e-3,
              validation_data=None):
        """Variational training loop tracking total, NLL, and KL losses.

        ``kl_weight`` scales the KL term passed to ``self.train_step`` and
        ``self.loss_fn``. The file-local ReduceLROnPlateau is stepped on the
        per-epoch mean total loss.

        Returns:
            ([train_loss, NLL_loss, KL_loss], valid_loss) — per-epoch mean
            losses; valid_loss is empty without validation_data.
        """
        data = tf.data.Dataset.from_tensor_slices(
            (xtrain.astype(np.float32), ytrain.astype(np.float32)))
        data = data.shuffle(
            buffer_size=xtrain.shape[0],
            reshuffle_each_iteration=True).batch(batch_size)
        history = {"total": [], "nll": [], "kl": []}
        valid_loss = []
        scheduler = ReduceLROnPlateau(self.optimizer, 0.8, 10, 1e-5)
        total_avg = tf.keras.metrics.Mean()
        nll_avg = tf.keras.metrics.Mean()
        kl_avg = tf.keras.metrics.Mean()
        for epoch in range(epochs):
            for metric in (total_avg, nll_avg, kl_avg):
                metric.reset_states()
            for xb, yb in data:
                batch_total, batch_nll, batch_kl = self.train_step(
                    self.model, xb, yb, kl_weight)
                total_avg.update_state(batch_total)
                nll_avg.update_state(batch_nll)
                kl_avg.update_state(batch_kl)
            history["total"].append(total_avg.result().numpy())
            history["nll"].append(nll_avg.result().numpy())
            history["kl"].append(kl_avg.result().numpy())
            scheduler.on_epoch_end(history["total"][-1], epoch)
            if validation_data:
                per_point = self.loss_fn(self.model, validation_data[0],
                                         validation_data[1], kl_weight,
                                         False).numpy()
                valid_loss.append(np.mean(per_point))
                print("Step {} loss {} valid_loss {}".format(
                    epoch, total_avg.result(), valid_loss[epoch]))
            else:
                print("Step {} loss {}".format(epoch, total_avg.result()))

        return [history["total"], history["nll"], history["kl"]], valid_loss
예제 #4
0
        # Evaluate one test batch; scalars are accumulated for the epoch-level
        # means below. (Enclosing test-batch loop starts above this view.)
        test_loss, test_accuracy = model.test_on_batch(x_batch_test,
                                                       y_batch_test)
        testing_acc.append(test_accuracy)
        testing_loss.append(test_loss)
    # Per-epoch bookkeeping: get_logs presumably appends this epoch's metrics
    # to the dicts — its definition is outside this view, confirm semantics.
    train_logs_dict = get_logs(train_logs_dict, epoch, model, x_train, y_train)
    test_logs_dict = get_logs(test_logs_dict, epoch, model, x_test, y_test)
    # Keras-style logs dict fed to the manually driven callbacks below.
    logs = {
        'acc': np.mean(training_acc),
        'loss': np.mean(training_loss),
        'val_loss': np.mean(testing_loss),
        'val_acc': np.mean(testing_acc)
    }
    # Callbacks are driven by hand since this is a custom loop, not model.fit.
    modelcheckpoint.on_epoch_end(epoch, logs)
    earlystop.on_epoch_end(epoch, logs)
    reduce_lr.on_epoch_end(epoch, logs)
    tensorboard.on_epoch_end(epoch, logs)
    print(
        "accuracy: {}, loss: {}, validation accuracy: {}, validation loss: {}".
        format(np.mean(training_acc), np.mean(training_loss),
               np.mean(testing_acc), np.mean(testing_loss)))
    # EarlyStopping sets model.stop_training; exit the (unseen) epoch loop.
    if model.stop_training:
        break
# Flush/close callbacks once training finishes.
earlystop.on_train_end()
modelcheckpoint.on_train_end()
reduce_lr.on_train_end()
tensorboard.on_train_end()

# confusion metric for training
# argmax over class axis turns per-class scores into predicted label indices.
y_train_pred = model.predict(x_train).argmax(axis=1)