Example #1
    def test_seq_to_seq(self):
        # Assumes the usual test imports:
        #   import numpy as np
        #   import tensorflow as tf
        #   from tensorflow.keras import Sequential, layers
        # Data: 10 samples x (image + 2 actions) timesteps x board/action size of 9
        train_x = np.random.randint(0, 2, size=(10, 3, 9))

        # Alternative shape tried while experimenting: a single batch of ten
        # timesteps, i.e. size=(1, 10, 9), where 1 is the batch size and
        # 10 is the sequence length.
        # train_x = np.random.randint(low=0, high=2, size=(1, 10, 9))

        # One constant target (0.11) per timestep; Keras expands this (10, 3)
        # array to match the model's (10, 3, 1) output when computing the loss.
        train_y = np.array([[0.11, 0.11, 0.11]] * 10)

        model = Sequential()
        # return_sequences=True makes the LSTM emit one vector per timestep,
        # so the final Dense(1) layer yields a (batch, 3, 1) prediction.
        model.add(
            layers.LSTM(units=100, input_shape=(3, 9), return_sequences=True))
        model.add(layers.Dropout(rate=0.25))
        model.add(layers.Dense(50, activation='relu'))
        model.add(layers.Dense(1, activation=None))
        model.compile(optimizer='adam', loss=tf.losses.MSE, metrics=['mae'])
        print(model.summary())
        model.fit(x=train_x, y=train_y, epochs=100, verbose=0)
        results = model.evaluate(train_x, train_y, verbose=2)  # [loss, mae]
        self.assertLess(results[0], 1e-04)
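
For comparison, here is a minimal sketch of the sequence-to-one variant hinted at by the commented-out (1, 10, 9) input: with return_sequences left at its default of False the LSTM emits a single vector per sequence, so the target is one scalar per batch element. The shapes and the 0.11 target are illustrative placeholders, not taken from the original test.

import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, layers

# One sequence of 10 timesteps with 9 binary features each.
train_x = np.random.randint(0, 2, size=(1, 10, 9)).astype(np.float32)
train_y = np.array([[0.11]])  # one scalar target per sequence

model = Sequential([
    layers.LSTM(100, input_shape=(10, 9)),  # return_sequences defaults to False
    layers.Dense(50, activation='relu'),
    layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.fit(train_x, train_y, epochs=100, verbose=0)
print(model.evaluate(train_x, train_y, verbose=0))  # [loss, mae]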
Example #2
# Assumes: import os
#          import tensorflow as tf
#          from tensorflow.keras import Sequential, Input
#          from tensorflow.keras.layers import Dense
# num_classes and train_ds come from the surrounding data-loading code.
Lm1 = Sequential()
# q1: does it work without an explicit Input layer?
Lm1.add(Input(shape=(784, )))
Lm1.add(Dense(num_classes, activation='softmax'))

# q2: try SparseCategoricalCrossentropy to avoid one-hot encoding the labels
loss_object = tf.keras.losses.categorical_crossentropy

optimizer = tf.keras.optimizers.SGD(0.01)

# Metric trackers; the Mean objects are not used by compile/fit below.
train_loss = tf.keras.metrics.Mean(name='train_loss')
# try SparseCategoricalAccuracy when labels are integer class ids
train_accuracy = tf.keras.metrics.CategoricalAccuracy(name='train_accuracy')

test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.CategoricalAccuracy(name='test_accuracy')

checkpoint_path = "./checkpoints/"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                 verbose=1,
                                                 period=1)
# q3: can metrics be passed without wrapping them in a list?
Lm1.compile(optimizer=optimizer, loss=loss_object, metrics=[train_accuracy])
# q4: what exactly does train_ds contain?
Lm1.fit(train_ds, epochs=3, callbacks=[cp_callback])
loss, acc = Lm1.evaluate(train_ds)
print("saved model, loss: {:5.2f}, acc: {:5.2f}".format(loss, acc))
Example #3
# Assumes: from tensorflow.keras import Model, Sequential
#          from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
#          from tensorflow.keras.optimizers import Adam
# FecConfig and the loader are provided by the surrounding project.
class FecModel(Model):

    def __init__(self, loader):
        # Keras requires super().__init__() before any attribute is set on a
        # Model subclass.
        super().__init__()
        self._loader = loader

        self._num_train = 28709
        self._num_val = 7178
        self._batch_size = 64
        self._num_epoch = 1

        self.create_model()

    def create_model(self):
        self._model = Sequential()

        # Feature extraction on 48x48 grayscale face images
        self._model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48, 48, 1)))
        self._model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
        self._model.add(MaxPooling2D(pool_size=(2, 2)))
        self._model.add(Dropout(0.25))

        self._model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        self._model.add(MaxPooling2D(pool_size=(2, 2)))
        self._model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
        self._model.add(MaxPooling2D(pool_size=(2, 2)))
        self._model.add(Dropout(0.25))

        # Classifier head with 7 output classes
        self._model.add(Flatten())
        self._model.add(Dense(1024, activation='relu'))
        self._model.add(Dropout(0.5))
        self._model.add(Dense(7, activation='softmax'))
        # lr= is deprecated in favour of learning_rate=; the original decay=1e-6
        # needs tf.keras.optimizers.legacy.Adam or a LearningRateSchedule on
        # newer TensorFlow versions.
        self._model.compile(loss='categorical_crossentropy',
                            optimizer=Adam(learning_rate=0.0001),
                            metrics=['accuracy'])

    def train_model(self):
        print('Load train data...')
        train_generator = self._loader.prepare_train_data()
        print('Load validation data...')
        validation_generator = self._loader.prepare_validation_data()
        # fit_generator is deprecated; Model.fit accepts generators directly.
        model_info = self._model.fit(
            train_generator,
            steps_per_epoch=self._num_train // self._batch_size,
            epochs=self._num_epoch,
            validation_data=validation_generator,
            validation_steps=self._num_val // self._batch_size)
        return model_info

    def evaluate_model(self):
        print('Load validation data...')
        evaluate_generator = self._loader.prepare_validation_data()

        # evaluate() accepts neither epochs nor batch_size for a generator,
        # and the step count should come from the validation split.
        model_info = self._model.evaluate(evaluate_generator,
                                          steps=self._num_val // self._batch_size)
        return model_info

    def save_model(self):
        print('Save model...')
        self._model.save_weights(FecConfig.model_file_name)

    def load_model(self):
        print('Load model...')
        self._model.load_weights(FecConfig.model_file_name)

    def make_prediction(self, input_folder):
        processed_images = self._loader.prepare_data(input_folder)
        print('Predicting data...')

        results = []
        for processed_image in processed_images:
            for face_image in processed_image.face_images:
                result = self._model.predict(face_image)
                results.append(result)

        print('Save results...')
        self._loader.save_data(processed_images, results)
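
Finally, a minimal usage sketch for this class. FecDataLoader and the input folder path are hypothetical stand-ins; only the method names on FecModel, and the fact that the loader must supply prepare_train_data(), prepare_validation_data(), prepare_data() and save_data(), come from the class above.

# Hypothetical driver code for FecModel.
loader = FecDataLoader()

model = FecModel(loader)
model.train_model()               # fit on the training generator
print(model.evaluate_model())     # [loss, accuracy] on the validation generator
model.save_model()                # weights go to FecConfig.model_file_name

# Later: reload the weights and run inference on a folder of images.
model2 = FecModel(loader)
model2.load_model()
model2.make_prediction('input_images/')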