Example #1
import tensorflow as tf
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.layers import Dense, Dropout, GlobalAveragePooling2D
from tensorflow.keras.models import Model


class MobileNetModel:
    def __init__(self, data_X, data_y):
        self.n_class = int(data_y.shape[1])  # labels are one-hot: (n_samples, n_classes)
        self.model = None
        self._create_architecture(data_X, data_y)

    def _create_architecture(self, data_X, data_y):
        self.model = MobileNet(include_top=False,
                               weights=None,
                               input_tensor=None,
                               input_shape=[int(d) for d in data_X.shape[-3:]],
                               pooling=None)
        self.model.load_weights('./weights/mobilenet_1_0_224_tf_no_top.h5')
        """ Freeze the previous layers """
        for layer in self.model.layers:
            layer.trainable = False
        """ By Setting top to False, we need to add our own classification layers """
        # The model documentation notes that this is the size of the classification block
        x = GlobalAveragePooling2D()(self.model.output)
        # let's add a fully-connected layer
        x = Dense(1024, activation='relu')(x)
        x = Dropout(rate=0.5)(x)  # Dropout is a layer: instantiate it, then apply it to x
        # and a softmax output layer sized to the number of classes in data_y
        x = Dense(int(data_y.shape[1]),
                  activation='softmax',
                  name='predictions')(x)
        # create graph of your new model
        self.model = Model(inputs=self.model.inputs,
                           outputs=x,
                           name='MobileNet')

        self.model.compile(optimizer=tf.keras.optimizers.Adam(),
                           loss='categorical_crossentropy',
                           metrics=['accuracy', 'mean_squared_error'])

    def train(self, train_generator, validation_generator):
        print('Training Model')
        # fits the model on batches with real-time data augmentation:
        self.model.fit_generator(train_generator,
                                 steps_per_epoch=1,
                                 epochs=20,
                                 validation_steps=1,
                                 validation_data=validation_generator,
                                 verbose=1)
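
For context, a minimal usage sketch of the class above. The random data, batch size, and ImageDataGenerator setup are assumptions for illustration, not part of the original snippet, and the weights file referenced in _create_architecture must exist on disk.

import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Hypothetical data: RGB images at MobileNet's default 224x224 input size,
# with one-hot labels over 10 classes.
data_X = np.random.rand(256, 224, 224, 3)
data_y = np.eye(10)[np.random.randint(0, 10, 256)]

datagen = ImageDataGenerator(validation_split=0.2)
train_gen = datagen.flow(data_X, data_y, batch_size=32, subset='training')
val_gen = datagen.flow(data_X, data_y, batch_size=32, subset='validation')

model = MobileNetModel(data_X, data_y)
model.train(train_gen, val_gen)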
Example #2
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tcn import tcn_full_summary


def temporal_convolutional_network(data, settings):
    """Creates a Temporal Convolutional Network (TCN) model and its predictions.

    Args:
        data: pandas.DataFrame.
        settings: Dictionary containing settings parameters.

    Returns:
        A dictionary containing the TCN model and its predictions.
    """

    #  TRAIN DATA GENERATOR
    train_generator = create_generator(data['train'],
                                       settings['morph'],
                                       shuffle=True)
    #  TEST DATA GENERATOR
    test_generator = create_generator(data['test'],
                                      settings['morph'],
                                      shuffle=False)

    #  INSTANTIATE KERAS TENSOR INPUT WITH TIMESERIESGENERATOR SHAPE
    model_input = Input(batch_shape=train_generator[0][0].shape)

    #  INSTANTIATE MODEL LAYERS
    model_output = add_tcn_layers(model_input, settings)

    #  INSTANTIATE MODEL AND ASSIGN INPUT AND OUTPUT
    model = Model(inputs=[model_input], outputs=[model_output])

    # COMPILE THE MODEL
    model.compile(optimizer=settings['optimizer'], loss=settings['loss'])

    #  PRINT MODEL STATS
    tcn_full_summary(model, expand_residual_blocks=False)

    #  TRAIN THE MODEL WITH VALIDATION
    model.fit_generator(train_generator,
                        steps_per_epoch=len(train_generator),
                        epochs=settings['epochs'],
                        verbose=0)

    #  PREDICT USING TEST DATA
    predictions = model.predict(test_generator)

    return {'model': model, 'predictions': predictions}
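
create_generator is not shown in the snippet; given the comment about the TimeseriesGenerator shape, a plausible sketch using Keras' TimeseriesGenerator follows. The 'window' and 'batch_size' keys on settings['morph'] are assumptions, not taken from the original code.

from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator

def create_generator(dataset, morph, shuffle=False):
    # Hypothetical helper: wraps a DataFrame in a TimeseriesGenerator so
    # that generator[0][0].shape is (batch_size, window, n_features).
    values = dataset.values
    return TimeseriesGenerator(values, values,
                               length=morph['window'],
                               batch_size=morph['batch_size'],
                               shuffle=shuffle)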
Example #3
# Flows training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    batch_size = 20,
                                                    class_mode = 'binary', 
                                                    target_size = (150, 150))     

# Flows validation images in batches of 20 using test_datagen generator
validation_generator =  test_datagen.flow_from_directory( validation_dir,
                                                          batch_size  = 20,
                                                          class_mode  = 'binary', 
                                                          target_size = (150, 150))


history = model.fit_generator(
            train_generator,
            validation_data = validation_generator,
            steps_per_epoch = 100,
            epochs = 20,
            validation_steps = 50,
            verbose = 2)


import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(len(acc))

plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
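
The snippet appears truncated here: loss and val_loss are extracted but never plotted, and no legend is drawn. A plausible completion of the plotting code (an assumption, not the original author's lines):

plt.title('Training and validation accuracy')
plt.legend()

plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()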
    log("Model restored")
    eval_globals.best_video_level_accuracy_1 = float(
        zip_file_name.split("-")[1])
    log("Current Best", eval_globals.best_video_level_accuracy_1)

    saver.restore(tf.keras.backend.get_session(),
                  checkpoints)  # use tensorflow saver
    initial_epoch = int(zip_file_name.split("-")[0])  # get epoch number
else:
    # the model starts from scratch; its initialization is already done above
    log("Starting from scratch")
    # expected input data shape: (batch_size, timesteps, data_dim)
    recurrent_fusion_model.summary()
    initial_epoch = 0

# training
recurrent_fusion_model.fit_generator(
    train_generator(),
    epochs=epochs,
    steps_per_epoch=(num_training_samples + batch_size - 1) // batch_size,
    validation_data=test_generator(),
    validation_steps=(num_testing_samples + batch_size - 1) // batch_size,
    callbacks=[
        saver_callback(),
        keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                          patience=50,
                                          verbose=1,
                                          min_lr=lr / 10)
    ],
    initial_epoch=initial_epoch)
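
train_generator and saver_callback are defined elsewhere. Given the comment about the expected input shape (batch_size, timesteps, data_dim), a sketch of what train_generator might look like; all shapes and names below are illustrative assumptions.

import numpy as np

def train_generator():
    # Hypothetical generator yielding (batch_size, timesteps, data_dim)
    # batches with one-hot labels; timesteps/data_dim/num_classes are
    # assumed values, batch_size comes from the enclosing scope.
    timesteps, data_dim, num_classes = 10, 2048, 101
    while True:  # Keras generators must loop indefinitely
        x = np.random.rand(batch_size, timesteps, data_dim)
        y = np.eye(num_classes)[np.random.randint(0, num_classes, batch_size)]
        yield x, y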
Example #5
class EncDecModel:
    def __init__(self):
        # Definition of hyperparameters, data sources, and other class variables
        self.embedding_dim = 3
        self.lstm_hidden_dim = self.embedding_dim
        self.max_decoder_length = 25
        self.epochs = 10
        self.data_sequence = DataPreprocessor(64, train=True, enc_dec=True, pad_to=self.max_decoder_length)
        self.data_sequence.tokenizer.save_vocab()
        self.val_sequence = DataPreprocessor(64, train=False, enc_dec=True, pad_to=self.max_decoder_length)
        self.history = None
        self.model_path: Optional[str] = None
        self.model: Optional[KerasModel] = None

    def build(self):
        # Embedding-layer to transform input into 3D-space.
        input_embedding = Embedding(self.data_sequence.vocab_size(), self.embedding_dim)

        # Inputs
        encoder_inputs = Input(shape=(None,))
        encoder_inputs_emb = input_embedding(encoder_inputs)

        # Encoder LSTM
        encoder = LSTM(self.lstm_hidden_dim, return_state=True)
        encoder_outputs, state_h, state_c = encoder(encoder_inputs_emb)
        state = [state_h, state_c]  # state will be used to initialize the decoder

        # Start vars (emulates a constant input)
        def constant(input_batch, size):
            batch_size = K.shape(input_batch)[0]
            return K.tile(K.ones((1, size)), (batch_size, 1))

        decoder_in = Lambda(constant, arguments={'size': self.embedding_dim})(encoder_inputs_emb)  # "start word"

        # Definition of further layers to be used in the model (decoder and mapping to vocab-sized vector)
        decoder_lstm = LSTM(self.lstm_hidden_dim, return_sequences=False, return_state=True)
        decoder_dense = Dense(self.data_sequence.vocab_size(), activation='softmax')

        chars = []  # Container for single results during the loop
        for i in range(self.max_decoder_length):
            # Reshape necessary to match the LSTM's interface; the cell state will be reintroduced in the next iteration
            decoder_in = Reshape((1, self.embedding_dim))(decoder_in)
            decoder_in, hidden_state, cell_state = decoder_lstm(decoder_in, initial_state=state)
            state = [hidden_state, cell_state]

            # Mapping
            decoder_out = decoder_dense(decoder_in)

            # Reshaping and storing for later concatenation
            char = Reshape((1, self.data_sequence.vocab_size()))(decoder_out)
            chars.append(char)

            # Teacher forcing. During training the original input will be used as input to the decoder
            decoder_in_train = Lambda(lambda x, ii: x[:, -ii], arguments={'ii': i+1})(encoder_inputs_emb)
            decoder_in = Lambda(lambda x, y: K.in_train_phase(y, x), arguments={'y': decoder_in_train})(decoder_in)

        # Single results are joined together (axis 1 vanishes)
        decoded_seq = Concatenate(axis=1)(chars)

        self.model = Model(encoder_inputs, decoded_seq, name="enc_dec")
        self.model.compile(optimizer='adam', loss='categorical_crossentropy')
        self.model.summary()

        try:
            file_name = 'enc_dec_model'
            plot_model(self.model, to_file=f'{file_name}.png', show_shapes=True)
            print(f"Model built. Saved {file_name}.png\n")
        except (ImportError, FileNotFoundError):
            print(f"Skipping plotting of model due to missing dependencies.")

    def train(self, path: str = None):
        self.model_path = path or f"enc_dec_models/model_emb{self.embedding_dim}_epochs{self.epochs}.hdf5"
        checkpoint = ModelCheckpoint(f"enc_dec_models/checkpoint_emb{self.embedding_dim}_epochs" + '{epoch:02d}.hdf5',
                                     verbose=1)
        # it is currently not possible to save a model with lambda layers/expressions
        # see: https://github.com/keras-team/keras/issues/8343
        # saving the model raises the pickling error

        # self.data_sequence - in this case y is one hot encoded with vocab size
        self.history = self.model.fit_generator(self.data_sequence,
                                                # callbacks=[checkpoint],
                                                epochs=self.epochs,
                                                shuffle=True,
                                                validation_data=self.val_sequence)
        self.model.save(self.model_path)

    def predict(self, data: List[str] = None, model_path: str = None):
        if self.model is None and model_path is not None:
            print(f"Loading model from {model_path}.")
            self.model = load_model(model_path)
            self.data_sequence.tokenizer = Tokenizer.from_vocab()
        elif self.model is None:
            print(f"No model file provided. Training new model.")
            self.build()
            self.train()

        pred_sequence = PredictionSequence(self.data_sequence, data)
        predictions = self.model.predict_generator(pred_sequence, steps=len(pred_sequence))
        for index, sample in enumerate(pred_sequence.samples):
            prediction = [int(np.argmax(char)) for char in predictions[index]]  # predict_generator returns numpy arrays
            print(f"Predicted for sample {sample}: {prediction}")
Example #6
                  metrics=['accuracy'])

    from tensorflow.keras.utils import plot_model  # use the public API path
    plot_model(model,
               to_file='keras-logos-gen-xception-model.png',
               show_shapes=True,
               show_layer_names=True)

    model.summary()

    tensorboard = TensorBoard(log_dir='./tensorboard-logs',
                              histogram_freq=1,
                              write_graph=False)

    model.fit_generator(train_dir_iterator,
                        epochs=EPOCHS,
                        validation_data=val_dir_iterator,
                        callbacks=[tensorboard])

    # unfreeze all layers for more training
    for layer in model.layers:
        layer.trainable = True

    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    model.fit_generator(train_dir_iterator,
                        epochs=EPOCHS,
                        validation_data=val_dir_iterator,
                        callbacks=[tensorboard])
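
One note on the pattern above: in Keras, changes to layer.trainable only take effect after compile, which is why the model is recompiled before the second fit_generator call. For the fine-tuning phase a much lower learning rate is commonly used; a hedged variant of the recompile step (the learning rate value is an assumption):

from tensorflow.keras.optimizers import SGD

# Fine-tuning: keep the learning rate small so the newly unfrozen
# pretrained weights are only nudged, not overwritten.
model.compile(loss='categorical_crossentropy',
              optimizer=SGD(learning_rate=1e-4, momentum=0.9),
              metrics=['accuracy'])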