Example #1
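    # This snippet is a fragment of a model wrapper class; for it to run, the
    # module would need roughly the following imports (exact paths are an
    # assumption, in particular the Timer helper), and self.model is assumed to
    # have been created elsewhere, e.g. as a keras Sequential():
    #
    #   import os
    #   import datetime as dt
    #   from keras.models import Sequential
    #   from keras.layers import Dense, Dropout, LSTM
    #   from keras.callbacks import EarlyStopping, ModelCheckpoint
    #   from core.utils import Timer  # hypothetical location of the Timer utility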
    def build_model(self, configs):
        """Build and compile the Keras model described by the configs dict."""
        timer = Timer()
        timer.start()

        # Config-wide defaults: FeaturesPerStep is read from the data sequence
        # length and TimeSteps from the training batch size; TimeSteps also
        # doubles as the neuron count for dense layers below.
        FeaturesPerStep = configs['data']['sequence_length']
        TimeSteps = configs['training']['batch_size']

        for layer in configs['model']['layers']:
            # For dense layers the neuron count is forced to TimeSteps;
            # every other layer reads it from its own config entry (None if absent).
            if layer['type'] == 'dense':
                neurons = TimeSteps
            else:
                neurons = layer.get('neurons')

            dropout_rate = layer.get('rate')
            activation = layer.get('activation')
            return_seq = layer.get('return_seq')

            # input_timesteps == -1 means "fall back to the config-wide defaults".
            if layer.get('input_timesteps') == -1:
                input_timesteps = TimeSteps
                input_dim = FeaturesPerStep
            else:
                input_timesteps = layer.get('input_timesteps')
                input_dim = layer.get('input_dim')

            print("INPUT TIME STEPS ARE: ", input_timesteps,
                  "time steps and FEATURES ARE: ", input_dim)

            if layer['type'] == 'dense':
                self.model.add(Dense(neurons, activation=activation))
            elif layer['type'] == 'lstm':
                self.model.add(
                    LSTM(neurons,
                         input_shape=(input_timesteps, input_dim),
                         return_sequences=return_seq))
            elif layer['type'] == 'dropout':
                self.model.add(Dropout(dropout_rate))

        self.model.compile(loss=configs['model']['loss'],
                           optimizer=configs['model']['optimizer'])

        print('[Model] Model Compiled')
        self.model.summary()
        timer.stop()
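
    # An illustrative configs dict that build_model would accept; the key names
    # follow the lookups above, while the concrete values are only examples
    # (note that for 'dense' layers the code overrides 'neurons' with TimeSteps):
    #
    #   configs = {
    #       'data': {'sequence_length': 50},
    #       'training': {'batch_size': 32},
    #       'model': {
    #           'loss': 'mse',
    #           'optimizer': 'adam',
    #           'layers': [
    #               {'type': 'lstm', 'neurons': 100, 'input_timesteps': 49,
    #                'input_dim': 2, 'return_seq': True},
    #               {'type': 'dropout', 'rate': 0.2},
    #               {'type': 'lstm', 'neurons': 100, 'return_seq': False},
    #               {'type': 'dense', 'activation': 'linear'},
    #           ],
    #       },
    #   }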

    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch, save_dir):
        """Train the model out-of-memory from a batch generator."""
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' % (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname, monitor='loss', save_best_only=True)
        ]
        # Note: fit_generator is deprecated in newer Keras/TensorFlow releases,
        # where model.fit accepts generators directly; kept here as in the source.
        self.model.fit_generator(
            data_gen,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            callbacks=callbacks,
            workers=1
        )

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
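
    # Hypothetical usage sketch for train_generator; the data object, its
    # generate_train_batch method, and the extra config keys 'epochs' and
    # 'save_dir' are illustrative names, not part of the source:
    #
    #   steps = data.len_train // configs['training']['batch_size']
    #   model.train_generator(
    #       data_gen=data.generate_train_batch(
    #           seq_len=configs['data']['sequence_length'],
    #           batch_size=configs['training']['batch_size'],
    #           normalise=True),
    #       epochs=configs['training']['epochs'],
    #       batch_size=configs['training']['batch_size'],
    #       steps_per_epoch=steps,
    #       save_dir=configs['model']['save_dir'])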

    def train(self, x, y, epochs, batch_size, save_dir):
        """Train the model in-memory on arrays x and y."""
        timer = Timer()
        timer.start()

        save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        # NOTE: both callbacks monitor 'val_loss', which is only produced if fit()
        # receives validation data (e.g. a validation_split); without it they are
        # effectively inactive and the model is only saved by the explicit save below.
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True)
        ]
        self.model.fit(
            x,
            y,
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks
        )
        self.model.save(save_fname)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
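
    # Hypothetical usage sketch for in-memory training; ModelClass, x and y are
    # illustrative names (x shaped (samples, timesteps, features), y shaped
    # (samples, 1)), and the extra config keys are assumptions:
    #
    #   model = ModelClass()
    #   model.build_model(configs)
    #   model.train(x, y,
    #               epochs=configs['training']['epochs'],
    #               batch_size=configs['training']['batch_size'],
    #               save_dir=configs['model']['save_dir'])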