예제 #1
0
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        """Train the model from a data generator (out-of-memory training).

        Args:
            data_gen: generator yielding (x, y) batches.
            epochs: total number of training epochs.
            batch_size: batch size (only used in the log message here).
            steps_per_epoch: generator steps per epoch, usually
                data_size // batch_size.
            save_dir: directory in which the timestamped checkpoint file
                (best weights by training loss) is written.
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]
        # fit() accepts generators directly; fit_generator is deprecated
        # and removed in recent TensorFlow/Keras releases.
        self.model.fit(data_gen,
                       steps_per_epoch=steps_per_epoch,
                       epochs=epochs,
                       callbacks=callbacks,
                       workers=1)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        """Train incrementally from *data_gen* instead of loading the whole
        dataset into memory at once.

        Args:
            data_gen: generator yielding (x, y) training batches.
            epochs: number of training epochs.
            batch_size: batch size (only used in the log message).
            steps_per_epoch: generator steps per epoch.
            save_dir: directory for the timestamped checkpoint file
                (best weights by training loss).
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]
        # fit() accepts generators directly; fit_generator is deprecated
        # and removed in recent TensorFlow/Keras releases.
        self.model.fit(data_gen,
                       steps_per_epoch=steps_per_epoch,
                       epochs=epochs,
                       callbacks=callbacks,
                       workers=1)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
예제 #3
0
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        """Out-of-memory training: fit the model from a batch generator.

        Args:
            data_gen: generator yielding (x, y) batches.
            epochs: number of training epochs.
            batch_size: batch size (only used in the log message).
            steps_per_epoch: generator steps per epoch.
            save_dir: directory for the timestamped checkpoint file
                (best weights by training loss).
        """
        timer = Timer()
        timer.start()
        print('MODEL Out-of-Memory Training Started')
        print(
            f'MODEL {epochs} epochs, {batch_size} batch size, {steps_per_epoch} batches per epoch'
        )

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))

        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]

        # fit() accepts generators directly; fit_generator is deprecated
        # and removed in recent TensorFlow/Keras releases.
        self.model.fit(data_gen,
                       steps_per_epoch=steps_per_epoch,
                       epochs=epochs,
                       callbacks=callbacks,
                       workers=1)

        print(
            f'MODEL Out-of-Memory Training Completed. Model saved as {save_fname}'
        )
        timer.stop()
예제 #4
0
    def train(self, X, y, epochs, batch_size, save_dir, logs):
        """Train on in-memory data with early stopping, best-model
        checkpointing and TensorBoard logging, then save the final model.

        NOTE(review): the callbacks monitor 'val_loss', but fit() receives
        no validation data here — confirm against the caller. Also note
        the final save() overwrites the best checkpoint with the
        last-epoch weights.
        """
        clock = Timer()
        clock.start()
        print('MODEL Training Started')
        print(f'MODEL {epochs} epochs, {batch_size} batch size')

        checkpoint_path = os.path.join(
            save_dir,
            f'{dt.datetime.now().strftime("%d%m%Y-%H%M%S")}-e{epochs}.h5')

        cb_list = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=checkpoint_path,
                            monitor='val_loss',
                            save_best_only=True),
            TensorBoard(
                log_dir=
                f'{logs}/{dt.datetime.now().strftime("%d%m%Y-%H%M%S")}-e{epochs}'
            )
        ]

        # shuffle disabled — presumably to keep temporal sample order;
        # confirm with the data pipeline.
        self.model.fit(X,
                       y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=cb_list,
                       shuffle=False)
        self.model.save(checkpoint_path)

        print(f'MODEL Training Completed. Model saved as {checkpoint_path}')
        clock.stop()
    def train(self, x, y, epochs, batch_size):
        """Fit on in-memory arrays; best weights go to a fixed file path.

        NOTE(review): the save path is hard-coded to
        'saved_models/tracker.h5', and 'val_loss' is monitored although
        fit() gets no validation data — confirm both with the caller.
        """
        clock = Timer()
        clock.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        save_fname = 'saved_models/tracker.h5'

        cbs = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_loss',
                            save_best_only=True)
        ]
        self.model.fit(x, y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=cbs)
        # Final-epoch weights overwrite whatever the checkpoint wrote.
        self.model.save(save_fname)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        clock.stop()
예제 #6
0
    def continue_training(self, epoch_start, model_fname, statistics_fname,
                          data_obj, configs):
        """Resume training from epoch *epoch_start* using data generators.

        Args:
            epoch_start: epoch index to resume from (passed to
                initial_epoch so epoch numbering and schedules continue).
            model_fname: checkpoint path; best weights (by training loss)
                are written here.
            statistics_fname: CSV file per-epoch metrics are appended to.
            data_obj: object exposing .train and .validation generators.
            configs: config dict; reads configs['training'] keys
                'epochs', 'batch_size' and 'log_dir'.

        A KeyboardInterrupt stops training early; whatever checkpoint
        ModelCheckpoint last wrote remains on disk.
        """
        timer = Timer()
        timer.start()
        epochs = configs['training']['epochs']
        batch_size = configs['training']['batch_size']
        print('[Model] Training Continuation')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
        callbacks = [
            # Keep only the best weights, judged by training loss.
            ModelCheckpoint(filepath=model_fname,
                            monitor='loss',
                            save_best_only=True),
            # NOTE(review): batch_size / write_grads / embeddings_* are
            # only accepted by older TensorBoard callback versions —
            # confirm the pinned Keras/TF version.
            TensorBoard(log_dir=configs['training']['log_dir'],
                        histogram_freq=0,
                        batch_size=configs['training']['batch_size'],
                        write_graph=True,
                        write_grads=False,
                        write_images=False,
                        embeddings_freq=0,
                        embeddings_layer_names=None,
                        embeddings_metadata=None,
                        embeddings_data=None,
                        update_freq='epoch'),
            # Halve the LR after 5 stagnant epochs, floored at 1e-6.
            ReduceLROnPlateau(monitor='loss',
                              factor=0.5,
                              patience=5,
                              min_lr=1e-6,
                              verbose=1,
                              cooldown=0),
            # CyclicLR(base_lr=configs['model']['lr'],
            #          max_lr=0.1,
            #          step_size=8*(data_obj.train_samples-configs['data']['sequence_length'])/configs['training']['batch_size'],
            #          scale_fn=None,
            #          mode='triangular2'),
            TerminateOnNaN(),
            CSVLogger(statistics_fname, separator=',', append=True),
            EarlyStopping(monitor='loss', patience=10, verbose=1)
        ]
        try:
            # steps_per_epoch/validation_steps left as None so Keras infers
            # them — assumes data_obj.train/.validation are Sequence-like;
            # TODO confirm.
            self.model.fit_generator(data_obj.train,
                                     steps_per_epoch=None,
                                     epochs=configs['training']['epochs'],
                                     verbose=0,
                                     callbacks=callbacks,
                                     validation_data=data_obj.validation,
                                     validation_steps=None,
                                     shuffle=True,
                                     max_queue_size=100,
                                     workers=5,
                                     use_multiprocessing=False,
                                     initial_epoch=epoch_start)
        except KeyboardInterrupt:
            print(
                '[Model] Training Interrupted by Keyboard. Model saved as %s' %
                model_fname)
            timer.stop()
            return

        print('[Model] Training Completed. Model saved as %s' % model_fname)
        timer.stop()
예제 #7
0
    def build_model(self, configs):
        """Assemble self.model from the layer specs in configs['model']
        and compile it with the configured loss and optimizer.

        Each layer dict supplies 'type' plus only the keys it needs;
        missing keys default to None via dict.get.
        """
        timer = Timer()
        timer.start()

        for layer in configs['model']['layers']:
            neurons = layer.get('neurons')
            dropout_rate = layer.get('rate')
            activation = layer.get('activation')
            return_seq = layer.get('return_seq')
            input_timesteps = layer.get('input_timesteps')
            input_dim = layer.get('input_dim')

            # Layer types are mutually exclusive, so an elif chain is
            # equivalent to the original separate ifs.
            layer_type = layer['type']
            if layer_type == 'dense':
                self.model.add(Dense(neurons, activation=activation))
            elif layer_type == 'lstm':
                # e.g. LSTM(100, input_shape=(49, 2), ...)
                self.model.add(
                    LSTM(neurons,
                         input_shape=(input_timesteps, input_dim),
                         return_sequences=return_seq))
            elif layer_type == 'dropout':
                self.model.add(Dropout(dropout_rate))

        self.model.compile(loss=configs['model']['loss'],
                           optimizer=configs['model']['optimizer'])

        print('[Model] Model Compiled')
        timer.stop()
예제 #8
0
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        """Train the model from a batch generator, checkpointing the best
        weights (by training loss) to a timestamped file in *save_dir*."""
        clock = Timer()
        clock.start()
        print("[Model] Training Started")
        print("[Model] %s epochs, %s batch size, %s batches per epoch" %
              (epochs, batch_size, steps_per_epoch))

        stamp = dt.datetime.now().strftime("%d%m%Y-%H%M%S")
        checkpoint_path = os.path.join(save_dir,
                                       "%s-e%s.h5" % (stamp, str(epochs)))
        checkpoint = ModelCheckpoint(filepath=checkpoint_path,
                                     monitor="loss",
                                     save_best_only=True)
        self.model.fit(data_gen,
                       steps_per_epoch=steps_per_epoch,
                       epochs=epochs,
                       callbacks=[checkpoint],
                       workers=1)
        print("[Model] Training Completed. Model saved as %s" %
              checkpoint_path)
        clock.stop()
예제 #9
0
    def train(self, x, y, epochs, batch_size, save_dir):
        """Train on in-memory arrays with early stopping and best-model
        checkpointing, then save the final weights over the same file.

        NOTE(review): the callbacks monitor "val_loss" but fit() receives
        no validation data — confirm against the caller.
        """
        clock = Timer()
        clock.start()
        print("[Model] Training Started")
        print("[Model] %s epochs, %s batch size" % (epochs, batch_size))

        stamp = dt.datetime.now().strftime("%d%m%Y-%H%M%S")
        checkpoint_path = os.path.join(save_dir,
                                       "%s-e%s.h5" % (stamp, str(epochs)))
        cb_list = [
            EarlyStopping(monitor="val_loss", patience=2),
            ModelCheckpoint(filepath=checkpoint_path,
                            monitor="val_loss",
                            save_best_only=True),
        ]
        self.model.fit(x, y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=cb_list)
        self.model.save(checkpoint_path)

        print("[Model] Training Completed. Model saved as %s" %
              checkpoint_path)
        clock.stop()
예제 #10
0
 def build_one_by_one_model_ngut(self):
     """Build and compile a small dense regression network (one output).

     Architecture: Dense(64, sigmoid) -> BatchNorm -> LeakyReLU ->
     Dense(16) -> BatchNorm -> LeakyReLU -> Dense(1) -> linear
     activation. Both hidden Dense layers carry an L2 activity
     regularizer.

     Side effects: disables Options.KerasEarlyStopping and prints a
     model summary. Returns the compiled model (also kept on self.model).
     """
     timer = Timer()
     timer.start()
     Options.KerasEarlyStopping = False
     self.model.add(
         Dense(64,
               activation='sigmoid',
               input_dim=Options.InputFeatureSize,
               activity_regularizer=keras.regularizers.l2(0.01)))
     self.model.add(BatchNormalization())
     self.model.add(LeakyReLU())
     self.model.add(
         Dense(16, activity_regularizer=keras.regularizers.l2(0.01)))
     self.model.add(BatchNormalization())
     self.model.add(LeakyReLU())
     self.model.add(Dense(1))
     self.model.add(Activation('linear'))
     # NOTE(review): `lr` is the legacy keyword; newer Keras expects
     # `learning_rate` — confirm the pinned Keras version.
     optimizer = RMSprop(lr=0.00050)
     self.model_loss = 'mae'  # alternatives: 'mean_squared_error', 'mae'
     # set() de-duplicates in case model_loss is already 'mae'.
     self.model.compile(optimizer=optimizer,
                        loss=self.model_loss,
                        metrics=list(set(['mae', self.model_loss])))
     print('[Model] Model Compiled')
     print('[Model] Model Loss Function is %s.' % (self.model_loss))
     timer.stop()
     self.model.summary()
     return self.model
예제 #11
0
	def train(self, x, y, epochs, batch_size, save_dir,X_test,Y_test,saveName):
		"""Train on in-memory data with early stopping on val_loss,
		best-model checkpointing and TensorBoard logging.

		Args:
			x, y: training inputs/targets.
			epochs, batch_size: training schedule.
			save_dir: directory for the timestamped .h5 checkpoint.
			X_test, Y_test: used as the validation set during fit().
			saveName: tag embedded in the TensorBoard log directory name.
		"""
		timer = Timer()
		timer.start()
		print('[Model] Training Started')
		print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
		#logdir = "logs/scalars/" + dt.datetime.now().strftime("%Y%m%d-%H%M%S")
		
		# Stop once val_loss has not improved for 5 epochs.
		es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', patience=5)

		tensorboard_callback = TensorBoard(log_dir="logs/scalars/" + saveName+"v3" ,histogram_freq=1)
		save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs )+"testv3"))
		callbacks = [
			es,
			#EarlyStopping(monitor='val_loss', patience=25),
			ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True),
			tensorboard_callback
		]
		self.model.fit(
			x,
			y,
			epochs=epochs,
			batch_size=batch_size,
			callbacks=callbacks,
			validation_data=(X_test, Y_test)
		)
		# NOTE(review): this overwrites the best checkpoint written above
		# with the final-epoch weights — confirm that is intended.
		self.model.save(save_fname)

		print('[Model] Training Completed. Model saved as %s' % save_fname)
		timer.stop()
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir, configs):
        """Train from a batch generator with TensorBoard logging and
        best-loss checkpointing.

        The checkpoint filename combines configs['data']['filename'], a
        timestamp and the epoch count.
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))
        log_dir = "logs/fit/" + datetime.datetime.now().strftime(
            "%Y%m%d-%H%M%S")
        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                              histogram_freq=1)
        save_fname = os.path.join(
            save_dir, '%s-%s-e%s.h5' %
            (configs['data']['filename'],
             dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True), tensorboard_callback
        ]
        # fit() accepts generators directly; fit_generator is deprecated
        # and removed in recent TensorFlow/Keras releases.
        self.model.fit(data_gen,
                       steps_per_epoch=steps_per_epoch,
                       epochs=epochs,
                       callbacks=callbacks,
                       workers=1)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
예제 #13
0
 def build_one_by_one_model_test(self):
     """Build and compile an experimental dense regression network.

     Architecture: input Dropout(0.2) -> Dense(60, sigmoid) ->
     Dropout(0.2) -> Dense(1, linear). Compiled with RMSprop and MAE
     loss plus several regression metrics.

     Side effects: disables Options.KerasEarlyStopping and prints a
     model summary. Returns the compiled model (also kept on self.model).
     """
     timer = Timer()
     timer.start()
     Options.KerasEarlyStopping = False
     # Dropout applied directly to the raw inputs.
     self.model.add(Dropout(0.2, input_shape=(Options.InputFeatureSize, )))
     self.model.add(
         Dense(60,
               input_dim=Options.InputFeatureSize,
               activation='sigmoid',
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros'))  #CAREFUL SIGMOID , NO NORMAL INIT
     self.model.add(Dropout(0.2))
     #self.model.add(Dense(25, activation = 'relu'))
     self.model.add(Dense(1, activation='linear'))  #CAREFUL SIGMOID
     # NOTE(review): `lr` is the legacy keyword; newer Keras expects
     # `learning_rate` — confirm the pinned Keras version.
     optimizer = RMSprop(lr=0.00050)
     #optimizer = RMSprop()
     #optimizer = SGD()
     #optimizer = Adam()
     self.model_loss = 'mae'  #'mean_squared_error', 'mae', 'logcosh'
     # set() de-duplicates model_loss, which is already in the list.
     self.model.compile(optimizer=optimizer,
                        loss=self.model_loss,
                        metrics=list(
                            set([
                                'mae', 'mean_absolute_percentage_error',
                                'mean_squared_error', 'logcosh',
                                self.model_loss
                            ])))
     print('[Model] Model Compiled')
     print('[Model] Model Loss Function is %s.' % (self.model_loss))
     timer.stop()
     self.model.summary()
     return self.model
예제 #14
0
    def train_generator(self,
                        data_gen,
                        epochs,
                        batch_size,
                        steps_per_epoch,
                        save_dir,
                        validation_data=None,
                        validation_steps=None):
        """Train from a batch generator, optionally with validation data.

        Args:
            data_gen: generator yielding (x, y) batches.
            epochs: number of training epochs.
            batch_size: batch size (only used in the log message).
            steps_per_epoch: generator steps per epoch.
            save_dir: directory for the timestamped checkpoint file.
            validation_data: optional validation generator/tuple.
            validation_steps: steps per validation pass.

        Returns:
            The Keras History object from the fit call.
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]
        # fit() accepts generators directly; fit_generator is deprecated
        # and removed in recent TensorFlow/Keras releases.
        hist = self.model.fit(data_gen,
                              steps_per_epoch=steps_per_epoch,
                              epochs=epochs,
                              callbacks=callbacks,
                              workers=1,
                              validation_data=validation_data,
                              validation_steps=validation_steps)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
        return hist
예제 #15
0
    def fit_one_by_one(self, x_train_o, y_train_o, x_val_o, y_val_o,
                       batch_size, epochs, callbacks):
        """Fit on the given train/validation split, then save the model
        under a timestamped filename.

        Training can be aborted with Ctrl-C; the partially trained model
        is still saved afterwards.
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
        print('[Model] Train Shape: %s' % str(x_train_o.shape))
        print('[Model] Validation Shape: %s' % str(x_val_o.shape))

        try:
            # validation_data must be a tuple: recent Keras versions
            # reject a [x, y] list here.
            self.model.fit(x_train_o,
                           y_train_o,
                           epochs=epochs,
                           batch_size=batch_size,
                           validation_data=(x_val_o, y_val_o),
                           verbose=Options.KerasVerbose,
                           callbacks=callbacks)
        except KeyboardInterrupt:
            print('Keyboard interrupt.')

        save_fname = os.path.join(
            Options.KerasNNSaveDirectory,
            '%s-e%s.hdf5' % (dt.datetime.now().strftime('%Y%m%d-%H%M%S'),
                             str(len(self.model.history.epoch))))
        self.model.save(save_fname)
        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
예제 #16
0
파일: model.py 프로젝트: BenfenYU/HelpPlay
	def train(self, x, y, epochs, batch_size, save_dir,history,x_test = None,y_test = None):
		"""Train on in-memory data with early stopping, checkpointing and a
		caller-supplied history callback; records the saved path in the
		module-level `newest_model`.

		Args:
			x, y: training inputs/targets.
			epochs, batch_size: training schedule.
			save_dir: directory for the timestamped .h5 checkpoint.
			history: extra Keras callback appended to the callback list.
			x_test, y_test: validation set passed to fit().
		"""
		timer = Timer()
		timer.start()
		print('[Model] Training Started')
		print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
		
		save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
		callbacks = [
			EarlyStopping(monitor='val_loss', patience=2),
			ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True),
			history
		]
		# Pass the validation set and the callback list to fit().
		self.model.fit(
			x,
			y,
			epochs=epochs,
			batch_size=batch_size,
			callbacks=callbacks,
			validation_data=(x_test,y_test)
		)
		self.model.save(save_fname)

		global newest_model
		newest_model = save_fname

		print('[Model] Training Completed. Model saved as %s' % save_fname)
		timer.stop()
예제 #17
0
    def train(self, x, y, epochs, batch_size, save_dir, x_val, y_val):
        """Train on in-memory data, optionally validating on (x_val, y_val).

        The best weights by val_loss are checkpointed, then the final
        weights are saved over the same file.

        Returns:
            The Keras History object from fit().
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        # `x_val and y_val` raises "truth value of an array ... is
        # ambiguous" for multi-element numpy arrays; compare against None
        # explicitly instead.
        if x_val is not None and y_val is not None:
            validation_data = (x_val, y_val)
        else:
            validation_data = None
        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_loss',
                            save_best_only=True)
        ]
        hist = self.model.fit(x,
                              y,
                              epochs=epochs,
                              batch_size=batch_size,
                              callbacks=callbacks,
                              validation_data=validation_data)
        self.model.save(save_fname)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()

        return hist
예제 #18
0
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        """Train from a batch generator; stoppable via an OS signal.

        NOTE(review): the ModelCheckpoint callback is commented out, so
        nothing is actually written to save_fname even though the final
        log line claims a model was saved — confirm whether checkpointing
        should be re-enabled.
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%Y%m%d-%H%M%S'), str(epochs)))

        callbacks = [
            #ModelCheckpoint(filepath=save_fname, monitor='loss', save_best_only=True),
            # doubleSignalExits=True — presumably a second signal forces
            # an immediate exit; confirm SignalStopping semantics.
            SignalStopping(doubleSignalExits=True,
                           verbose=Options.KerasVerbose)
        ]

        # fit() accepts generators directly; fit_generator is deprecated
        # and removed in recent TensorFlow/Keras releases.
        self.model.fit(data_gen,
                       steps_per_epoch=steps_per_epoch,
                       epochs=epochs,
                       callbacks=callbacks)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
예제 #19
0
파일: model.py 프로젝트: BenfenYU/HelpPlay
	def build_model(self, configs):
		"""Assemble self.model from the layer specs in configs['model']
		and compile it.

		Each layer dict provides 'type' plus optional keys (neurons,
		rate, activation, return_seq, input_timesteps, input_dim);
		missing keys default to None.
		"""
		timer = Timer()
		timer.start()

		for layer in configs['model']['layers']:
			neurons = layer['neurons'] if 'neurons' in layer else None
			dropout_rate = layer['rate'] if 'rate' in layer else None
			activation = layer['activation'] if 'activation' in layer else None
			return_seq = layer['return_seq'] if 'return_seq' in layer else None
			input_timesteps = layer['input_timesteps'] if 'input_timesteps' in layer else None
			input_dim = layer['input_dim'] if 'input_dim' in layer else None

			if layer['type'] == 'dense':
				# Add a fully-connected layer.
				self.model.add(Dense(neurons, activation=activation))
			if layer['type'] == 'lstm':
				self.model.add(LSTM(neurons, input_shape=(input_timesteps, input_dim), return_sequences=return_seq))
			if layer['type'] == 'dropout':
				# Dropout applied to its input.
				self.model.add(Dropout(dropout_rate))

		# Compile once at build time (configures loss, optimizer and the
		# metrics that callbacks can monitor); prediction does not require
		# recompiling.
		self.model.compile(loss=configs['model']['loss'], optimizer=configs['model']['optimizer']
		,metrics=["accuracy","mae"])

		print('[Model] Model Compiled')
		timer.stop()
예제 #20
0
    def build_windowed_batch_LSTM_model(self):
        """Build and compile a stacked 3-layer LSTM regression model for
        windowed sequences.

        Side effects: overwrites several global Options fields
        (WindowSequenceLength=56, WindowShiftStep=1, KerasEpochs=30,
        KerasWindowedBatchSize=400, KerasEarlyStopping=False).
        Returns the compiled model (also kept on self.model).
        """
        timer = Timer()
        timer.start()
        Options.WindowSequenceLength = 56
        Options.WindowShiftStep = 1
        Options.KerasEpochs = 30
        Options.KerasWindowedBatchSize = 400
        Options.KerasEarlyStopping = False

        #self.model.add(Reshape((Options.WindowSequenceLength, Options.InputFeatureSize), input_shape=(Options.WindowSequenceLength, Options.InputFeatureSize,)))
        self.model.add(
            LSTM(100,
                 input_shape=(Options.WindowSequenceLength,
                              Options.InputFeatureSize),
                 return_sequences=True))
        self.model.add(Dropout(0.2))
        self.model.add(LSTM(100, return_sequences=True))
        # The final LSTM collapses the sequence to a single vector.
        self.model.add(LSTM(100, return_sequences=False))
        self.model.add(Dropout(0.2))
        self.model.add(
            Dense(1, kernel_initializer='normal', activation='linear'))
        # NOTE(review): `lr` is the legacy keyword; newer Keras expects
        # `learning_rate` — confirm the pinned Keras version.
        optimizer = RMSprop(lr=0.00050)
        #optimizer = Adam()
        self.model_loss = 'mean_squared_error'  #'mean_squared_error', 'mae'
        self.model.compile(optimizer=optimizer,
                           loss=self.model_loss,
                           metrics=list(set(['mae', self.model_loss])))
        print('[Model] Model Compiled')
        timer.stop()
        return self.model
예제 #21
0
    def build_windowed_batch_CONV_model(self):
        """Build and compile a 1-D convolutional regression model for
        windowed sequences.

        Side effects: overwrites Options.KerasEpochs (20) and
        Options.KerasWindowedBatchSize (400). The compiled model stays on
        self.model; unlike the sibling builders, nothing is returned.
        """
        timer = Timer()
        timer.start()
        Options.KerasEpochs = 20
        Options.KerasWindowedBatchSize = 400

        #self.model.add(Reshape((Options.WindowSequenceLength, Options.InputFeatureSize), input_shape=(input_shape,)))
        self.model.add(
            Conv1D(100,
                   10,
                   activation='relu',
                   input_shape=(Options.WindowSequenceLength,
                                Options.InputFeatureSize)))
        self.model.add(Conv1D(100, 10, activation='relu'))
        self.model.add(MaxPooling1D(3))
        self.model.add(Conv1D(160, 10, activation='relu'))
        #self.model.add(Conv1D(160, 10, activation='relu'))
        # Average over the time axis, then regress to a single value.
        self.model.add(GlobalAveragePooling1D())
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1, activation='linear'))

        # NOTE(review): `lr` is the legacy keyword; newer Keras expects
        # `learning_rate` — confirm the pinned Keras version.
        optimizer = RMSprop(lr=0.00050)
        #optimizer = Adam()
        self.model_loss = 'mae'  #'mean_squared_error', 'mae'
        self.model.compile(optimizer=optimizer,
                           loss=self.model_loss,
                           metrics=list(set(['mae', self.model_loss])))
        print('[Model] Model Compiled')
        timer.stop()
    def train_generator(self, train_loader, val_loader, epochs, batch_size,
                        steps_per_epoch, validation_steps, save_dir, log_dir):
        """Train from train/validation generators with per-epoch
        checkpoints, an exponential LR decay schedule, early stopping and
        TensorBoard logging.
        """
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        time_format = time.localtime(time.time())
        # {epoch}/{val_loss} are ModelCheckpoint filename placeholders,
        # filled in at save time.
        model_fname = 'model-%s-e{epoch:02d}-{val_loss:.5f}.h5' % (
            time.strftime('%Y%m%d%H%M%S', time_format))
        model_save_path = os.path.join(save_dir, model_fname)

        def lr_schedule(epoch):
            # Exponential decay: 0.001 * 0.95**epoch.
            return 0.001 * 0.95**epoch

        learning_rate = np.array([lr_schedule(i) for i in range(epochs)])
        callbacks = [
            # save_best_only=False: keep a checkpoint for every epoch.
            ModelCheckpoint(filepath=model_save_path,
                            monitor='val_loss',
                            save_best_only=False),
            LearningRateScheduler(lambda epoch: float(learning_rate[epoch])),
            EarlyStopping(monitor='val_loss', patience=2, verbose=1),
            TensorBoard(log_dir=log_dir, write_graph=True)
        ]
        # fit() accepts generators directly; fit_generator is deprecated
        # and removed in recent TensorFlow/Keras releases.
        self.model.fit(train_loader,
                       steps_per_epoch=steps_per_epoch,
                       epochs=epochs,
                       validation_data=val_loader,
                       validation_steps=validation_steps,
                       callbacks=callbacks)
        print('[Model] Training Completed. Model saved as %s' %
              model_save_path)
        timer.stop()
예제 #23
0
    def train(self, x, y, epochs, batch_size, save_dir):
        """Train on in-memory arrays with early stopping and best-model
        checkpointing, then save the final model over the same file.

        NOTE(review): the callbacks monitor 'val_loss' but no validation
        data is passed to fit() — confirm with the caller.
        """
        timer = Timer()
        timer.start()
        logger.info('[Model] Training Started')
        # Lazy %-style args: the message is only formatted if the record
        # is actually emitted (standard logging idiom).
        logger.info('[Model] %s epochs, %s batch size', epochs, batch_size)

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_loss',
                            save_best_only=True)
        ]
        self.model.fit(x,
                       y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=callbacks)
        self.model.save(save_fname)

        logger.info('[Model] Training Completed. Model saved as %s',
                    save_fname)
        timer.stop()
예제 #24
0
    def build_model(self, configs):
        """Build self.model layer-by-layer from configs["model"]["layers"]
        and compile it with the configured loss and optimizer.

        Each layer spec carries a "type" plus only the keyword fields it
        needs; absent fields fall back to None.
        """
        clock = Timer()
        clock.start()

        for spec in configs["model"]["layers"]:
            neurons = spec.get("neurons")
            dropout_rate = spec.get("rate")
            activation = spec.get("activation")
            return_seq = spec.get("return_seq")
            input_timesteps = spec.get("input_timesteps")
            input_dim = spec.get("input_dim")

            kind = spec["type"]
            if kind == "dense":
                self.model.add(Dense(neurons, activation=activation))
            if kind == "lstm":
                self.model.add(
                    LSTM(neurons,
                         input_shape=(input_timesteps, input_dim),
                         return_sequences=return_seq))
            if kind == "dropout":
                self.model.add(Dropout(dropout_rate))

        self.model.compile(loss=configs["model"]["loss"],
                           optimizer=configs["model"]["optimizer"])

        print("[Model] Model Compiled")
        clock.stop()
예제 #25
0
    def build_model(self, configs):
        """Build self.model from configs['model']['layers'] and compile it
        with an Adam optimizer at configs['model']['lr'].

        Note: LSTM input_timesteps is taken from
        configs['data']['sequence_length'] (not from the layer spec)
        whenever the layer declares an 'input_timesteps' key.
        """
        timer = Timer()
        timer.start()

        for i, layer in enumerate(configs['model']['layers']):
            neurons = layer['neurons'] if 'neurons' in layer else None
            dropout_rate = layer['rate'] if 'rate' in layer else None
            activation = layer['activation'] if 'activation' in layer else None
            return_seq = layer['return_seq'] if 'return_seq' in layer else None
            input_timesteps = configs['data'][
                'sequence_length'] if 'input_timesteps' in layer else None
            input_dim = layer['input_dim'] if 'input_dim' in layer else None

            if layer['type'] == 'dense':
                self.model.add(Dense(neurons, activation=activation))
            if layer['type'] == 'lstm':
                self.model.add(
                    LSTM(neurons,
                         input_shape=(input_timesteps, input_dim),
                         return_sequences=return_seq))
            if layer['type'] == 'dropout':
                self.model.add(Dropout(dropout_rate))
        # NOTE(review): `lr` is the legacy keyword; newer Keras expects
        # `learning_rate` — confirm the pinned Keras version.
        optimizer = Adam(lr=configs['model']['lr'])
        self.model.compile(loss=configs['model']['loss'], optimizer=optimizer)

        print('[Model] Model Compiled')
        print(self.model.summary())
        timer.stop()
예제 #26
0
	def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch, save_dir):
		"""Train from a batch generator with checkpointing, TensorBoard
		logging and early stopping on the training loss.

		NOTE(review): ModelCheckpoint monitors 'val_loss', but no
		validation data is supplied to the fit call — confirm intended.
		"""
		timer = Timer()
		timer.start()
		print('[Model] Training Started')
		print('[Model] %s epochs, %s batch size, %s batches per epoch' % (epochs, batch_size, steps_per_epoch))

		# The original code built a second, timestamped TensorBoard callback
		# and immediately overwrote it; only this one was ever used.
		tensorboard_callback = TensorBoard(log_dir=".logs")
		es = tf.keras.callbacks.EarlyStopping(monitor='loss', mode='min', verbose=1, patience=5)

		save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
		callbacks = [
			ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True),
			tensorboard_callback,
			es
		]
		# fit() accepts generators directly; fit_generator is deprecated
		# and removed in recent TensorFlow/Keras releases.
		self.model.fit(
			data_gen,
			steps_per_epoch=steps_per_epoch,
			epochs=epochs,
			callbacks=callbacks,
			workers=1
		)

		print('[Model] Training Completed. Model saved as %s' % save_fname)
		timer.stop()
예제 #27
0
    def continue_training(self, epoch_start, model_fname, statistics_fname,
                          data_obj, configs):
        """Resume training an already-built model from a given epoch.

        Reuses the existing checkpoint and CSV log files (CSVLogger appends)
        so metrics continue in the same statistics file.

        Args:
            epoch_start: epoch index to resume from (passed as
                ``initial_epoch`` to ``model.fit``).
            model_fname: path of the checkpoint file to keep updating.
            statistics_fname: path of the CSV metrics log to append to.
            data_obj: data container exposing ``train`` as an (x, y) pair and
                ``validation`` as validation data -- TODO confirm exact shape
                against the data class.
            configs: dict with ``training.epochs``, ``training.batch_size``
                and ``training.log_dir``.
        """
        timer = Timer()
        timer.start()
        epochs = configs['training']['epochs']
        batch_size = configs['training']['batch_size']
        print('[Model] Training Continuation')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
        callbacks = [
            # Overwrites the checkpoint only when training loss improves.
            ModelCheckpoint(filepath=model_fname,
                            monitor='loss',
                            save_best_only=True),
            # NOTE(review): batch_size/write_grads/embeddings_* look like
            # TF1-era TensorBoard callback arguments -- confirm they are
            # accepted by the TF/Keras version in use.
            TensorBoard(log_dir=configs['training']['log_dir'],
                        histogram_freq=5,
                        batch_size=configs['training']['batch_size'],
                        write_graph=True,
                        write_grads=False,
                        write_images=False,
                        embeddings_freq=0,
                        embeddings_layer_names=None,
                        embeddings_metadata=None,
                        embeddings_data=None,
                        update_freq='epoch'),
            # Halve the LR after 5 stagnant epochs, floored at 1e-6.
            ReduceLROnPlateau(monitor='loss',
                              factor=0.5,
                              patience=5,
                              min_lr=1e-6,
                              verbose=1,
                              cooldown=0),
            TerminateOnNaN(),
            # append=True so resumed runs extend the existing statistics file.
            CSVLogger(statistics_fname, separator=',', append=True),
            # Stop once training loss improves by less than 1e-8 for 10 epochs.
            EarlyStopping(
                monitor='loss',
                patience=10,
                verbose=1,
                min_delta=1e-8,
            )
        ]
        try:
            self.model.fit(x=data_obj.train[0],
                           y=data_obj.train[1],
                           batch_size=configs['training']['batch_size'],
                           epochs=configs['training']['epochs'],
                           validation_data=data_obj.validation,
                           shuffle=True,
                           steps_per_epoch=None,
                           validation_steps=None,
                           callbacks=callbacks,
                           initial_epoch=epoch_start)
        except KeyboardInterrupt:
            # Ctrl-C is treated as a graceful stop: the best checkpoint so far
            # is already on disk thanks to ModelCheckpoint.
            print(
                '[Model] Training Interrupted by Keyboard. Model saved as %s' %
                model_fname)
            timer.stop()
            return

        print('[Model] Training Completed. Model saved as %s' % model_fname)
        timer.stop()
Example #28
0
    def train(self, data_obj, configs):
        """Fit the model on in-memory arrays with checkpointing, TensorBoard
        logging, a cyclic learning rate, NaN termination, CSV metrics and
        early stopping.

        Args:
            data_obj: data container exposing ``X_train``, ``y_train`` and
                ``train_samples``.
            configs: dict providing ``training.*``, ``model.*`` and
                ``data.*`` settings used below.
        """
        stopwatch = Timer()
        stopwatch.start()
        epochs = configs['training']['epochs']
        batch_size = configs['training']['batch_size']
        save_dir = configs['model']['save_dir']
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        # Checkpoint and statistics files share one timestamped basename.
        stamp = dt.datetime.now().strftime('%d%m%Y-%H%M%S')
        model_fname = os.path.join(save_dir,
                                   '%s-e%s.h5' % (stamp, str(epochs)))
        statistics_fname = os.path.join(
            save_dir, '%s-e%s.log' % (stamp, str(epochs)))

        checkpoint = ModelCheckpoint(filepath=model_fname,
                                     monitor='loss',
                                     save_best_only=True)
        board = TensorBoard(log_dir=configs['training']['log_dir'],
                            histogram_freq=5,
                            batch_size=configs['training']['batch_size'],
                            write_graph=True,
                            write_grads=False,
                            write_images=False,
                            embeddings_freq=0,
                            embeddings_layer_names=None,
                            embeddings_metadata=None,
                            embeddings_data=None,
                            update_freq='epoch'),
        # step_size = 8 epochs' worth of batches over the usable
        # (sequence-trimmed) training samples; LR cycles between base_lr
        # and max_lr with triangular2 decay.
        cyclic_lr = CyclicLR(
            base_lr=configs['model']['lr'],
            max_lr=0.1,
            step_size=8 *
            (data_obj.train_samples - configs['data']['sequence_length']) /
            configs['training']['batch_size'],
            scale_fn=None,
            mode='triangular2')
        callbacks = [
            checkpoint,
            board,
            cyclic_lr,
            TerminateOnNaN(),
            CSVLogger(statistics_fname, separator=',', append=True),
            EarlyStopping(monitor='val_loss', patience=10, verbose=1),
        ]

        self.model.fit(x=data_obj.X_train,
                       y=data_obj.y_train,
                       batch_size=configs['training']['batch_size'],
                       epochs=configs['training']['epochs'],
                       validation_split=configs['data']['validation_portion'],
                       shuffle=True,
                       steps_per_epoch=None,
                       validation_steps=None,
                       callbacks=callbacks)
        print('[Model] Training Completed. Model saved as %s' % model_fname)
        stopwatch.stop()
Example #29
0
 def predict_point_by_point(self, data):
     """Predict a single step ahead for every input sequence.

     Each prediction is conditioned on the true preceding sequence, so the
     model is only ever asked to look one timestep into the future.
     Returns the predictions flattened to a 1-D array.
     """
     stopwatch = Timer()
     stopwatch.start()
     logger.info('[Model] Predicting Point-by-Point...')
     preds = self.model.predict(data)
     # Collapse whatever shape predict() returns into a flat vector.
     preds = preds.reshape(preds.size)
     stopwatch.stop()
     return preds
Example #30
0
    def build_cnn(self, configs):
        timer = Timer()
        timer.start()

        for layer in configs['cnn_model']['layers']:
            neurons = layer['neurons'] if 'neurons' in layer else None
            dropout_rate = layer['rate'] if 'rate' in layer else None
            activation = layer['activation'] if 'activation' in layer else None
            return_seq = layer['return_seq'] if 'return_seq' in layer else None
            input_timesteps = layer['input_timesteps'] if 'input_timesteps' in layer else None
            input_dim = layer['input_dim'] if 'input_dim' in layer else None