Example #1
    def build_model(self, configs):
        timer = Timer()
        timer.start()

        for layer in configs['model']['layers']:
            neurons = layer['neurons'] if 'neurons' in layer else None
            dropout_rate = layer['rate'] if 'rate' in layer else None
            activation = layer['activation'] if 'activation' in layer else None
            return_seq = layer['return_seq'] if 'return_seq' in layer else None
            input_timesteps = layer[
                'input_timesteps'] if 'input_timesteps' in layer else None
            input_dim = layer['input_dim'] if 'input_dim' in layer else None

            if layer['type'] == 'dense':
                self.model.add(Dense(neurons, activation=activation))
            if layer['type'] == 'lstm':
                self.model.add(
                    LSTM(neurons,
                         input_shape=(input_timesteps, input_dim),
                         return_sequences=return_seq))
            if layer['type'] == 'dropout':
                self.model.add(Dropout(dropout_rate))

        self.model.compile(loss=configs['model']['loss'],
                           optimizer=configs['model']['optimizer'])

        print(f'{timer.stop()} Model Compiled')
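
Nearly every example below leans on a small stopwatch-style Timer with start()/stop()
methods (Example #3 chains Timer().start(); Example #1 formats the return value of
stop()). A minimal sketch that supports both usages; the actual class in each project
may differ:

import datetime as dt

class Timer:
    """Minimal stopwatch: start() returns self so Timer().start() can be chained."""

    def __init__(self):
        self.start_dt = None

    def start(self):
        self.start_dt = dt.datetime.now()
        return self

    def stop(self):
        # Report and return the elapsed wall-clock time.
        elapsed = dt.datetime.now() - self.start_dt
        print('Time taken: %s' % elapsed)
        return elapsed
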
Example #2
    def build_cnn(self, configs):
        timer = Timer()
        timer.start()

        for layer in configs['cnn_model']['layers']:
            neurons = layer['neurons'] if 'neurons' in layer else None
            dropout_rate = layer['rate'] if 'rate' in layer else None
            activation = layer['activation'] if 'activation' in layer else None
            return_seq = layer['return_seq'] if 'return_seq' in layer else None
            input_timesteps = layer['input_timesteps'] if 'input_timesteps' in layer else None
            input_dim = layer['input_dim'] if 'input_dim' in layer else None
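
            # NOTE: this excerpt is truncated; the layer-construction branch
            # presumably mirrors build_model in Example #1 (adding Dense/LSTM/
            # Dropout layers by layer['type'], then compiling the model).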
Example #3
def generate_gics_sector(data: pd.DataFrame):
    """
    Append additional column with 2-digit GICS code

    :param data: Original Data Frame
    :return: None
    """

    print('Generating gics_sector column ...')
    timer = Timer().start()
    data.loc[:, 'gics_sector'] = data['gsubind'].apply(lambda x: str(x)[:2] if not pd.isna(x) else '')
    timer.stop()
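
A quick usage sketch for generate_gics_sector; the gsubind values below are invented
for illustration (Compustat's gsubind is the 8-digit GICS sub-industry code, whose
first two digits identify the sector):

import pandas as pd

df = pd.DataFrame({'gsubind': [10101010.0, 45202030.0, None]})
generate_gics_sector(df)
print(df['gics_sector'].tolist())  # ['10', '45', '']
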
Example #4
    def continue_training(self, epoch_start, model_fname, statistics_fname,
                          data_obj, configs):
        timer = Timer()
        timer.start()
        epochs = configs['training']['epochs']
        batch_size = configs['training']['batch_size']
        print('[Model] Training Continuation')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
        callbacks = [
            ModelCheckpoint(filepath=model_fname,
                            monitor='loss',
                            save_best_only=True),
            TensorBoard(log_dir=configs['training']['log_dir'],
                        histogram_freq=0,
                        batch_size=configs['training']['batch_size'],
                        write_graph=True,
                        write_grads=False,
                        write_images=False,
                        embeddings_freq=0,
                        embeddings_layer_names=None,
                        embeddings_metadata=None,
                        embeddings_data=None,
                        update_freq='epoch'),
            ReduceLROnPlateau(monitor='loss',
                              factor=0.5,
                              patience=5,
                              min_lr=1e-6,
                              verbose=1,
                              cooldown=0),
            # CyclicLR(base_lr=configs['model']['lr'],
            #          max_lr=0.1,
            #          step_size=8*(data_obj.train_samples-configs['data']['sequence_length'])/configs['training']['batch_size'],
            #          scale_fn=None,
            #          mode='triangular2'),
            TerminateOnNaN(),
            CSVLogger(statistics_fname, separator=',', append=True),
            EarlyStopping(monitor='loss', patience=10, verbose=1)
        ]
        try:
            self.model.fit_generator(data_obj.train,
                                     steps_per_epoch=None,
                                     epochs=configs['training']['epochs'],
                                     verbose=0,
                                     callbacks=callbacks,
                                     validation_data=data_obj.validation,
                                     validation_steps=None,
                                     shuffle=True,
                                     max_queue_size=100,
                                     workers=5,
                                     use_multiprocessing=False,
                                     initial_epoch=epoch_start)
        except KeyboardInterrupt:
            print(
                '[Model] Training Interrupted by Keyboard. Model saved as %s' %
                model_fname)
            timer.stop()
            return

        print('[Model] Training Completed. Model saved as %s' % model_fname)
        timer.stop()
Example #5
    def continue_training(self, epoch_start, model_fname, statistics_fname,
                          data_obj, configs):
        timer = Timer()
        timer.start()
        epochs = configs['training']['epochs']
        batch_size = configs['training']['batch_size']
        print('[Model] Training Continuation')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
        callbacks = [
            ModelCheckpoint(filepath=model_fname,
                            monitor='loss',
                            save_best_only=True),
            TensorBoard(log_dir=configs['training']['log_dir'],
                        histogram_freq=5,
                        batch_size=configs['training']['batch_size'],
                        write_graph=True,
                        write_grads=False,
                        write_images=False,
                        embeddings_freq=0,
                        embeddings_layer_names=None,
                        embeddings_metadata=None,
                        embeddings_data=None,
                        update_freq='epoch'),
            ReduceLROnPlateau(monitor='loss',
                              factor=0.5,
                              patience=5,
                              min_lr=1e-6,
                              verbose=1,
                              cooldown=0),
            TerminateOnNaN(),
            CSVLogger(statistics_fname, separator=',', append=True),
            EarlyStopping(
                monitor='loss',
                patience=10,
                verbose=1,
                min_delta=1e-8,
            )
        ]
        try:
            self.model.fit(x=data_obj.train[0],
                           y=data_obj.train[1],
                           batch_size=configs['training']['batch_size'],
                           epochs=configs['training']['epochs'],
                           validation_data=data_obj.validation,
                           shuffle=True,
                           steps_per_epoch=None,
                           validation_steps=None,
                           callbacks=callbacks,
                           initial_epoch=epoch_start)
        except KeyboardInterrupt:
            print(
                '[Model] Training Interrupted by Keyboard. Model saved as %s' %
                model_fname)
            timer.stop()
            return

        print('[Model] Training Completed. Model saved as %s' % model_fname)
        timer.stop()
Example #6
def predict_time_left(x: np.ndarray, y: np.ndarray, name: str, mode: str = "BAYESIAN") -> tuple:
    functions = {
        "OLS": OLS,
        "RANSAC": RANSAC,
        "BAYESIAN": BAYESIAN
    }

    # Reshape the array as a single-feature array for the predictors
    x = x.reshape(-1, 1)

    if mode in functions:
        logger.info("Predicting using the mode [%s]" % mode)
        with Timer("The prediction took {time}s"):
            m, q, p = functions[mode](x, y)
            logger.info("The coefficients predicted are m [{m}] q [{q}]".format(**locals()))
        if m <= 0:
            logger.info("The predicted line is not growing, so it will never reach the max")
            return "inf", p
        time_predicted = (1 - q) / m
        delta = time_predicted - x[-1]
        if delta > MAX_TIME:
            logger.info(f"The predicted time [{delta}] is over [{MAX_TIME}], so it is cast to [inf]")
            return "inf", p
        return delta[0], p
    logger.error("Mode [%s] not found, the available ones are %s" % (mode, list(functions)))
    return None, 0
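
Example #6 uses a different Timer: a context manager that takes a message template
with a {time} placeholder. A minimal sketch of such a class, as an assumption (the
example's actual implementation is not shown):

import time

class Timer:
    """Context manager that times its block and formats the result into a template."""

    def __init__(self, template='{time}s elapsed'):
        self.template = template

    def __enter__(self):
        self._start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        elapsed = time.perf_counter() - self._start
        print(self.template.format(time=elapsed))
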
Example #7
def append_columns(data: pd.DataFrame, folder_path: str, file_name: str, column_list: list):
    """
    Retroactively append columns to index data

    :param column_list: Columns to append
    :param data: Original DataFrame to append data to
    :param folder_path: Path to index data
    :param file_name: Name of file containing the additional columns
    :return: None
    """

    saved_index_cols = None
    if not isinstance(data.index, pd.RangeIndex) and isinstance(
            data.index, (pd.MultiIndex, pd.Index)):
        saved_index_cols = list(data.index.names)
        if saved_index_cols[0] is None:
            saved_index_cols = None

    # JOB: Load DataFrame with new columns
    new_col_df = pd.read_csv(os.path.join(ROOT_DIR, folder_path, file_name),
                             header=0, dtype={'gvkey': str})

    new_col_df.loc[:, 'datadate'] = pd.to_datetime(new_col_df['datadate'], format='%Y%m%d')
    new_col_df.set_index(['datadate', 'gvkey', 'iid'], inplace=True)

    # JOB: Filter by relevant columns
    new_col_df = new_col_df.loc[:, column_list]

    # JOB: Merge data with new columns

    timer = Timer().start()
    if saved_index_cols is not None:
        print('Resetting existing index.')
        data.reset_index(inplace=True, drop=False)

    print('Merging full data set with new columns ...')
    data.set_index(['datadate', 'gvkey', 'iid'], inplace=True)

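    # Note: merge returns a new DataFrame, so the local name 'data' is rebound
    # here; changes made from this point on do not reach the caller's frame.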
    data = data.merge(new_col_df, how='left', left_index=True, right_index=True)
    data.reset_index(inplace=True)
    timer.stop()

    # Save to file
    print('\nSaving appended DataFrame to file ...')
    timer = Timer().start()
    data.to_csv(os.path.join(ROOT_DIR, folder_path, 'index_data_constituents_test.csv'))
    timer.stop()

    if saved_index_cols is not None:
        data.set_index(saved_index_cols, inplace=True)
Example #8
    def fit_one_by_one(self, x_train_o, y_train_o, x_val_o, y_val_o,
                       batch_size, epochs, callbacks):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
        print('[Model] Train Shape: %s' % str(x_train_o.shape))
        print('[Model] Validation Shape: %s' % str(x_val_o.shape))

        try:
            self.model.fit(x_train_o,
                           y_train_o,
                           epochs=epochs,
                           batch_size=batch_size,
                           validation_data=[x_val_o, y_val_o],
                           verbose=Options.KerasVerbose,
                           callbacks=callbacks)
        except KeyboardInterrupt:
            print('Keyboard interrupt.')
            pass

        save_fname = os.path.join(
            Options.KerasNNSaveDirectory,
            '%s-e%s.hdf5' % (dt.datetime.now().strftime('%Y%m%d-%H%M%S'),
                             str(len(self.model.history.epoch))))
        self.model.save(save_fname)
        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
Example #9
    def build_windowed_batch_LSTM_model(self):
        timer = Timer()
        timer.start()
        Options.WindowSequenceLength = 56
        Options.WindowShiftStep = 1
        Options.KerasEpochs = 30
        Options.KerasWindowedBatchSize = 400
        Options.KerasEarlyStopping = False

        #self.model.add(Reshape((Options.WindowSequenceLength, Options.InputFeatureSize), input_shape=(Options.WindowSequenceLength, Options.InputFeatureSize,)))
        self.model.add(
            LSTM(100,
                 input_shape=(Options.WindowSequenceLength,
                              Options.InputFeatureSize),
                 return_sequences=True))
        self.model.add(Dropout(0.2))
        self.model.add(LSTM(100, return_sequences=True))
        self.model.add(LSTM(100, return_sequences=False))
        self.model.add(Dropout(0.2))
        self.model.add(
            Dense(1, kernel_initializer='normal', activation='linear'))
        optimizer = RMSprop(lr=0.00050)
        #optimizer = Adam()
        self.model_loss = 'mean_squared_error'  #'mean_squared_error', 'mae'
        self.model.compile(optimizer=optimizer,
                           loss=self.model_loss,
                           metrics=list(set(['mae', self.model_loss])))
        print('[Model] Model Compiled')
        timer.stop()
        return self.model
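
The windowed models in Examples #9 and #21 expect batches of shape
(samples, Options.WindowSequenceLength, Options.InputFeatureSize). One way to build
such windows from a 2-D feature matrix, as a sketch (make_windows is a hypothetical
helper, not part of these projects):

import numpy as np

def make_windows(features, seq_len=56, step=1):
    """Slide a window over (n_rows, n_features) to get (n_windows, seq_len, n_features)."""
    windows = [features[i:i + seq_len]
               for i in range(0, len(features) - seq_len + 1, step)]
    return np.stack(windows)

# e.g. make_windows(np.zeros((500, 8)), seq_len=56).shape == (445, 56, 8)
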
Example #10
    def build_one_by_one_model_ngut(self):
        timer = Timer()
        timer.start()
        Options.KerasEarlyStopping = False
        self.model.add(
            Dense(64,
                  activation='sigmoid',
                  input_dim=Options.InputFeatureSize,
                  activity_regularizer=keras.regularizers.l2(0.01)))
        self.model.add(BatchNormalization())
        self.model.add(LeakyReLU())
        self.model.add(
            Dense(16, activity_regularizer=keras.regularizers.l2(0.01)))
        self.model.add(BatchNormalization())
        self.model.add(LeakyReLU())
        self.model.add(Dense(1))
        self.model.add(Activation('linear'))
        optimizer = RMSprop(lr=0.00050)
        self.model_loss = 'mae'  # 'mean_squared_error', 'mae'
        self.model.compile(optimizer=optimizer,
                           loss=self.model_loss,
                           metrics=list(set(['mae', self.model_loss])))
        print('[Model] Model Compiled')
        print('[Model] Model Loss Function is %s.' % (self.model_loss))
        timer.stop()
        self.model.summary()
        return self.model
Example #11
    def build_one_by_one_model_test(self):
        timer = Timer()
        timer.start()
        Options.KerasEarlyStopping = False
        self.model.add(Dropout(0.2, input_shape=(Options.InputFeatureSize, )))
        self.model.add(
            Dense(60,
                  input_dim=Options.InputFeatureSize,
                  activation='sigmoid',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros'))  # CAREFUL: SIGMOID, NO NORMAL INIT
        self.model.add(Dropout(0.2))
        # self.model.add(Dense(25, activation='relu'))
        self.model.add(Dense(1, activation='linear'))  # CAREFUL: SIGMOID
        optimizer = RMSprop(lr=0.00050)
        # optimizer = RMSprop()
        # optimizer = SGD()
        # optimizer = Adam()
        self.model_loss = 'mae'  # 'mean_squared_error', 'mae', 'logcosh'
        self.model.compile(optimizer=optimizer,
                           loss=self.model_loss,
                           metrics=list(
                               set([
                                   'mae', 'mean_absolute_percentage_error',
                                   'mean_squared_error', 'logcosh',
                                   self.model_loss
                               ])))
        print('[Model] Model Compiled')
        print('[Model] Model Loss Function is %s.' % (self.model_loss))
        timer.stop()
        self.model.summary()
        return self.model
Example #12
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        '''
        Produce training data step by step from the data_gen generator,
        instead of reading all of the data into memory at once.
        '''
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]
        self.model.fit_generator(data_gen,
                                 steps_per_epoch=steps_per_epoch,
                                 epochs=epochs,
                                 callbacks=callbacks,
                                 workers=1)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
Example #13
    def train(self, x, y, epochs, batch_size):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        # save_fname = 'saved_models/%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs))

        # The same fixed path is used for both the sp500 and the tracker data:
        save_fname = 'saved_models/tracker.h5'

        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_loss',
                            save_best_only=True)
        ]
        self.model.fit(x,
                       y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=callbacks)
        self.model.save(save_fname)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
Example #14
	def build_model(self, configs):
		timer = Timer()
		timer.start()

		for layer in configs['model']['layers']:
			neurons = layer['neurons'] if 'neurons' in layer else None
			dropout_rate = layer['rate'] if 'rate' in layer else None
			activation = layer['activation'] if 'activation' in layer else None
			return_seq = layer['return_seq'] if 'return_seq' in layer else None
			input_timesteps = layer['input_timesteps'] if 'input_timesteps' in layer else None
			input_dim = layer['input_dim'] if 'input_dim' in layer else None

			if layer['type'] == 'dense':
				# Add a fully connected (Dense) layer
				self.model.add(Dense(neurons, activation=activation))
			if layer['type'] == 'lstm':
				self.model.add(LSTM(neurons, input_shape=(input_timesteps, input_dim), return_sequences=return_seq))
			if layer['type'] == 'dropout':
				# Dropout applied to the inputs
				self.model.add(Dropout(dropout_rate))

		# Compile the model. Compilation is only needed when building the model:
		# it configures the optimizer, loss function, etc.; prediction does not
		# require recompiling. The loss, optimizer and callback metrics are all
		# chosen at compile time.
		self.model.compile(loss=configs['model']['loss'],
		                   optimizer=configs['model']['optimizer'],
		                   metrics=['accuracy', 'mae'])

		print('[Model] Model Compiled')
		timer.stop()
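
For reference, the configs structure that these build_model variants read would look
roughly like this; the key names come from the code above, while the values are
illustrative only:

configs = {
    'model': {
        'loss': 'mse',
        'optimizer': 'adam',
        'layers': [
            {'type': 'lstm', 'neurons': 100, 'input_timesteps': 49,
             'input_dim': 2, 'return_seq': True},
            {'type': 'dropout', 'rate': 0.2},
            {'type': 'lstm', 'neurons': 100, 'return_seq': False},
            {'type': 'dense', 'neurons': 1, 'activation': 'linear'}
        ]
    }
}
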
Example #15
    def train(self, x, y, epochs, batch_size, save_dir):
        timer = Timer()
        timer.start()
        print("[Model] Training Started")
        print("[Model] %s epochs, %s batch size" % (epochs, batch_size))

        save_fname = os.path.join(
            save_dir,
            "%s-e%s.h5" %
            (dt.datetime.now().strftime("%d%m%Y-%H%M%S"), str(epochs)),
        )
        callbacks = [
            EarlyStopping(monitor="val_loss", patience=2),
            ModelCheckpoint(filepath=save_fname,
                            monitor="val_loss",
                            save_best_only=True),
        ]
        self.model.fit(x,
                       y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=callbacks)
        self.model.save(save_fname)

        print("[Model] Training Completed. Model saved as %s" % save_fname)
        timer.stop()
Example #16
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir, configs):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))
        log_dir = "logs/fit/" + datetime.datetime.now().strftime(
            "%Y%m%d-%H%M%S")
        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                              histogram_freq=1)
        save_fname = os.path.join(
            save_dir, '%s-%s-e%s.h5' %
            (configs['data']['filename'],
             dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True), tensorboard_callback
        ]
        self.model.fit_generator(data_gen,
                                 steps_per_epoch=steps_per_epoch,
                                 epochs=epochs,
                                 callbacks=callbacks,
                                 workers=1)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
Example #17
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        timer = Timer()
        timer.start()
        print('MODEL Out-of-Memory Training Started')
        print(
            f'MODEL {epochs} epochs, {batch_size} batch size, {steps_per_epoch} batches per epoch'
        )

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))

        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]

        self.model.fit_generator(data_gen,
                                 steps_per_epoch=steps_per_epoch,
                                 epochs=epochs,
                                 callbacks=callbacks,
                                 workers=1)

        print(
            f'MODEL Out-of-Memory Training Completed. Model saved as {save_fname}'
        )
        timer.stop()
Example #18
    def train(self, x, y, epochs, batch_size, save_dir):
        timer = Timer()
        timer.start()
        logger.info('[Model] Training Started')
        logger.info('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_loss',
                            save_best_only=True)
        ]
        self.model.fit(x,
                       y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=callbacks)
        self.model.save(save_fname)

        logger.info('[Model] Training Completed. Model saved as %s' %
                    save_fname)
        timer.stop()
Example #19
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    ims = [cv2.imread(im_file)]

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    batch_scores, batch_boxes = im_detect(net, ims)
    scores = batch_scores[0]
    boxes = batch_boxes[0]
    timer.toc()
    print('Detection took {:.3f}s for '
          '{:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_scores = scores[:, cls_ind]
        cls_boxes = boxes
        dets = np.hstack(
            (cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH, force_cpu=True)
        dets = dets[keep, :]
        vis_detections(ims[0], cls, dets, thresh=CONF_THRESH)
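
Example #19 times detection with yet another interface: tic()/toc() plus a total_time
attribute, in the style of the Fast R-CNN utilities. A reduced sketch (the original
timer also tracks call counts and an average time, omitted here):

import time

class Timer:
    """Reduced tic/toc timer; toc() records the elapsed seconds in total_time."""

    def __init__(self):
        self.total_time = 0.
        self.start_time = 0.

    def tic(self):
        self.start_time = time.time()

    def toc(self):
        self.total_time = time.time() - self.start_time
        return self.total_time
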
Example #20
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        timer = Timer()
        timer.start()
        print("[Model] Training Started")
        print("[Model] %s epochs, %s batch size, %s batches per epoch" %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir,
            "%s-e%s.h5" %
            (dt.datetime.now().strftime("%d%m%Y-%H%M%S"), str(epochs)),
        )
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor="loss",
                            save_best_only=True)
        ]
        self.model.fit(
            data_gen,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            callbacks=callbacks,
            workers=1,
        )
        print("[Model] Training Completed. Model saved as %s" % save_fname)
        timer.stop()
Example #21
    def build_windowed_batch_CONV_model(self):
        timer = Timer()
        timer.start()
        Options.KerasEpochs = 20
        Options.KerasWindowedBatchSize = 400

        #self.model.add(Reshape((Options.WindowSequenceLength, Options.InputFeatureSize), input_shape=(input_shape,)))
        self.model.add(
            Conv1D(100,
                   10,
                   activation='relu',
                   input_shape=(Options.WindowSequenceLength,
                                Options.InputFeatureSize)))
        self.model.add(Conv1D(100, 10, activation='relu'))
        self.model.add(MaxPooling1D(3))
        self.model.add(Conv1D(160, 10, activation='relu'))
        #self.model.add(Conv1D(160, 10, activation='relu'))
        self.model.add(GlobalAveragePooling1D())
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1, activation='linear'))

        optimizer = RMSprop(lr=0.00050)
        #optimizer = Adam()
        self.model_loss = 'mae'  #'mean_squared_error', 'mae'
        self.model.compile(optimizer=optimizer,
                           loss=self.model_loss,
                           metrics=list(set(['mae', self.model_loss])))
        print('[Model] Model Compiled')
        timer.stop()
Example #22
    def train(self, x, y, epochs, batch_size, save_dir, x_val, y_val):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        validation_data = (x_val, y_val) if x_val is not None and y_val is not None else None
        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_loss',
                            save_best_only=True)
        ]
        hist = self.model.fit(x,
                              y,
                              epochs=epochs,
                              batch_size=batch_size,
                              callbacks=callbacks,
                              validation_data=validation_data)
        self.model.save(save_fname)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()

        return hist
Example #23
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%Y%m%d-%H%M%S'), str(epochs)))

        callbacks = [
            #ModelCheckpoint(filepath=save_fname, monitor='loss', save_best_only=True),
            SignalStopping(doubleSignalExits=True,
                           verbose=Options.KerasVerbose)
        ]

        self.model.fit_generator(
            data_gen,
            #validation_data = [],
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            callbacks=callbacks,
            # workers=1
        )

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
Example #24
    def build_model(self, configs):
        timer = Timer()
        timer.start()

        for layer in configs["model"]["layers"]:
            neurons = layer["neurons"] if "neurons" in layer else None
            dropout_rate = layer["rate"] if "rate" in layer else None
            activation = layer["activation"] if "activation" in layer else None
            return_seq = layer["return_seq"] if "return_seq" in layer else None
            input_timesteps = (layer["input_timesteps"]
                               if "input_timesteps" in layer else None)
            input_dim = layer["input_dim"] if "input_dim" in layer else None

            if layer["type"] == "dense":
                self.model.add(Dense(neurons, activation=activation))
            if layer["type"] == "lstm":
                self.model.add(
                    LSTM(
                        neurons,
                        input_shape=(input_timesteps, input_dim),
                        return_sequences=return_seq,
                    ))
            if layer["type"] == "dropout":
                self.model.add(Dropout(dropout_rate))

        self.model.compile(loss=configs["model"]["loss"],
                           optimizer=configs["model"]["optimizer"])

        print("[Model] Model Compiled")
        timer.stop()
Example #25
	def train(self, x, y, epochs, batch_size, save_dir, history, x_test=None, y_test=None):
		timer = Timer()
		timer.start()
		print('[Model] Training Started')
		print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
		
		save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
		callbacks = [
			EarlyStopping(monitor='val_loss', patience=2),
			ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True),
			history
		]
		# Pass the validation set and the callbacks to fit
		self.model.fit(
			x,
			y,
			epochs=epochs,
			batch_size=batch_size,
			callbacks=callbacks,
			validation_data=(x_test, y_test) if x_test is not None and y_test is not None else None
		)
		self.model.save(save_fname)

		global newest_model
		newest_model = save_fname

		print('[Model] Training Completed. Model saved as %s' % save_fname)
		timer.stop()
Example #26
    def train(self, X, y, epochs, batch_size, save_dir, logs):
        timer = Timer()
        timer.start()
        print('MODEL Training Started')
        print(f'MODEL {epochs} epochs, {batch_size} batch size')

        save_fname = os.path.join(
            save_dir,
            f'{dt.datetime.now().strftime("%d%m%Y-%H%M%S")}-e{epochs}.h5')

        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname,
                            monitor='val_loss',
                            save_best_only=True),
            TensorBoard(
                log_dir=
                f'{logs}/{dt.datetime.now().strftime("%d%m%Y-%H%M%S")}-e{epochs}'
            )
        ]

        self.model.fit(X,
                       y,
                       epochs=epochs,
                       batch_size=batch_size,
                       callbacks=callbacks,
                       shuffle=False)
        self.model.save(save_fname)

        print(f'MODEL Training Completed. Model saved as {save_fname}')
        timer.stop()
Example #27
    def train_generator(self,
                        data_gen,
                        epochs,
                        batch_size,
                        steps_per_epoch,
                        save_dir,
                        validation_data=None,
                        validation_steps=None):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]
        hist = self.model.fit_generator(data_gen,
                                        steps_per_epoch=steps_per_epoch,
                                        epochs=epochs,
                                        callbacks=callbacks,
                                        workers=1,
                                        validation_data=validation_data,
                                        validation_steps=validation_steps)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()
        return hist
Example #28
    def build_model(self, configs):
        timer = Timer()
        timer.start()

        for i, layer in enumerate(configs['model']['layers']):
            neurons = layer['neurons'] if 'neurons' in layer else None
            dropout_rate = layer['rate'] if 'rate' in layer else None
            activation = layer['activation'] if 'activation' in layer else None
            return_seq = layer['return_seq'] if 'return_seq' in layer else None
            input_timesteps = configs['data'][
                'sequence_length'] if 'input_timesteps' in layer else None
            input_dim = layer['input_dim'] if 'input_dim' in layer else None

            if layer['type'] == 'dense':
                self.model.add(Dense(neurons, activation=activation))
            if layer['type'] == 'lstm':
                self.model.add(
                    LSTM(neurons,
                         input_shape=(input_timesteps, input_dim),
                         return_sequences=return_seq))
            if layer['type'] == 'dropout':
                self.model.add(Dropout(dropout_rate))
        optimizer = Adam(lr=configs['model']['lr'])
        self.model.compile(loss=configs['model']['loss'], optimizer=optimizer)

        print('[Model] Model Compiled')
        print(self.model.summary())
        timer.stop()
Example #29
    def train_generator(self, train_loader, val_loader, epochs, batch_size,
                        steps_per_epoch, validation_steps, save_dir, log_dir):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        time_format = time.localtime(time.time())
        model_fname = 'model-%s-e{epoch:02d}-{val_loss:.5f}.h5' % (
            time.strftime('%Y%m%d%H%M%S', time_format))
        model_save_path = os.path.join(save_dir, model_fname)
        lr_schedule = lambda epoch: 0.001 * 0.95**epoch
        learning_rate = np.array([lr_schedule(i) for i in range(epochs)])
        callbacks = [
            ModelCheckpoint(filepath=model_save_path,
                            monitor='val_loss',
                            save_best_only=False),
            LearningRateScheduler(lambda epoch: float(learning_rate[epoch])),
            EarlyStopping(monitor='val_loss', patience=2, verbose=1),
            TensorBoard(log_dir=log_dir, write_graph=True)
        ]
        self.model.fit_generator(train_loader,
                                 steps_per_epoch=steps_per_epoch,
                                 epochs=epochs,
                                 validation_data=val_loader,
                                 validation_steps=validation_steps,
                                 callbacks=callbacks)
        print('[Model] Training Completed. Model saved as %s' %
              model_save_path)
        timer.stop()
Example #30
    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch,
                        save_dir):
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' %
              (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(
            save_dir, '%s-e%s.h5' %
            (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname,
                            monitor='loss',
                            save_best_only=True)
        ]
        self.model.fit_generator(
            data_gen,
            # steps_per_epoch: total number of batches drawn from the generator
            # before an epoch ends, usually dataset_size / batch_size
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,  # total number of training epochs
            callbacks=callbacks,
            workers=1)

        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()