def load_model(self, model_name):
    self._model_name = model_name
    model_path = os.path.join(get_parent_dir(), 'data', 'models', model_name)
    self.sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess=self.sess, save_path=model_path)
def save_model(self, model_name):
    self._model_name = model_name
    model_path = os.path.join(get_parent_dir(), 'data', 'models', model_name)
    saver = tf.train.Saver()
    save_path = saver.save(self.sess, model_path)
    logger.info("Saved to path: {0}".format(save_path))
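# Usage sketch for the TensorFlow save/restore pair above. This is a standalone toy
# example, not part of the model class: the variable `w`, the temporary checkpoint
# path and the printed value are made up for illustration, and TensorFlow 1.x is
# assumed (the tf.Session / tf.train.Saver API used throughout this file).
import os
import tempfile
import tensorflow as tf

ckpt_path = os.path.join(tempfile.mkdtemp(), 'toy_model')

w = tf.Variable([1.0, 2.0], name='w')
saver = tf.train.Saver()                      # created after the variables exist
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.save(sess, ckpt_path)               # what save_model() does with self.sess

with tf.Session() as sess:
    saver.restore(sess, ckpt_path)            # what load_model() does into self.sess
    print(sess.run(w))                        # -> [1. 2.]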
def load_model(self, model_name):
    model_path = os.path.join(get_parent_dir(), 'data', 'models', model_name)
    try:
        self.model = joblib.load(model_path)
    except Exception as ex:
        logger.error('Failed to load model: {0} with error: {1}'.format(model_path, ex))
    return self.model
def init(context):
    # model_path = os.path.join(get_parent_dir(), 'data', 'models', 'stock_selection_{0}'.format(model_name))
    model_path = os.path.join(get_parent_dir(), 'data', 'models',
                              'ridge_20150103_20181231_0.9_000905.ZICN')
    feature_names = get_selected_features()
    context.features = load_cache_features(__config__['base']['start_date'],
                                           __config__['base']['end_date'],
                                           __config__['base']['benchmark'])
    context.model = joblib.load(model_path)
    context.feature_names = feature_names
    context.neu = False
def train_model(self, train_X, train_Y, acc, n_epochs=100, batch_size=50, model_name=None):
    '''
    :param train_X: feature input, N*K matrix; N is the number of securities, K is the total number of sub-type features
    :param train_Y: training labels, N*1 vector; e.g. the return of each stock
    :param acc: industry feature input, N*I matrix; N is the number of securities
    :param n_epochs: number of training epochs
    :param batch_size: training batch size
    :param model_name: if given, the trained model is saved under data/models/<model_name>
    :return:
    '''
    init = tf.global_variables_initializer()
    # saver = tf.train.Saver()
    self._dataset = DataSet(train_X, train_Y, acc)
    with tf.Session() as self.sess:
        # Hard-coded, machine-specific summary directory; should probably be
        # derived from get_parent_dir() like the checkpoint path below.
        train_writer = tf.summary.FileWriter(
            r"E:\pycharm\quant_geek\quant_models\data\models", self.sess.graph)
        init.run()
        for epoch in range(n_epochs):
            logger.info('Running epoch {0} of {1}'.format(epoch + 1, n_epochs))
            for iteration in range(self._dataset.num_examples // batch_size):
                # Use a distinct name so the batch does not shadow the `acc` argument.
                x_batch, y_batch, acc_batch = self._dataset.next_batch(batch_size)
                self.sess.run(
                    [self.training_op],
                    feed_dict={
                        self.feature_inputs: x_batch,
                        self.train_Y: y_batch,
                        self.indust_inputs: acc_batch
                    })
            x_test, y_test, acc_test = self._dataset.next_batch(batch_size)
            test_loss, test_summary = self.sess.run(
                [self.loss, self.summary_op],
                feed_dict={
                    self.feature_inputs: x_test,
                    self.train_Y: y_test,
                    self.indust_inputs: acc_test
                })
            self._test_loss.append(test_loss)
            logger.info('epoch: {0}, test_loss: {1}'.format(epoch, test_loss))
            train_writer.add_summary(test_summary, epoch)
        train_writer.close()
        if model_name:
            self._model_name = model_name
            model_path = os.path.join(get_parent_dir(), 'data', 'models', model_name)
            saver = tf.train.Saver()
            save_path = saver.save(self.sess, model_path)
            logger.info("Saved to path: {0}".format(save_path))
    self._test_loss = np.array(self._test_loss)
    logger.info('mean for test loss: {0}, std: {1}, var: {2}'.format(
        self._test_loss.mean(), self._test_loss.std(), self._test_loss.var()))
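# Shape sketch for train_model's inputs, matching the docstring above: N securities,
# K sub-type feature columns, I industry columns. The sizes, the synthetic data and
# the driver class name `FeatureDNN` below are assumptions for illustration only --
# the enclosing class is not shown in this excerpt.
import numpy as np

N, K, I = 500, 64, 28                                            # assumed sizes
train_X = np.random.randn(N, K).astype(np.float32)               # N*K feature matrix
train_Y = np.random.randn(N, 1).astype(np.float32)               # N*1 labels, e.g. forward returns
acc = np.eye(I, dtype=np.float32)[np.random.randint(0, I, N)]    # N*I one-hot industry dummies

# model = FeatureDNN(...)   # hypothetical constructor that builds the graph first
# model.train_model(train_X, train_Y, acc, n_epochs=100, batch_size=50,
#                   model_name='dnn_20150103_20181231')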
def save_model(self, model_name):
    # Only the file name is passed in; the full path under data/models is built here,
    # mirroring the joblib-based load_model above.
    model_path = os.path.join(get_parent_dir(), 'data', 'models', model_name)
    joblib.dump(self.model, model_path, protocol=2)
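# Round-trip sketch for the joblib-based save_model/load_model pair. Standalone and
# illustrative only: Ridge is chosen because init() above loads a ridge model, the
# temporary path is made up, and the standalone joblib package is assumed.
import os
import tempfile
import numpy as np
import joblib
from sklearn.linear_model import Ridge

X = np.random.randn(100, 5)
y = np.random.randn(100)
model = Ridge(alpha=0.9).fit(X, y)

path = os.path.join(tempfile.mkdtemp(), 'ridge_demo')
joblib.dump(model, path, protocol=2)          # same call as save_model()
restored = joblib.load(path)                  # same call as load_model()
assert np.allclose(model.predict(X), restored.predict(X))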