def __init__(self, config, net_dir=None, agent=None, agent_type="nn"):
    """Back-test driver.

    :param config: config dictionary
    :param net_dir: directory of the trained network (used when agent_type == "nn")
    :param agent: pre-built agent; if None one is created from net_dir
    :param agent_type: "nn" for the neural network, "traditional" for baselines
    :raises ValueError: if agent_type is not one of the supported values
    """
    trader.Trader.__init__(self, config["input"]["global_period"], config, 0,
                           net_dir, initial_BTC=1, agent=agent,
                           agent_type=agent_type)
    if agent_type == "nn":
        data_matrices = self._rolling_trainer.data_matrices
    elif agent_type == "traditional":
        # Traditional baselines only consume close prices.
        config["input"]["feature_number"] = 1
        config["input"]["features_list"] = ["close"]
        data_matrices = DataMatrices.create_from_config(config)
    else:
        raise ValueError("unsupported agent_type: {}".format(agent_type))
    # Back-testing only needs the test set.
    self.__test_set = data_matrices.get_test_set()
    self.__test_length = self.__test_set["X"].shape[0]
    self._total_steps = self.__test_length
    self.__test_pv = 1.0       # portfolio value, starts at 1
    self.__test_pc_vector = []  # per-step portfolio change ratios
def __init__(self, config, device="cpu"):
    """Set up the data matrices and the neural-network agent.

    :param config: config dictionary
    :param device: the device used to train the network ("cpu" or a GPU)
    """
    train_cfg = config["training"]
    input_cfg = config["input"]
    self.config = config
    self.train_config = train_cfg
    self.input_config = input_cfg
    self.best_metric = 0
    self.__window_size = input_cfg["window_size"]
    self.__coin_number = input_cfg["coin_number"]
    self.__batch_size = train_cfg["batch_size"]

    matrix = DataMatrices.create_from_config(config)
    self._matrix = matrix
    self.stock_code = matrix.stock_code
    self.test_set = matrix.get_test_set()
    self.training_set = matrix.get_training_set()

    self.upperbound_validation = 1
    self.upperbound_test = 1

    self.device = device
    if device == "cpu":
        # Hide all GPUs so TensorFlow builds the graph on the CPU only.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        with tf.device("/cpu:0"):
            self._agent = NNAgent(config, device)
    else:
        self._agent = NNAgent(config, device)
def __init__(self, config, net_dir=None, agent=None, agent_type="nn",
             initial_asset=10000):
    """Back-test driver with a configurable starting asset.

    :param config: config dictionary
    :param net_dir: directory of the trained network (used when agent_type == "nn")
    :param agent: pre-built agent; if None one is created from net_dir
    :param agent_type: "nn" for the neural network, "traditional" for baselines
    :param initial_asset: starting capital for the back-test
    :raises ValueError: if agent_type is not one of the supported values
    """
    trader.Trader.__init__(self, 0, config, 0, net_dir,
                           initial_asset=initial_asset, agent=agent,
                           agent_type=agent_type)
    if agent_type == "nn":
        data_matrices = self._rolling_trainer.data_matrices
    elif agent_type == "traditional":
        # Traditional baselines only consume close prices.
        config["input"]["feature_number"] = 1
        data_matrices = DataMatrices.create_from_config(config)
    else:
        raise ValueError("unsupported agent_type: {}".format(agent_type))
    self.__test_set = data_matrices.get_test_set()
    self.__test_length = self.__test_set["X"].shape[0]
    self._total_steps = self.__test_length
    self.__test_pv = 1.0       # portfolio value, starts at 1
    self.__test_pc_vector = []  # per-step portfolio change ratios
def update_matrix(self):
    """Rebuild the data matrices so they include candles up to the current minute."""
    current_minute = datetime.fromtimestamp(time.time()).strftime("%Y/%m/%d %H:%M")
    self.config['input']['end_date'] = current_minute
    # _new_data forces a refresh instead of reusing cached matrices.
    self._matrix = DataMatrices.create_from_config(self.config, _new_data=True)
    self.test_set = self._matrix.get_test_set()
    self.last_info = self._matrix.get_last_info()
    print(self.last_info)
def __init__(self, config, main_config, fake_data=False, restore_dir=None,
             save_path=None, device="cpu", agent=None, index=None):
    """Trainer whose coin universe is discovered from the data itself.

    :param config: config dictionary
    :param main_config: top-level config (not referenced in this constructor)
    :param fake_data: if True will use data generated randomly
    :param restore_dir: path to the model trained before
    :param save_path: path to save the model
    :param device: the device used to train the network
    :param agent: an existing nnagent; when given, no new agent is created
        and restore_dir has no effect
    :param index: not referenced in this constructor
    """
    # Build the data pipeline first: the coin number comes from the data.
    matrix = DataMatrices.create_from_config(config)
    self._matrix = matrix
    self._coin_number = matrix.coin_number()
    config['input']['coin_number'] = self._coin_number

    self.config = config
    self.train_config = config["training"]
    self.input_config = config["input"]
    self.save_path = save_path
    self.best_metric = 0
    np.random.seed(config["random_seed"])

    self.__window_size = self.input_config["window_size"]
    self.__batch_size = self.train_config["batch_size"]
    self.__snap_shot = self.train_config["snap_shot"]
    config["input"]["fake_data"] = fake_data

    self._asset_list = matrix.coin_list
    self._time_index = matrix.global_weights.index
    self._test_portion = self.input_config["test_portion"]
    self.test_set = matrix.get_test_set()
    if not config["training"]["fast_train"]:
        self.training_set = matrix.get_training_set()
    self.upperbound_validation = 1
    self.upperbound_test = 1
    tf.set_random_seed(self.config["random_seed"])

    self.device = device
    if agent:
        self._agent = agent
    elif device == "cpu":
        # Hide all GPUs so TensorFlow pins everything to the CPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        with tf.device("/cpu:0"):
            self._agent = NNAgent(config, restore_dir, device)
    else:
        self._agent = NNAgent(config, restore_dir, device)
def __init__(self, config, fake_data=False, restore_dir=None, save_path=None,
             device="cpu", agent=None):
    """
    :param config: config dictionary
    :param fake_data: if True will use data generated randomly
    :param restore_dir: path to the model trained before
    :param save_path: path to save the model
    :param device: the device used to train the network
    :param agent: the nnagent object. If this is provided, the trainer will
        not create a new agent by itself. Therefore the restore_dir will
        not affect anything.
    """
    self.config = config
    self.train_config = config["training"]
    self.input_config = config["input"]
    self.save_path = save_path
    self.best_metric = 0
    np.random.seed(config["random_seed"])
    self.__window_size = self.input_config["window_size"]
    self.__coin_number = self.input_config["coin_number"]
    self.__batch_size = self.train_config["batch_size"]
    self.__snap_shot = self.train_config["snap_shot"]
    config["input"]["fake_data"] = fake_data
    # Creates DataMatrices from net_config.json.
    self._matrix = DataMatrices.create_from_config(config)
    self.test_set = self._matrix.get_test_set()
    # BUGFIX: get_training_set() used to be called twice -- once inside a
    # `fast_train` guard and once unconditionally right after -- which made
    # the guard dead code and doubled the call. It is now fetched exactly
    # once; the attribute was always set before, so the net effect is the same.
    self.training_set = self._matrix.get_training_set()
    self.upperbound_validation = 1
    self.upperbound_test = 1
    tf.set_random_seed(self.config["random_seed"])
    self.device = device
    # The agent branch is needed for back-testing, which supplies its own agent.
    if agent:
        self._agent = agent
    else:
        # First run: initialize a fresh NNAgent.
        if device == "cpu":
            os.environ["CUDA_VISIBLE_DEVICES"] = ""
            with tf.device("/cpu:0"):
                self._agent = NNAgent(config, restore_dir, device)
        else:
            self._agent = NNAgent(config, restore_dir, device)
def get_test_data(config):
    """
    :return : a 2d numpy array with shape (coin_number + 1, periods); row 0 is
        a constant row of ones (the cash asset) and every other element is a
        relative price
    """
    config["input"]["feature_number"] = 1
    config["input"]["norm_method"] = "relative"
    # (A no-op self-assignment of config["input"]["global_period"] was removed.)
    price_matrix = DataMatrices.create_from_config(config)
    test_set = price_matrix.get_test_set()["y"][:, 0, :].T
    # Prepend the cash row so the array covers the full portfolio.
    test_set = np.concatenate((np.ones((1, test_set.shape[1])), test_set),
                              axis=0)
    return test_set
def __init__(self, config, net_dir=None, agent=None, agent_type="nn"):
    """Back-test driver.

    :param config: config dictionary
    :param net_dir: directory of the trained network (used when agent_type == "nn")
    :param agent: pre-built agent; if None one is created from net_dir
    :param agent_type: "nn" for the neural network, "traditional" for baselines
    :raises ValueError: if agent_type is not one of the supported values
    """
    trader.Trader.__init__(self, 0, config, 0, net_dir, initial_BTC=1,
                           agent=agent, agent_type=agent_type)
    if agent_type == "nn":
        data_matrices = self._rolling_trainer.data_matrices
    elif agent_type == "traditional":
        # Traditional baselines only consume close prices.
        config["input"]["feature_number"] = 1
        data_matrices = DataMatrices.create_from_config(config)
    else:
        raise ValueError("unsupported agent_type: {}".format(agent_type))
    self.__test_set = data_matrices.get_test_set()
    self.__test_length = self.__test_set["X"].shape[0]
    self._total_steps = self.__test_length
    self.__test_pv = 1.0       # portfolio value, starts at 1
    self.__test_pc_vector = []  # per-step portfolio change ratios
def __init__(self, config, fake_data=False, restore_dir=None, save_path=None,
             device="cpu", agent=None):
    """Wire up configuration, data matrices and the NN agent.

    :param config: config dictionary
    :param fake_data: if True will use data generated randomly
    :param restore_dir: path to the model trained before
    :param save_path: path to save the model
    :param device: the device used to train the network
    :param agent: an existing nnagent; when given, no new agent is created
        and restore_dir has no effect
    """
    self.config = config
    train_cfg = config["training"]
    input_cfg = config["input"]
    self.train_config = train_cfg
    self.input_config = input_cfg
    self.save_path = save_path
    self.best_metric = 0
    np.random.seed(config["random_seed"])

    self.__window_size = input_cfg["window_size"]
    self.__coin_number = input_cfg["coin_number"]
    self.__batch_size = train_cfg["batch_size"]
    self.__snap_shot = train_cfg["snap_shot"]
    config["input"]["fake_data"] = fake_data

    matrix = DataMatrices.create_from_config(config)
    self._matrix = matrix
    self.test_set = matrix.get_test_set()
    if not train_cfg["fast_train"]:
        self.training_set = matrix.get_training_set()
    self.upperbound_validation = 1
    self.upperbound_test = 1
    tf.set_random_seed(config["random_seed"])

    self.device = device
    if agent:
        self._agent = agent
    elif device == "cpu":
        # Hide all GPUs so TensorFlow pins everything to the CPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        with tf.device("/cpu:0"):
            self._agent = NNAgent(config, restore_dir, device)
    else:
        self._agent = NNAgent(config, restore_dir, device)
def __init__(self, config, stockList, featureList, start_date, end_date,
             fake_data, net_dir=None, result_path=None, agent=None,
             agent_type="nn"):
    """Back-test driver for a given stock universe and date range.

    :param config: config dictionary
    :param stockList: list of stock codes
    :param featureList: list of feature names
    :param start_date: first date of the back-test data
    :param end_date: last date of the back-test data
    :param fake_data: if True, randomly generated data is used
    :param net_dir: directory of the trained network (used when agent_type == "nn")
    :param result_path: where back-test results are written
    :param agent: pre-built agent; if None one is created from net_dir
    :param agent_type: "nn" for the neural network, "traditional" for baselines
    :raises ValueError: if agent_type is not one of the supported values
    """
    trader.Trader.__init__(self, 0, config, stockList, featureList, start_date,
                           end_date, fake_data, 0, net_dir, result_path,
                           initial_cash=1000000, agent=agent,
                           agent_type=agent_type)
    if agent_type == "nn":
        data_matrices = self._rolling_trainer.data_matrices
    elif agent_type == "traditional":
        # Traditional baselines only consume close prices.
        config["input"]["feature_number"] = 1
        data_matrices = DataMatrices.create_from_config(config)
    else:
        raise ValueError("unsupported agent_type: {}".format(agent_type))
    self.__test_set = data_matrices.get_test_set()
    self.__test_length = self.__test_set["X"].shape[0]  # length of the test set
    self.__test_date = data_matrices.get_test_date()
    self._total_steps = self.__test_length
    self.__test_pv = 1.0       # portfolio value, starts at 1
    self.__test_pc_vector = []  # per-step portfolio change ratios
    self._window_size = config["input"]["window_size"]
def __init__(self, config, net_dir=None, agent=None, agent_type="nn"):
    """Back-test driver with a fixed 300-second trading period.

    :param config: config dictionary
    :param net_dir: directory of the trained network (used when agent_type == "nn")
    :param agent: pre-built agent; if None one is created from net_dir
    :param agent_type: "nn" for the neural network, "traditional" for baselines
    :raises ValueError: if agent_type is not one of the supported values
    """
    trader.Trader.__init__(self, 300, config, 0, net_dir, initial_BTC=1,
                           agent=agent, agent_type=agent_type)
    # NOTE(review): storing the config here was a later addition and,
    # per the original author, may cause errors -- TODO verify.
    self.config = config
    if agent_type == "nn":
        data_matrices = self._rolling_trainer.data_matrices
    elif agent_type == "traditional":
        # Traditional baselines only consume close prices.
        config["input"]["feature_number"] = 1
        data_matrices = DataMatrices.create_from_config(config)
    else:
        raise ValueError("unsupported agent_type: {}".format(agent_type))
    self.__test_set = data_matrices.get_test_set()
    self.__test_length = self.__test_set["X"].shape[0]
    self._total_steps = self.__test_length
    self.__test_pv = 1.0       # portfolio value, starts at 1
    self.__test_pc_vector = []  # per-step portfolio change ratios
    self.__period = config['input']['global_period']
def __init__(self, config, net_dir=None, agent=None, agent_type="nn"):
    """Back-test driver that also tracks turnover.

    :param config: config dictionary
    :param net_dir: directory of the trained network (used when agent_type == "nn")
    :param agent: pre-built agent; if None one is created from net_dir
    :param agent_type: "nn" for the neural network, "traditional" for baselines
    :raises ValueError: if agent_type is not one of the supported values
    """
    trader.Trader.__init__(self, 0, config, 0, net_dir, initial_BTC=1,
                           agent=agent, agent_type=agent_type)
    if agent_type == "nn":
        # Construct the data and return matrix via the rolling trainer.
        data_matrices = self._rolling_trainer.data_matrices
    elif agent_type == "traditional":
        # Traditional algorithms only need close prices.
        config["input"]["feature_number"] = 1
        data_matrices = DataMatrices.create_from_config(config)
    else:
        raise ValueError("unsupported agent_type: {}".format(agent_type))
    # Back-testing only needs the test set.
    self.__test_set = data_matrices.get_test_set()
    self.__test_length = self.__test_set["X"].shape[0]
    self._total_steps = self.__test_length
    self.__test_pv = 1.0       # total capital
    self.__test_pc_vector = []  # portfolio change ratio per step
    self.__turn_over = []
def __init__(self, config, stockList, featureList, start_date, end_date,
             fake_data=False, restore_dir=None, save_path=None, device="cpu",
             agent=None):
    """Trainer for a given stock universe and date range.

    :param config: config dictionary
    :param stockList: list of stock codes to train on
    :param featureList: list of feature names
    :param start_date: first date of the data window
    :param end_date: last date of the data window
    :param fake_data: if True will use data generated randomly
    :param restore_dir: path to the model trained before
    :param save_path: path to save the model
    :param device: the device used to train the network
    :param agent: an existing nnagent; when given, no new agent is created
        and restore_dir has no effect
    """
    self.config = config
    self.train_config = config["training"]
    self.input_config = config["input"]
    self.save_path = save_path
    self.best_metric = 0
    np.random.seed(config["random_seed"])
    self.__window_size = self.input_config["window_size"]
    self.__batch_size = self.train_config["batch_size"]
    self.__snap_shot = self.train_config["snap_shot"]
    config["input"]["fake_data"] = fake_data

    self.stockList = stockList
    self.featureList = featureList
    self.start_date = start_date
    self.end_date = end_date
    self.fake_data = fake_data

    # Data matrices for the requested universe / date range.
    self._matrix = DataMatrices.create_from_config(config, stockList,
                                                   featureList, start_date,
                                                   end_date)
    # Test set is a dict {'X', 'y', 'last_w', 'setw'}, e.g.
    # X: (test_length, feature_num, stock_num, window_size),
    # y: (test_length, feature_num, stock_num), last_w: (test_length, stock_num).
    self.test_set = self._matrix.get_test_set()
    if not config["training"]["fast_train"]:
        self.training_set = self._matrix.get_training_set()
    self.upperbound_validation = 1
    self.upperbound_test = 1
    tf.set_random_seed(self.config["random_seed"])

    self.device = device
    if agent:
        self._agent = agent
    elif device == "cpu":
        # Hide all GPUs so TensorFlow pins everything to the CPU.
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        with tf.device("/cpu:0"):
            self._agent = NNAgent(config, stockList, featureList,
                                  restore_dir, device)
    else:
        self._agent = NNAgent(config, stockList, featureList, restore_dir,
                              device)
def __init__(self, config):
    """GARCH agent state initialisation.

    Builds a 5-minute-period copy of the data matrices, extracts per-coin
    close-price histories, and fits a GARCH model (via ``self.fitting``) on
    the log-returns of each of the 11 coins.  The per-coin values are kept in
    individually numbered attributes because sibling methods reference them
    one by one (name mangling prevents ``setattr`` loops here).

    :param config: config dictionary
    """
    self.__counter = 0
    self.__start = parse_time(config["input"]["start_date"])
    self.__end = parse_time(config["input"]["end_date"])
    self.__number_of_coins = config["input"]["coin_number"]
    self.__batch_size = config["training"]["batch_size"]
    self.__window_size = config["input"]["window_size"] + 1
    # Trim the test portion off the end so only training data is used.
    span = self.__end - self.__start
    self.__end = self.__end - config["input"]["test_portion"] * span

    # Deep-copy the config before forcing a 300 s (5 min) period, so the
    # caller's config object is left untouched.
    config2 = copy.deepcopy(config)
    config2["input"]["global_period"] = 300
    self._matrix2 = DataMatrices.create_from_config(config2)

    # Start every batch sample with uniform portfolio weights.
    self.__weightgarch = pd.DataFrame(index=range(0, self.__batch_size),
                                      columns=range(0, self.__number_of_coins))
    self.__weightgarch = self.__weightgarch.fillna(1.0 / self.__number_of_coins)

    training_set = self._matrix2.get_training_set()
    # Renamed from `set`, which shadowed the builtin.
    train_x = training_set["X"]
    # Per-coin history windows (feature 0, window offset 0).  The window
    # length was hand-tuned per coin by the original author ("good times"
    # vs "bad times" regimes); per the original notes, the 1000/2000-sample
    # windows did not work for some coins.
    set5 = train_x[-5000:, 0, :, 0]
    set3 = train_x[-3000:, 0, :, 0]
    set2 = train_x[-2000:, 0, :, 0]

    self.__lastvalue1 = set5[0, 0]
    self.__lastvalue2 = set5[0, 1]
    self.__lastvalue3 = set2[0, 2]
    self.__lastvalue4 = set5[0, 3]
    self.__lastvalue5 = set3[0, 4]
    self.__lastvalue6 = set3[0, 5]
    self.__lastvalue7 = set3[0, 6]
    self.__lastvalue8 = set5[0, 7]
    self.__lastvalue9 = set5[0, 8]
    self.__lastvalue10 = set5[0, 9]
    self.__lastvalue11 = set3[0, 10]

    # Log-returns per coin, plus the sample volatility (root mean square of
    # the log-returns) and the most recent log-return.
    logreturns1 = np.log(set5[1:, 0] / set5[:-1, 0])
    self.__lastsigma1 = np.sqrt(np.mean(logreturns1 ** 2))
    self.__lastlogreturn1 = logreturns1[-1]
    logreturns2 = np.log(set5[1:, 1] / set5[:-1, 1])
    self.__lastsigma2 = np.sqrt(np.mean(logreturns2 ** 2))
    self.__lastlogreturn2 = logreturns2[-1]
    logreturns3 = np.log(set2[1:, 2] / set2[:-1, 2])
    self.__lastsigma3 = np.sqrt(np.mean(logreturns3 ** 2))
    self.__lastlogreturn3 = logreturns3[-1]
    logreturns4 = np.log(set5[1:, 3] / set5[:-1, 3])
    self.__lastsigma4 = np.sqrt(np.mean(logreturns4 ** 2))
    self.__lastlogreturn4 = logreturns4[-1]
    logreturns5 = np.log(set3[1:, 4] / set3[:-1, 4])
    self.__lastsigma5 = np.sqrt(np.mean(logreturns5 ** 2))
    self.__lastlogreturn5 = logreturns5[-1]
    logreturns6 = np.log(set3[1:, 5] / set3[:-1, 5])
    self.__lastsigma6 = np.sqrt(np.mean(logreturns6 ** 2))
    self.__lastlogreturn6 = logreturns6[-1]
    logreturns7 = np.log(set3[1:, 6] / set3[:-1, 6])
    self.__lastsigma7 = np.sqrt(np.mean(logreturns7 ** 2))
    self.__lastlogreturn7 = logreturns7[-1]
    logreturns8 = np.log(set5[1:, 7] / set5[:-1, 7])
    self.__lastsigma8 = np.sqrt(np.mean(logreturns8 ** 2))
    self.__lastlogreturn8 = logreturns8[-1]
    logreturns9 = np.log(set5[1:, 8] / set5[:-1, 8])
    self.__lastsigma9 = np.sqrt(np.mean(logreturns9 ** 2))
    self.__lastlogreturn9 = logreturns9[-1]
    logreturns10 = np.log(set5[1:, 9] / set5[:-1, 9])
    self.__lastsigma10 = np.sqrt(np.mean(logreturns10 ** 2))
    self.__lastlogreturn10 = logreturns10[-1]
    logreturns11 = np.log(set3[1:, 10] / set3[:-1, 10])
    self.__lastsigma11 = np.sqrt(np.mean(logreturns11 ** 2))
    self.__lastlogreturn11 = logreturns11[-1]

    # Snapshot the initial state so it can be restored later.
    self.__firstsigma1 = self.__lastsigma1
    self.__firstsigma2 = self.__lastsigma2
    self.__firstsigma3 = self.__lastsigma3
    self.__firstsigma4 = self.__lastsigma4
    self.__firstsigma5 = self.__lastsigma5
    self.__firstsigma6 = self.__lastsigma6
    self.__firstsigma7 = self.__lastsigma7
    self.__firstsigma8 = self.__lastsigma8
    self.__firstsigma9 = self.__lastsigma9
    self.__firstsigma10 = self.__lastsigma10
    self.__firstsigma11 = self.__lastsigma11
    self.__firstlogreturn1 = self.__lastlogreturn1
    self.__firstlogreturn2 = self.__lastlogreturn2
    self.__firstlogreturn3 = self.__lastlogreturn3
    self.__firstlogreturn4 = self.__lastlogreturn4
    self.__firstlogreturn5 = self.__lastlogreturn5
    self.__firstlogreturn6 = self.__lastlogreturn6
    self.__firstlogreturn7 = self.__lastlogreturn7
    self.__firstlogreturn8 = self.__lastlogreturn8
    self.__firstlogreturn9 = self.__lastlogreturn9
    self.__firstlogreturn10 = self.__lastlogreturn10
    self.__firstlogreturn11 = self.__lastlogreturn11
    self.__firstlastvalue1 = self.__lastvalue1
    self.__firstlastvalue2 = self.__lastvalue2
    self.__firstlastvalue3 = self.__lastvalue3
    self.__firstlastvalue4 = self.__lastvalue4
    self.__firstlastvalue5 = self.__lastvalue5
    self.__firstlastvalue6 = self.__lastvalue6
    self.__firstlastvalue7 = self.__lastvalue7
    self.__firstlastvalue8 = self.__lastvalue8
    self.__firstlastvalue9 = self.__lastvalue9
    self.__firstlastvalue10 = self.__lastvalue10
    self.__firstlastvalue11 = self.__lastvalue11

    # Fit GARCH parameters per coin.
    # BUGFIX: __theta1 used to be assigned from a direct
    # negative_log_likelihood(logreturns1, (1, 0.5, 0.5)) call and then
    # immediately overwritten by fitting(); the dead call was removed
    # (assumed side-effect free -- it only evaluated a likelihood).
    self.__theta1 = self.fitting(logreturns1)
    print(self.__theta1)
    self.__theta2 = self.fitting(logreturns2)
    print(self.__theta2)
    self.__theta3 = self.fitting(logreturns3)
    print(self.__theta3)
    self.__theta4 = self.fitting(logreturns4)
    print(self.__theta4)
    self.__theta5 = self.fitting(logreturns5)
    print(self.__theta5)
    self.__theta6 = self.fitting(logreturns6)
    print(self.__theta6)
    self.__theta7 = self.fitting(logreturns7)
    print(self.__theta7)
    self.__theta8 = self.fitting(logreturns8)
    print(self.__theta8)
    self.__theta9 = self.fitting(logreturns9)
    print(self.__theta9)
    self.__theta10 = self.fitting(logreturns10)
    print(self.__theta10)
    self.__theta11 = self.fitting(logreturns11)
    print(self.__theta11)
else: drawdown_list.append(1.0 - pv_array[i] / max_benefit) return max(drawdown_list) def port_value(pv_array): p = np.array([np.prod(pv_array[:i + 1]) for i in range(pv_array.shape[0])]) return p #%% load config & data config_2 = load_config(2) # config_2['input']['start_date'] = '2015/05/01' # config_2['input']['end_date'] = '2017/04/27' # 自訂區間 data = DataMatrices.create_from_config(config_2) #%% network structure feature_number = config_2["input"]["feature_number"] rows = config_2["input"]["coin_number"] columns = config_2["input"]["window_size"] input_tensor = tf.placeholder(tf.float32, shape=[None, feature_number, rows, columns]) #[None, 3, 11, 31] previous_w = tf.placeholder(tf.float32, shape=[None, rows]) #[None, 11] input_num = tf.placeholder(tf.int32, shape=[]) #[11] y = tf.placeholder(tf.float32, shape=[None, feature_number, rows]) def allint(l):