def _collect_training_data(self):
    '''Collect training data through self-play.'''
    for i in range(self.n_games):
        # generate self-play training data
        self.board = Board(self.row, self.column)
        self.board.set_state()
        AI = Alpha(model_file=self.init_model, use_gpu=self.use_gpu)
        board_states, mcts_probs, current_players = [], [], []
        while True:
            move, move_probs = AI.self_play(self.row, self.column,
                                            self.board.board_state)
            board_states.append(self.board.current_state())
            mcts_probs.append(move_probs)
            self.board.move(move)
            current_players.append(self.board.get_cur_player())
            end, winner = self.board.who_win()
            if end:
                # +1 for states attributed to the winner, -1 for the loser,
                # 0 everywhere on a draw (winner == 0)
                winners = np.zeros(len(current_players))
                if winner != 0:
                    winners[np.array(current_players) == winner] = 1.0
                    winners[np.array(current_players) != winner] = -1.0
                print(winners)
                play_data = zip(board_states, mcts_probs, winners)
                break
        play_data = list(play_data)
        self.episode_len = len(play_data)
        # print(play_data)
        # add data to buffer
        self.buffer.extend(play_data)
        print(len(self.buffer))
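# A minimal, self-contained sketch of the winner-labeling step above, pulled
# out for clarity: each recorded state gets +1.0 if the player associated
# with it won the game, -1.0 if that player lost, and 0.0 on a draw
# (winner == 0). The function name is illustrative, not part of the source.
import numpy as np

def label_winners(current_players, winner):
    winners = np.zeros(len(current_players))
    if winner != 0:
        players = np.array(current_players)
        winners[players == winner] = 1.0
        winners[players != winner] = -1.0
    return winners

# e.g. players 1 and -1 alternate and player 1 wins:
# label_winners([1, -1, 1, -1], 1) -> array([ 1., -1.,  1., -1.])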
def test_model_finalization(self):
    # Initialize Alpha
    alpha = Alpha(2, {0: 2, 1: 2}, {0: LEN_SIMPLE_OPS, 1: 1})
    # Set alpha_e for edge of op on ground level
    alpha.parameters[0][0][(0, 1)] = nn.Parameter(tensor([10., 0., 0., 0.]))
    alpha.parameters[1][0][(0, 1)] = nn.Parameter(tensor([10., 0.]))
    # Create simple model
    model = Model(alpha=alpha,
                  primitives=SIMPLE_OPS,
                  channels_in=1,
                  channels_start=2,
                  stem_multiplier=1,
                  num_classes=5,
                  test_mode=True)
    # Input
    x = tensor([[
        # feature 1
        [[1.]]
    ]])
    # Expected output
    y = tensor([[
        # feature 1
        [[2.]]
    ]])
    learnt_model = LegacyLearntModel(model)
    assert learnt_model(x).equal(y)
def f1(self):
    if self.x.i1 <= self.y.i1 <= self.z.i1:
        i = self.y.i1 - self.x.i1 + self.z.i1
    elif self.y.i1 > self.z.i1 > self.x.i1:
        i = Alpha.len() - self.z.i1
    else:
        i = self.y.i1 + (self.x.i1 - self.z.i1)
    print("Meow: ", Alpha.get(i))
    print("This cat ♥ pretending to be smart.")
def test_2level(self):
    '''
    Testing HierarchicalOperation.create_dag with 2 levels.
    '''
    x = tensor([[
        # feature 1
        [[1, 1], [1, 1]]
    ]])
    # Initialize Alpha
    alpha = Alpha(2, {0: 3, 1: 3}, {0: LEN_SIMPLE_OPS, 1: 1})
    # Create hierarchical operation
    hierarchical_op = HierarchicalOperation.create_dag(
        level=1,
        alpha=alpha,
        alpha_dag=alpha.parameters[1][0],
        primitives=SIMPLE_OPS,
        channels_in=1)
    y = tensor([[[[0.7500, 0.7500], [0.7500, 0.7500]],
                 [[1.1250, 1.1250], [1.1250, 1.1250]],
                 [[0.5625, 0.5625], [0.5625, 0.5625]],
                 [[0.84375, 0.84375], [0.84375, 0.84375]],
                 [[0.84375, 0.84375], [0.84375, 0.84375]],
                 [[1.265625, 1.265625], [1.265625, 1.265625]]]])
    assert y.equal(hierarchical_op(x))
def test_1level(self):
    '''
    Testing MNAS with just 1 level, which is equivalent to DARTS:
    only mixed operations of primitives on nodes. This exercises
    only the base case of create_dag.
    '''
    x = tensor([[
        # feature 1
        [[1, 1], [1, 1]]
    ]])
    # Initialize Alpha
    alpha = Alpha(1, {0: 3}, {0: LEN_SIMPLE_OPS})
    hierarchical_op = HierarchicalOperation.create_dag(
        level=0,
        alpha=alpha,
        alpha_dag=alpha.parameters[0][0],
        primitives=SIMPLE_OPS,
        channels_in=1)
    y = tensor([[
        # feature 1
        [[1.5, 1.5], [1.5, 1.5]],
        # feature 2
        [[2.25, 2.25], [2.25, 2.25]]
    ]])
    assert y.equal(hierarchical_op(x))
def _AI_player(self):
    '''The interface for the AI player.

    Parameters required and updated: board status, which side to play.
    Return: the next gomoku piece coordinate (x, y).
    Gomoku board status: 0 means no piece, 1 a black piece, -1 a white piece.
    '''
    self.human = False
    if not self.is_start:
        return
    # AI program
    AI = Alpha(model_file=self.model_file, use_gpu=False)
    [x, y] = AI.play(self.row, self.column, self.board)
    self._draw_piece(x, y, self.is_black)
    self.board[x][y] = self._ternary_op(1, -1, self.is_black)
    self.last_x, self.last_y = x, y
    self._gomoku_who_win()
    self.is_black = not self.is_black
    # '黑方行棋' = "black to move", '白方行棋' = "white to move"
    self.l_info.config(
        text=self._ternary_op('黑方行棋', '白方行棋', self.is_black))
    self.human = True
class Window(pyglet.window.Window):
    def __init__(self):
        super(Window, self).__init__()
        self.set_size(600, 600)
        self.alpha = Alpha(self.get_size()[0], self.get_size()[1], 10)
        pyglet.clock.schedule_interval(self.update, 1.0 / 30.0)

    def on_draw(self):
        self.clear()
        self.alpha.draw()

    def update(self, dt):
        self.alpha.run_rules()
def f3(self):
    r1i = abs(self.x.i2 - self.x.i1)
    r1ii = abs(self.x.i3 - self.x.i2)
    r2i = abs(self.y.i2 - self.y.i1)
    r2ii = abs(self.y.i3 - self.y.i2)
    # distances between pair-comparanda
    d1 = abs(self.y.i1 - self.x.i3)
    d0 = abs(self.z.i1 - self.y.i3)
    # index-location ingredients for the comparandum and the
    # corresponding char u, such that alpha[u] = i
    iu1 = self.z.i3 + d1
    iu2 = iu1 + r1i
    iu3 = iu2 + r1ii
    if r1i == r2i and r1ii == r2ii:
        print("Meow = ", Alpha.get(iu1) + Alpha.get(iu2) + Alpha.get(iu3))
    else:
        print("I'm confused, but I know I'm in the primary else clause. "
              "So there.")
def test_2level_model(self):
    x = tensor([[
        # feature 1
        [[1., 1.], [1., 1.]]
    ]])
    # Initialize Alpha
    alpha = Alpha(2, {0: 3, 1: 3}, {0: LEN_SIMPLE_OPS, 1: 1})
    model = Model(alpha=alpha,
                  primitives=SIMPLE_OPS,
                  channels_in=1,
                  channels_start=2,
                  stem_multiplier=1,
                  num_classes=5)
    # test body not yet implemented
    raise NotImplementedError
def __init__(self, str1, str2):
    str1, str2 = list(str1), list(str2)
    fixGenomes(str1, str2)  # fixGenomes removes unmappable genes
    print('Genomes after removal of unmappable genes: \n%r\n%r' % (str1, str2))
    self.genoma1 = str1
    self.genoma2 = str2
    fmly1 = getFmly(self.genoma1)
    fmly2 = getFmly(self.genoma2)
    # family list = [Alpha(posG1, posG2, geneId) for each 'gene' in 'Genome']
    self.listaDeFamilias = [Alpha(fmly1[i], fmly2[i], i) for i in fmly1]
def test_initialization(self):
    num_levels = 3
    num_nodes_at_level = {0: 3, 1: 3, 2: 3}
    num_ops_at_level = {0: 5, 1: 3, 2: 3}
    testAlpha = Alpha(num_levels, num_nodes_at_level, num_ops_at_level)
    # Check parameters
    for i in range(0, num_levels):
        alpha_i = testAlpha.parameters[i]
        for op_num in range(0, num_ops_at_level[i + 1]):
            for node_a in range(0, num_nodes_at_level[i]):
                for node_b in range(node_a + 1, num_nodes_at_level[i]):
                    if i == 0:
                        num_parameters = num_ops_at_level[i] + 2
                    else:
                        num_parameters = num_ops_at_level[i] + 1
                    assert alpha_i[op_num][(node_a, node_b)].equal(
                        zeros(num_parameters))
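# Worked count for the shapes checked above: at level 0 each edge tensor has
# num_ops_at_level[0] + 2 = 7 entries, and at higher levels
# num_ops_at_level[i] + 1 entries. (The extra slots presumably cover special
# ops such as zero/identity; that reading is an inference from the test, not
# stated in the source.)
from torch import zeros

level0_edge = zeros(5 + 2)  # 7 zeroed logits per level-0 edge
level1_edge = zeros(3 + 1)  # 4 zeroed logits per level-1 edge
assert level0_edge.shape == (7,) and level1_edge.shape == (4,)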
from alpha import Alpha
import torch
import util

alpha_norm = Alpha(1, {0: 7}, {0: 8})
alpha_reduce = Alpha(1, {0: 7}, {0: 8})

# Freeze the architecture parameters before hand-setting them
for edge in alpha_norm.parameters[0][0]:
    alpha_norm.parameters[0][0][edge].requires_grad = False
for edge in alpha_reduce.parameters[0][0]:
    alpha_reduce.parameters[0][0][edge].requires_grad = False

# Set to DARTS Alpha Normal
alpha_norm.parameters[0][0][(0, 2)][2] = 1
alpha_norm.parameters[0][0][(0, 3)][2] = 1
alpha_norm.parameters[0][0][(0, 4)][2] = 1
alpha_norm.parameters[0][0][(1, 2)][2] = 1
alpha_norm.parameters[0][0][(1, 3)][2] = 1
alpha_norm.parameters[0][0][(1, 4)][8] = 1
alpha_norm.parameters[0][0][(1, 5)][8] = 1
alpha_norm.parameters[0][0][(2, 5)][5] = 1

# Set to DARTS Alpha Reduce
alpha_reduce.parameters[0][0][(0, 2)][1] = 1
alpha_reduce.parameters[0][0][(0, 4)][1] = 1
alpha_reduce.parameters[0][0][(1, 2)][1] = 1
alpha_reduce.parameters[0][0][(1, 3)][1] = 1
alpha_reduce.parameters[0][0][(1, 5)][1] = 1
alpha_reduce.parameters[0][0][(2, 3)][8] = 1
alpha_reduce.parameters[0][0][(2, 4)][8] = 1
alpha_reduce.parameters[0][0][(2, 5)][8] = 1
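# A quick sanity check for the hand-set alphas above (a sketch, assuming the
# model softmaxes each edge's raw alpha vector to weight candidate
# operations, as in DARTS): after the one-hot assignments, the intended op
# index should carry the largest mixture weight on every edge.
import torch.nn.functional as F

for edge in alpha_norm.parameters[0][0]:
    logits = alpha_norm.parameters[0][0][edge]
    weights = F.softmax(logits, dim=0)
    print(edge, int(weights.argmax()), float(weights.max()))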
def f2(self):
    r1 = abs(self.x.i2 - self.x.i1)
    r2 = abs(self.y.i2 - self.y.i1)
    r3 = abs(self.z.i2 - self.z.i1)
    d1 = abs(self.y.i2 - self.x.i1)
    d0 = abs(self.y.i1 - self.x.i2)
    d1_vert1 = abs(self.y.i1 - self.x.i1)
    d1_vert2 = abs(self.x.i2 - self.y.i2)
    # gap between the two vertical distances (assumed definition for the
    # otherwise undefined d12_vert used below)
    d12_vert = abs(d1_vert1 - d1_vert2)
    iu1 = self.z.i2 + d0
    iu2 = iu1 + r2
    r4 = abs(iu2 - iu1)
    d2 = abs(iu1 - self.z.i2)
    if self.z.i2 >= self.z.i1 >= self.y.i2 >= self.y.i1 >= self.x.i2 >= self.x.i1:
        if (r1 + d1 + r2 + d0 + r3 + d2 + r4) <= Alpha.len():
            if d1_vert1 == d1_vert2:
                print("Meow = ", Alpha.get(iu1) + Alpha.get(iu2))
            else:
                iu2 += r1
                print("Meow = ", Alpha.get(iu1) + Alpha.get(iu2))
        else:
            print("Meow = ",
                  Alpha.get(iu1 - Alpha.len()) + Alpha.get(iu2 - Alpha.len()))
        print("This cat ♥ pretending to be smart.")
    elif self.x.i2 > self.y.i2 > self.z.i2 and self.x.i1 < self.y.i1 < self.z.i1:
        r1 = abs(self.y.i1 - self.x.i1)
        iu1 = self.z.i1 + r1
        iu2 = self.z.i2 - r1
        if d1_vert1 == d1_vert2:
            print("Meow = ", Alpha.get(iu1) + Alpha.get(iu2))
        else:
            iu2 -= d12_vert
            print("Meow = ", Alpha.get(iu1) + Alpha.get(iu2))
        print("This cat ♥ pretending to be smart.")
    else:
        # symmetric relations where the answer lies in the interval
        # determined by, say, the first comparandum
        print("I think this requires stochastic reasoning. "
              "Are you sure you know what a correct answer would look like?")
class StartMenu:
    def __init__(self):
        self.root = Tk()
        self.root.geometry("700x500")  # width x height
        self.root.title("Alpha miner")
        self.menubar = Menu(self.root, title="menu")
        self.filemenu = Menu(self.menubar, tearoff=0)
        self.editmenu = Menu(self.menubar, tearoff=0)
        self.runmenu = Menu(self.menubar, tearoff=0)
        self.helpmenu = Menu(self.menubar, tearoff=0)
        self.opened_filename = ''
        self.opened_file = None
        self.alpha = None
        self.alpha_plus = None
        self.graph_name = ''

    def donothing(self):
        filewin = Toplevel(self.root)
        button = Button(filewin, text="Do nothing button")
        button.pack()

    def open_file(self):
        self.root.filename = filedialog.askopenfilename(
            initialdir="/", title="Select file",
            filetypes=(("txt files", "*.txt"), ("all files", "*.*")))
        self.opened_filename = self.root.filename
        self.opened_file = read(self.opened_filename)
        print(self.opened_file)
        self.show_log()

    def run_alpha(self):
        log = []
        log_row = []
        for rows in self.text.get(1.0, END):
            if rows == '\n':
                if log_row != []:
                    log.append(log_row)
                    log_row = []
            if rows.isalpha():
                log_row.append(rows)
        if self.radio_v.get() == 2:
            self.alpha = Alpha(log, splines='node')
        else:
            self.alpha = Alpha(log)
        print(self.alpha)
        print("direct successor:", self.alpha.ds)
        print("causality:", self.alpha.cs)
        print("inverse causality:", self.alpha.inv_cs)
        print("parallel:", self.alpha.pr)
        print("no relation:", self.alpha.ind)
        self.graph_name = self.opened_filename.split('/')[-1][:-4]
        self.alpha.create_graph(self.graph_name, view=False)
        self.show_graph()

    def run_alpha_plus(self):
        log = []
        log_row = []
        for rows in self.text.get(1.0, END):
            if rows == '\n':
                if log_row != []:
                    log.append(log_row)
                    log_row = []
            if rows.isalpha():
                log_row.append(rows)
        if self.radio_v.get() == 2:
            self.alpha_plus = AlphaPlus(log, splines='node')
        else:
            self.alpha_plus = AlphaPlus(log)
        print(self.alpha_plus)
        print("causality: ", self.alpha_plus.cs)
        print("inverse causality:", self.alpha_plus.inv_cs)
        print("parallel: ", self.alpha_plus.pr)
        print("direct succession: ", self.alpha_plus.ds)
        print("loop1: ", self.alpha_plus.get_loop1())
        print("loop2: ", self.alpha_plus.get_loop2())
        print("t_prim: ", self.alpha_plus.t_prim)
        print("l1l: ", self.alpha_plus.l1l)
        print("fl1l: ", self.alpha_plus.fl1l)
        print("log_minus_l1l: ", self.alpha_plus.log_minus_l1l())
        print("no relation: ", self.alpha_plus.ind)
        print("log: ", self.alpha_plus.log)
        self.graph_name = self.opened_filename.split('/')[-1][:-4]
        self.alpha_plus.run_alpha()
        self.alpha_plus.run_alpha_plus(self.graph_name)
        self.show_graph()

    def create_menubar(self):
        self.menubar.add_cascade(label="File", menu=self.filemenu)
        self.menubar.add_cascade(label="Run", menu=self.runmenu)
        self.menubar.add_cascade(label="Help", menu=self.helpmenu)

    def create_file_menu(self):
        self.filemenu.add_command(label="Open", command=self.open_file)
        self.filemenu.add_command(label="Save as pdf",
                                  command=self.save_graph_as_pdf)
        self.filemenu.add_separator()
        self.filemenu.add_command(label="Exit", command=self.root.quit)

    def create_run_menu(self):
        self.runmenu.add_command(label="Run alpha miner",
                                 command=self.run_alpha)
        self.runmenu.add_command(label="Run alpha+ miner",
                                 command=self.run_alpha_plus)

    def create_help_menu(self):
        #self.helpmenu.add_command(label="Help Index", command=self.donothing)
        self.helpmenu.add_command(label="About...", command=self.show_about)

    def show_about(self):
        messagebox.showinfo(
            "About",
            """This is a simple Python program that uses process mining algorithms (alpha and alpha+) to create BPMN graphs.
Logs: the program uses .txt logs as input, e.g. "a b c d".
Graphs: there are two graph types to choose from: node and ortho.
Running: click Run in the menu and choose an algorithm: alpha or alpha+.
Output: the program generates a BPMN graph and shows it in another window.
The File menu has an option to save the generated graph as a pdf file.""")

    def show_graph(self):
        packs = self.root.pack_slaves()
        for l in packs:
            if (str(l) != '.!label' and str(l) != '.!label3'
                    and '.!text' not in str(l)):
                l.destroy()
        self.im = PhotoImage(file='../graphs/' + self.graph_name + '.png')
        self.label = Label(image=self.im)
        self.label.pack()

    def save_graph_as_pdf(self):
        self.alpha.G.format = 'pdf'
        self.alpha.G.render('../graphs/' + self.graph_name, view=True)

    def selected(self):
        print(self.radio_v.get())

    def add_radio_buttons(self):
        l1 = Label(self.root, text='Log:', justify=LEFT, padx=20,
                   relief='solid')
        l1.pack()
        l1.place(x=0, y=100)
        self.radio_v = IntVar()
        l2 = Label(self.root, text='Choose graph type', justify=LEFT, padx=20,
                   relief='solid', anchor=SW)
        l2.pack()
        l2.place(x=0, y=0)
        self.r1 = Radiobutton(self.root, text="Ortho", padx=20,
                              variable=self.radio_v, value=1,
                              command=self.selected)
        self.r1.select()
        self.r1.pack(anchor=W)
        self.r1.place(x=0, y=25)
        self.r2 = Radiobutton(self.root, text="Node", padx=20,
                              variable=self.radio_v, value=2,
                              command=self.selected)
        self.r2.deselect()
        self.r2.pack(anchor=W)
        self.r2.place(x=0, y=50)

    def show_log(self):
        packs = self.root.pack_slaves()
        for l in packs:
            print(l)
            if str(l) != '.!label':
                l.destroy()
        log_height = 0
        log = ''
        for row in self.opened_file:
            for task in row:
                log += ' ' + task
            log += '\n'
            log_height += 1
        try:
            self.text.delete(1.0, END)
        except Exception:
            # self.text may not exist yet or may already be destroyed
            pass
        self.text = Text(self.root, height=log_height + 1, width=30)
        self.text.pack()
        self.text.place(x=0, y=125)
        self.text.insert(END, log)

    def show_menu(self):
        self.root.config(menu=self.menubar)
        self.root.mainloop()
def push(project_dir, single):
    alpha = Alpha()
    if single:
        alpha.push_single(project_dir)
    else:
        alpha.push_all(project_dir)
class VAEController(nn.Module):
    '''
    This class is the controller for the VAE and has alpha parameters
    registered in addition to the weight parameters automatically
    registered by PyTorch.

    get_weights -> returns the weight parameters
    get_alpha_level(level) -> returns the parameter (yes, singular, as the
    whole tensor is wrapped as one parameter) corresponding to that alpha level
    '''

    def __init__(self,
                 num_levels: int,
                 num_nodes_at_level: Dict[int, int],
                 num_ops_at_level: Dict[int, int],
                 primitives: dict,
                 channels_in: int,
                 beta: float,
                 image_height: int,
                 image_width: int,
                 writer=None,
                 test_mode=False):
        '''
        - Initializes member variables
        - Registers alpha parameters by creating a dummy alpha using the
          constructor and using get_alpha_level to get the alpha for a given
          level. This tensor is wrapped with nn.Parameter to indicate that it
          is a parameter of this controller (and thus requires gradient
          computation with respect to itself). The nn.Parameter is added to
          the nn.ParameterList self.alphas.
        - Registers weight parameters by creating a model from the
          aforementioned dummy alpha
        '''
        # Superclass constructor
        super().__init__()

        # Initialize member variables
        self.num_levels = num_levels
        self.num_nodes_at_level = num_nodes_at_level
        self.num_ops_at_level = num_ops_at_level
        self.primitives = primitives
        self.channels_in = channels_in
        self.beta = beta
        self.writer = writer

        # Initialize Alpha
        self.alpha = Alpha(num_levels=self.num_levels,
                           num_nodes_at_level=self.num_nodes_at_level,
                           num_ops_at_level=self.num_ops_at_level)

        # Initialize model with initial alpha
        self.model = BetaVAE(alpha=self.alpha,
                             beta=beta,
                             primitives=self.primitives,
                             channels_in=self.channels_in,
                             image_height=image_height,
                             image_width=image_width,
                             writer=writer,
                             test_mode=test_mode)
        if not test_mode and torch.cuda.is_available():
            self.model = self.model.cuda()

    def forward(self, x):
        return self.model(x)

    def loss(self, x, output):
        return self.model.loss(x, output)

    def entanglement(self, x, output):
        return self.model.entanglement(x, output)

    # Get the alpha parameter list for a level
    def get_alpha_level(self, level):
        return self.alpha.get_alpha_level(level)

    # Get all the weight parameters
    def get_weights(self):
        weights = nn.ParameterList()
        for name, param in self.named_parameters(recurse=True):
            if 'alpha' not in name:
                weights.append(param)
        return weights
def promote(project_dir, alias):
    alpha = Alpha()
    alpha.promote_all(project_dir, alias)
class ModelController(nn.Module):
    '''
    This class is the controller for the model and has alpha parameters
    registered in addition to the weight parameters automatically
    registered by PyTorch.

    get_weights -> returns the weight parameters
    get_alpha_level(level) -> returns the parameter (yes, singular, as the
    whole tensor is wrapped as one parameter) corresponding to that alpha level
    '''

    def __init__(self,
                 num_levels: int,
                 num_nodes_at_level: Dict[int, int],
                 num_ops_at_level: Dict[int, int],
                 primitives: dict,
                 channels_in: int,
                 channels_start: int,
                 stem_multiplier: int,
                 num_classes: int,
                 loss_criterion,
                 num_cells: int,
                 writer=None,
                 test_mode=False):
        '''
        - Initializes member variables
        - Registers alpha parameters by creating a dummy alpha using the
          constructor and using get_alpha_level to get the alpha for a given
          level. This tensor is wrapped with nn.Parameter to indicate that it
          is a parameter of this controller (and thus requires gradient
          computation with respect to itself). The nn.Parameter is added to
          the nn.ParameterList self.alphas.
        - Registers weight parameters by creating a model from the
          aforementioned dummy alpha
        '''
        # Superclass constructor
        super().__init__()

        # Initialize member variables
        self.num_levels = num_levels
        self.num_nodes_at_level = num_nodes_at_level
        self.num_ops_at_level = num_ops_at_level
        self.primitives = primitives
        self.channels_in = channels_in
        self.channels_start = channels_start
        self.stem_multiplier = stem_multiplier
        self.num_classes = num_classes
        self.loss_criterion = loss_criterion
        self.writer = writer
        self.num_cells = num_cells
        self.test_mode = test_mode
        self.graph_added = False

        # Initialize Alpha for both types of cells
        self.alpha_normal = Alpha(num_levels=self.num_levels,
                                  num_nodes_at_level=self.num_nodes_at_level,
                                  num_ops_at_level=self.num_ops_at_level,
                                  randomize=True)
        self.alpha_reduce = Alpha(num_levels=self.num_levels,
                                  num_nodes_at_level=self.num_nodes_at_level,
                                  num_ops_at_level=self.num_ops_at_level,
                                  randomize=True)

        # Initialize model with initial alpha
        self.model = Model(alpha_normal=self.alpha_normal,
                           alpha_reduce=self.alpha_reduce,
                           primitives=self.primitives,
                           channels_in=self.channels_in,
                           channels_start=self.channels_start,
                           stem_multiplier=self.stem_multiplier,
                           num_classes=self.num_classes,
                           num_cells=num_cells,
                           writer=writer,
                           test_mode=test_mode)
        if not test_mode and torch.cuda.is_available():
            self.model = self.model.cuda()

    def forward(self, x, temp=None):
        if self.test_mode and not self.graph_added:
            # Visualize model in tensorboard
            self.writer.add_graph(self.model, x)
            self.graph_added = True
        return self.model(x, temp=temp)

    # Get loss using loss_criterion
    def loss(self, X, y):
        logits = self.forward(X)
        return self.loss_criterion(logits, y)

    # Get the alpha parameter list for a level (normal and reduce cells)
    def get_alpha_level(self, level):
        return self.alpha_normal.get_alpha_level(level).extend(
            self.alpha_reduce.get_alpha_level(level))

    # Get all the weight parameters
    def get_weights(self):
        weights = nn.ParameterList()
        for name, param in self.named_parameters(recurse=True):
            if 'alpha' not in name:
                weights.append(param)
        return weights

    # Sets requires_grad to False for alpha params / True for weight params
    def weight_training_mode(self):
        for name, param in self.named_parameters(recurse=True):
            if 'alpha' in name:
                param.requires_grad = False
            else:
                param.requires_grad = True

    # Sets requires_grad to False for weight params / True for alpha params
    def alpha_training_mode(self):
        for name, param in self.named_parameters(recurse=True):
            if 'alpha' in name:
                param.requires_grad = True
            else:
                param.requires_grad = False

    # Assumes alpha training mode overall, i.e. weight gradients are off.
    # Switches gradients off for all levels other than the given one.
    def alpha_training_mode_for_level(self, level):
        for i in range(self.alpha_normal.num_levels):
            for param in self.alpha_normal.get_alpha_level(i):
                if i == level:
                    param.requires_grad = True
                else:
                    param.requires_grad = False
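# A minimal sketch of how the two mode switches above might be used in an
# alternating, DARTS-style search loop. `search_epoch` and its loader and
# optimizer arguments are hypothetical stand-ins, not part of the
# controller's API: alpha steps on validation batches, weight steps on
# training batches.
def search_epoch(controller, w_optim, a_optim, train_loader, val_loader):
    for (X_train, y_train), (X_val, y_val) in zip(train_loader, val_loader):
        # architecture step: only alpha params receive gradients
        controller.alpha_training_mode()
        a_optim.zero_grad()
        controller.loss(X_val, y_val).backward()
        a_optim.step()
        # weight step: only weight params receive gradients
        controller.weight_training_mode()
        w_optim.zero_grad()
        controller.loss(X_train, y_train).backward()
        w_optim.step()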
class QtradeEnv(gym.Env):
    """Custom environment that follows the gym interface."""
    metadata = {'render.modes': ['human']}

    def __init__(self):
        super(QtradeEnv, self).__init__()
        self.dir = './data/BTCUSDT.csv'
        self.df = pd.read_csv(self.dir)
        self.alpha = Alpha(self.df)
        self.cost = 0.00
        self.interest_rate = 0.0 / 240 / 240  # internal interest rate
        self.window = 50
        self.cash = 1
        self.stock = 0
        self.t = self.window + 1
        self.T = len(self.df)
        self.steps = 0
        self.list_asset = np.ones(self.T)
        self.list_holding = np.ones(self.T)
        self.list_profit = np.zeros(self.T)

        # alpha
        self.close = self.alpha.close
        self.high = self.alpha.high
        self.low = self.alpha.low
        self.open = self.alpha.open
        self.vol = self.alpha.vol
        self.close_diff = self.alpha.close_diff()
        self.high_diff = self.alpha.high_diff()
        self.low_diff = self.alpha.low_diff()
        self.open_diff = self.alpha.open_diff()
        self.ma = self.alpha.moving_average(window=self.window)
        self.ema = self.alpha.EMA(window=self.window)
        self.mstd = self.alpha.moving_std(window=self.window)
        self.bollinger_lower_bound = self.alpha.bollinger_lower_bound(
            window=self.window, width=1)
        self.bollinger_upper_bound = self.alpha.bollinger_upper_bound(
            window=self.window, width=1)

        # Actions of the format Buy x%, Sell x%, Hold, etc.
        # The action space range must be symmetric, and the order matters.
        self.action_space = spaces.Box(low=np.array([-np.inf, -np.inf]),
                                       high=np.array([np.inf, np.inf]),
                                       dtype=np.float16)
        # Observation: the last `window` bars of normalized OHLC features
        # plus the holding flag (9 channels)
        self.observation_space = spaces.Box(low=-np.inf,
                                            high=np.inf,
                                            shape=(1, self.window, 9),
                                            dtype=np.float16)

    def _next_observation(self):
        obs = [np.array([
            self.close_diff[self.t - self.window + 1:self.t + 1] / self.close[self.t - self.window + 1],
            self.high_diff[self.t - self.window + 1:self.t + 1] / self.high[self.t - self.window + 1],
            self.open_diff[self.t - self.window + 1:self.t + 1] / self.open[self.t - self.window + 1],
            self.low_diff[self.t - self.window + 1:self.t + 1] / self.low[self.t - self.window + 1],
            self.close[self.t - self.window + 1:self.t + 1] / self.close[self.t - self.window + 1],
            self.high[self.t - self.window + 1:self.t + 1] / self.high[self.t - self.window + 1],
            self.open[self.t - self.window + 1:self.t + 1] / self.open[self.t - self.window + 1],
            self.low[self.t - self.window + 1:self.t + 1] / self.low[self.t - self.window + 1],
            self.list_holding[self.t - self.window + 1:self.t + 1]
        ]).T]
        return obs

    def step(self, action):
        # action = [buy threshold, sell threshold]
        print(self.steps, self.t, self.close[self.t] / self.close0,
              self.list_asset[self.t] / self.asset0, action,
              self.list_asset[self.t] / self.asset0 -
              self.close[self.t] / self.close0)
        order_price_b = self.ma[self.t] + self.mstd[self.t] * action[0]
        order_price_s = self.ma[self.t] + self.mstd[self.t] * action[1]
        if self.cash > 0 and order_price_b > self.alpha.close[self.t + 1]:
            take_price = self.alpha.close[self.t + 1]
            self.stock = self.cash / take_price * (1 - self.cost)
            self.cash = 0
            print('buy')
        elif self.stock > 0 and order_price_s < self.alpha.close[self.t + 1]:
            take_price = self.alpha.close[self.t + 1]
            self.cash = self.stock * take_price * (1 - self.cost)
            self.stock = 0
            print('sell')
        self.list_asset[self.t + 1] = (
            self.stock * self.alpha.close[self.t + 1] + self.cash)
        self.list_cash = [self.cash > 0] * self.T
        self.list_holding[self.t + 1] = self.cash > 0
        reward = (self.list_asset[self.t + 1] -
                  self.list_asset[self.t]) / self.list_asset[self.t]
        # alternative: subtract the market return
        #reward -= (self.close[self.t + 1] - self.close[self.t]) / self.close[self.t]
        done = self.steps > 2000
        self.steps += 1
        obs = self._next_observation()
        self.t += 1
        return obs, reward, done, {}

    def reset(self):
        print('reset')
        # np.random.randint replaces the deprecated np.random.random_integers
        # (exclusive high adjusted to keep the same inclusive range)
        self.t = self.window + np.random.randint(0, self.T - 2000 + 1)
        self.steps = 0
        self.list_cash = self.T * [1]
        self.list_holding = self.T * [1]
        self.cash = 1
        self.stock = 0
        self.asset0 = 1
        self.close0 = self.close[self.t + 1]
        return self._next_observation()

    def render(self, mode='human'):
        pass
class QtradeEnv(gym.Env):
    """Custom environment that follows the gym interface."""
    metadata = {'render.modes': ['human']}

    def __init__(self):
        super(QtradeEnv, self).__init__()
        self.root_dir = '/Users/liuyehong/Dropbox/CICC/Algorithm_Trading/Platform2/OHLC/data/1Min/'
        self.list_dir = [d for d in os.listdir(self.root_dir) if '.csv' in d]
        self.df_dir = np.random.choice(self.list_dir)
        self.df = pd.read_csv(self.root_dir + self.df_dir)
        self.alpha = Alpha(self.df)
        self.cost = 0  # -0.00005
        # internal interest rate (helps avoid getting stuck during
        # long-horizon training)
        self.interest_rate = 0 / 240 / 240
        self.window = 50
        self.cash = 1
        self.stock = 0
        self.t = self.window + 1
        self.i = 0
        self.T = len(self.df)
        self.total_steps = int(self.T / 5.)
        self.list_asset = np.ones(self.T)
        self.list_holding = np.ones(self.T)

        # alpha
        self.close = self.alpha.close
        self.high = self.alpha.high
        self.low = self.alpha.low
        self.open = self.alpha.open
        self.vol = self.alpha.vol
        self.close_diff = self.alpha.close_diff()
        self.high_diff = self.alpha.high_diff()
        self.low_diff = self.alpha.low_diff()
        self.open_diff = self.alpha.open_diff()
        self.ma = self.alpha.moving_average(window=self.window)
        self.ema = self.alpha.EMA(window=self.window)
        self.dema = self.alpha.DEMA(window=self.window)
        self.kama = self.alpha.KAMA(window=self.window)
        self.sma = self.alpha.SMA(window=self.window)
        self.tema = self.alpha.TEMA(window=self.window)
        self.trima = self.alpha.TRIMA(window=self.window)
        self.linearreg_slope = self.alpha.LINEARREG_SLOPE(window=self.window)
        self.mstd = self.alpha.moving_std(window=self.window)
        self.bollinger_lower_bound = self.alpha.bollinger_lower_bound(
            window=self.window, width=1)
        self.bollinger_upper_bound = self.alpha.bollinger_upper_bound(
            window=self.window, width=1)
        self.moving_max = self.alpha.moving_max(window=self.window)
        self.moving_min = self.alpha.moving_min(window=self.window)
        self.moving_med = self.alpha.moving_med(window=self.window)

        # Actions of the format Buy x%, Sell x%, Hold, etc.
        # The action space range must be symmetric, and the order matters.
        self.action_space = spaces.Box(low=np.array([-np.inf, -np.inf]),
                                       high=np.array([np.inf, np.inf]),
                                       dtype=np.float16)
        # Observation: the last `window` bars of OHLC prices normalized by
        # the moving average (4 channels)
        self.observation_space = spaces.Box(low=-np.inf,
                                            high=np.inf,
                                            shape=(1, self.window, 4),
                                            dtype=np.float16)

    def _next_observation(self):
        obs = [
            np.array([
                self.close[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                self.high[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                self.open[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                self.low[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #self.ma[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #self.ema[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #self.dema[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #self.kama[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #self.sma[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #self.tema[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #self.trima[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #self.bollinger_lower_bound[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #self.bollinger_upper_bound[self.t - self.window + 1:self.t + 1] / self.ma[self.t],
                #np.zeros(self.window),  # find the optimal window size with a constant observation
                #self.list_holding[self.t - self.window + 1:self.t + 1],
                #self.list_cash[self.t - self.window + 1:self.t + 1],
            ]).T
        ]
        return obs

    def _utility(self, x):
        # both branches currently scale by 1; kept as a hook for
        # asymmetric utility
        if x > 0:
            return 1 * x
        else:
            return 1 * x

    def step(self, action):
        # action = [buy threshold, sell threshold]
        order_price_b = np.floor(
            100 * (self.ma[self.t] + self.mstd[self.t] * action[0])) / 100.
        order_price_s = np.ceil(
            100 * (self.ma[self.t] + self.mstd[self.t] * action[1])) / 100.
        if self.cash > 0 and order_price_b > self.close[self.t]:
            take_price = self.close[self.t]
            self.stock = self.cash / take_price * (1 - self.cost)
            self.cash = 0
            print('buy: ' + str(take_price))
            print(self.steps, self.t, self.close[self.t] / self.close0,
                  self.list_asset[self.t] / self.asset0, action,
                  self.list_asset[self.t] / self.asset0 -
                  self.close[self.t] / self.close0)
        elif self.stock > 0 and order_price_s < self.close[self.t]:
            take_price = self.close[self.t]
            self.cash = self.stock * take_price * (1 - self.cost)
            self.stock = 0
            print('sell: ' + str(take_price))
            print(self.i, self.steps, self.t,
                  self.close[self.t] / self.close0,
                  self.list_asset[self.t] / self.asset0, action,
                  self.list_asset[self.t] / self.asset0 -
                  self.close[self.t] / self.close0)
        self.list_asset[self.t + 1] = (
            self.stock * self.alpha.close[self.t + 1] + self.cash)
        self.list_cash = [self.cash > 0] * self.T
        self.list_holding[self.t + 1] = self.cash > 0
        # it is important to use the relative return as the reward
        reward = (self.list_asset[self.t + 1] - self.list_asset[self.t]) / self.list_asset[self.t] - \
                 (self.close[self.t + 1] - self.close[self.t]) / self.close[self.t]
        if self.cash > 0:
            reward += -self.interest_rate
        done = self.steps > self.total_steps
        self.steps += 1
        obs = self._next_observation()
        self.t += 1
        return obs, reward, done, {}

    def reset(self):
        # To avoid getting stuck in a local optimum, increase the cost
        # step by step:
        #if self.cost < 0.0005:
        #    self.cost += 0.00001
        #    print('cost' + str(self.cost))
        self.i += 1
        self.df_dir = np.random.choice(self.list_dir)
        print(self.df_dir)
        self.df = pd.read_csv(self.root_dir + self.df_dir)
        print('reset: ' + str(self.i))
        # np.random.randint replaces the deprecated np.random.random_integers
        # (exclusive high adjusted to keep the same inclusive range)
        self.t = 1 + self.window + np.random.randint(
            0, self.T - self.total_steps - self.window)
        self.list_cash = self.T * [1]
        self.list_holding = self.T * [1]
        self.steps = 0
        self.cash = self.close[self.t]
        self.stock = 0
        self.asset0 = self.close[self.t]
        self.close0 = self.close[self.t]
        return self._next_observation()

    def render(self, mode='human'):
        pass
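# Worked example of the relative-return reward used above: the agent is
# rewarded for beating buy-and-hold, not for raw profit. Purely
# illustrative numbers.
asset_t, asset_t1 = 1.00, 1.02   # portfolio grows 2%
close_t, close_t1 = 10.0, 10.1   # price grows 1%
reward = (asset_t1 - asset_t) / asset_t - (close_t1 - close_t) / close_t
print(round(reward, 4))  # 0.01 -> beat the market by one percentage point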
config = {
    'begin_date': '20140328',
    'end_date': '20170801',
    'data_path': data_path,
    'result_path': result_path,
    'intraday_path': intraday_path,
    'h5_path_5min': h5_path_5min,
    'h5_path_1min': h5_path_1min,
    # alternatives: ('volume_price', 'inter_day', 'money_flow', 'financial')
    'data_source': ('volume_price', 'money_flow', 'style')
}

print('Computing alpha factors')
print(config)
psx_alpha = Alpha(config)
#psx_alpha.work_data(data)
psx_alpha.load_data()
time1 = time.time()
print('load data time = %s seconds' % (time1 - time0))
# run the alpha_daily_000 factor logic
psx_alpha.get_new(psx_alpha.alpha_worldquant101_60)
#psx_alpha.get_new(psx_alpha.alpha_recount_000)
# run intraday factor computation, 1 min
#psx_alpha.get_intraday_1min([alpha.fstcount_1min_000])
# run intraday factor computation, 5 min
#psx_alpha.get_intraday_1min([alpha.fstcount_5min_000])
from alpha import Alpha


def read_logs(path):
    logs = []
    with open(path) as f:
        for line in f:
            logs.append(tuple(line.split()))
    return logs


logs = read_logs("../logs/log2.txt")
alpha = Alpha(logs)
alpha.create_graph('graph2')