def real_data():
    print('For Real Data')
    train_data, test_data = split_data(round_return_rate(get_data()))
    # Reindex so the data starts at index 0
    train_data.index -= 1000
    # Get a 3 level uniform quantizer of training data for observations
    codebook, bounds = quantize(
        np.append(train_data['ibm'].values, train_data['msft'].values))
    # Init trading env
    obs_space = define_observations(n_stocks=2, options=[-1, 0, 1])
    env = TradingEnv(train_data,
                     init_capital=1000,
                     is_discrete=False,
                     source='M')
    env.specify_quantization_ranges(bounds)
    # Init Q table
    Q = QLearningTable(actions=list(range(env.action_space_size)),
                       observations=obs_space)
    Q.setup_table()
    # Train method
    update(env, Q)
    test_env = TradingEnv(test_data,
                          init_capital=100,
                          is_discrete=False,
                          source='M')
    print(tabulate(Q.q_table, tablefmt="markdown", headers="keys"))
    # Print the greedy (Q-maximizing) action for each observation
    max_actions = Q.q_table.idxmax(axis=1).values
    for i in max_actions:
        print('maximizing action', env.actions[i])
    test(test_env, Q)
    return
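The quantize helper used above is not shown in these examples. A minimal sketch of a 3-level uniform quantizer that returns a codebook of bin midpoints and the interior decision boundaries could look like the following (the name quantize_sketch and the return layout are assumptions):

import numpy as np

def quantize_sketch(values, n_levels=3):
    # Hypothetical stand-in for the quantize() helper referenced above:
    # split the observed range into n_levels equal-width bins.
    lo, hi = float(np.min(values)), float(np.max(values))
    edges = np.linspace(lo, hi, n_levels + 1)
    bounds = edges[1:-1]                       # interior decision boundaries
    codebook = (edges[:-1] + edges[1:]) / 2.0  # one representative value per bin
    return codebook, bounds
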
def train_markov_test_real():
    # Load real data and split into training and testing
    real_train_data, real_test_data = split_data(round_return_rate(get_data()))
    real_train_data.index -= 1000
    # Get codebook and bounds for a 3-level uniform quantizer
    codebook, bounds = quantize(real_train_data['msft'].values)
    # Compute an empirical transition matrix over the training data using the
    # bounds from the 3-level uniform quantizer
    P = empirical_transition_matrix(real_train_data['msft'].values, bounds)
    # Generate markov data according to this transition matrix and codebook values
    train_data, test_data = split_data(
        create_custom_markov_samples(5000, codebook, P))
    # Define observation space
    obs_space = [[-1, 0], [0, 0], [1, 0]]
    # Environment for Markov samples
    env = TradingEnv(train_data,
                     init_capital=100000,
                     is_discrete=False,
                     source='M')
    # Set quantization ranges for bounds from 3 level uniform quantizer
    env.specify_quantization_ranges(bounds)
    Q = QLearningTable(actions=list(range(env.action_space_size)),
                       observations=obs_space)
    Q.setup_table()
    # Train Q learning agent
    update(env, Q)
    # Create a new trading environment for testing the policy on real data
    test_env = TradingEnv(real_test_data,
                          init_capital=100,
                          is_discrete=False,
                          source='Real')
    test_env.specify_quantization_ranges(bounds)
    print(tabulate(Q.q_table, tablefmt="markdown", headers="keys"))
    test(test_env, Q)
    return
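empirical_transition_matrix is only referenced above. One way to estimate such a matrix from quantized returns, sketched under the assumption that the states are the quantizer levels implied by bounds, is:

import numpy as np

def empirical_transition_matrix_sketch(values, bounds):
    # Hypothetical stand-in: map each return to its quantization level, then
    # count level-to-level transitions between consecutive days and normalize.
    states = np.digitize(values, bounds)          # level index 0 .. len(bounds)
    n = len(bounds) + 1
    counts = np.zeros((n, n))
    for prev, curr in zip(states[:-1], states[1:]):
        counts[prev, curr] += 1
    row_sums = counts.sum(axis=1, keepdims=True)
    # Rows with no observations stay all-zero instead of dividing by zero.
    return np.divide(counts, row_sums, out=np.zeros_like(counts), where=row_sums > 0)
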
Example #3
def test_real_data_markov_2():
    print('Test Different Model for Real Data')
    # Index must start at 0
    train_data = create_markov_memory_2(2500)
    # train_data.index -= 100
    # Init trading env with an IID source
    env = TradingEnv(train_data,
                     init_capital=100,
                     is_discrete=False,
                     source='IID')
    # Init Q table
    Q = QLearningTable(actions=list(range(env.action_space_size)))
    # Train method
    update(env, Q)

    # Test on real data
    data, test_data = split_data(round_return_rate(get_data()))
    test_env = TradingEnv(test_data,
                          init_capital=100,
                          is_discrete=False,
                          source='Real')
    print(Q.q_table)
    test(test_env, Q)

    return
Example #4
def markov_data():
    print('For Markov Source')
    # Get train and test data for 5000 days where return rates depend on the previous day
    train_data, test_data = split_data(create_markov(5000))
    test_data.index -= (train_data.shape[0] + test_data.shape[0]) - 100
    # Init trading environment
    env = TradingEnv(train_data,
                     init_capital=100,
                     is_discrete=True,
                     source='M')
    # Init Q-learning table
    Q = QLearningTable(actions=list(range(env.action_space_size)),
                       observations=train_data.drop_duplicates().values)
    Q.setup_table()
    # Training method
    update(env, Q)
    test_env = TradingEnv(test_data,
                          init_capital=100,
                          is_discrete=True,
                          source='M')
    print(tabulate(Q.q_table, tablefmt="markdown", headers="keys"))
    test(test_env, Q)
    return
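create_markov is not included in these examples. A simple way to generate return rates whose state depends only on the previous day (the state set {-1, 0, 1}, the persistence probability, and the column name are all assumptions for illustration) would be:

import numpy as np
import pandas as pd

def create_markov_sketch(n_days, p_stay=0.7):
    # Hypothetical stand-in for create_markov(): with probability p_stay the
    # return state repeats, otherwise it jumps to one of the other states.
    states = [-1, 0, 1]
    returns = [int(np.random.choice(states))]
    for _ in range(n_days - 1):
        if np.random.rand() < p_stay:
            returns.append(returns[-1])
        else:
            returns.append(int(np.random.choice([s for s in states if s != returns[-1]])))
    return pd.DataFrame({'return_rate': returns})
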
def iid_data():
    print('For IID Source')
    # Get train and test data for 5000 days where return rate is i.i.d
    train_data, test_data = split_data(create_iid(5000))
    test_data.index -= (train_data.shape[0] + test_data.shape[0]) - 1000
    # Init trading environment
    env = TradingEnv(train_data,
                     init_capital=100,
                     is_discrete=True,
                     source='IID')
    # Init Q-learning table
    Q = QLearningTable(actions=list(range(env.action_space_size)),
                       observations=[[0, 0]])
    Q.setup_table()
    # Training method
    update(env, Q)
    print(tabulate(Q.q_table, tablefmt="markdown", headers="keys"))
    test_env = TradingEnv(test_data,
                          init_capital=100,
                          is_discrete=True,
                          source='IID')
    test(test_env, Q)
    return
Example #6
def real_data():
    print('For Real Data')
    train_data, test_data = split_data(round_return_rate(get_data()))
    # Reindex so the data starts at 0
    train_data.index -= 100
    # Init trading env
    env = TradingEnv(train_data, init_capital=100, is_discrete=False)
    # Init Q table
    Q = QLearningTable(actions=list(range(env.action_space_size)))
    # Train method
    update(env, Q)
    test_env = TradingEnv(test_data, init_capital=100, is_discrete=False)
    print(Q.q_table)
    test(test_env, Q)
    return
Example #7
def mix():
    print('For a mixture of Markov and IID Source')
    # Get train and test data for 5000 days
    train_data, test_data = split_data(create_markov_iid_mix(5000))
    test_data.index -= (train_data.shape[0] + test_data.shape[0]) - 100
    # Init trading env; not discrete because the IID returns are drawn from NumPy's uniform distribution
    env = TradingEnv(train_data, init_capital=100, is_discrete=False)
    # Init Q-learning table
    Q = QLearningTable(actions=list(range(env.action_space_size)))
    # Training method
    update(env, Q)
    test_env = TradingEnv(test_data, init_capital=100, is_discrete=False)
    print(Q.q_table)
    test(test_env, Q)
    return
Example #8
def markov_data2():
    print('For Markov Memory 2 Source')
    # Get train and test data for 5000 days where return rates depend on the previous two days
    train_data, test_data = split_data(create_markov_memory_2(5000))
    test_data.index -= (train_data.shape[0] + test_data.shape[0]) - 100
    # Init trading environment
    env = TradingEnv(train_data, init_capital=100, is_discrete=True)
    # Init Q-learning table
    Q = QLearningTable(actions=list(range(env.action_space_size)))
    # Training method
    update(env, Q)
    test_env = TradingEnv(test_data, init_capital=100, is_discrete=True)
    print(Q.q_table)
    test(test_env, Q)
    return
Example #9
def iid_data():
    print('For IID Source')
    # Get train and test data for 5000 days where return rates are i.i.d.
    train_data, test_data = split_data(create_iid(5000))
    test_data.index -= (train_data.shape[0] + test_data.shape[0]) - 100
    #train_data = pd.read_pickle("data/train_data_iid")
    #test_data = pd.read_pickle("data/test_data_iid")
    # Init trading environment
    env = TradingEnv(train_data, init_capital=100, is_discrete=False)
    # Init Q-learning table
    Q = QLearningTable(actions=list(range(env.action_space_size)))
    # Training method
    update(env, Q)
    #print(Q.q_table)
    test_env = TradingEnv(test_data, init_capital=100, is_discrete=False)
    test(test_env, Q)
    return
Example #10
def train_markov_real():
    # Generate Markov data to train the Q table on
    train_data, ignore_test_data = split_data(create_markov(5000))
    env = TradingEnv(train_data,
                     init_capital=100,
                     is_discrete=True,
                     source='M')
    Q = QLearningTable(actions=list(range(env.action_space_size)))
    # Training method
    update(env, Q)
    # Get real data for testing
    real_train_data, real_test_data = split_data(round_return_rate(get_data()))
    test_env = TradingEnv(real_test_data,
                          init_capital=100,
                          is_discrete=True,
                          source='Real')
    print(tabulate(Q.q_table, tablefmt="markdown", headers="keys"))
    test(test_env, Q)
    return
Example #11
def real_data():
    print('For Real Data')
    train_data, test_data = split_data(round_return_rate(get_data()))
    # Reindex so the data starts at 0
    train_data.index -= 100
    # Init trading env
    env = TradingEnv(train_data,
                     init_capital=100,
                     is_discrete=False,
                     source='Real')
    # Init Q table
    Q = QLearningTable(actions=list(range(env.action_space_size)))
    # Train method
    update(env, Q)
    test_env = TradingEnv(test_data,
                          init_capital=100,
                          is_discrete=False,
                          source='Real')
    print(tabulate(Q.q_table, tablefmt="markdown", headers="keys"))
    test(test_env, Q)
    return
def markov_data2():
    print('For Markov Memory 2 Source')
    # Get train and test data for 5000 days where return rates depend on the previous two days
    train_data, test_data = split_data(create_markov_memory_2(5000))
    test_data.index -= (train_data.shape[0] + test_data.shape[0]) - 1000
    # Init trading environment
    env = TradingEnv(train_data,
                     init_capital=100,
                     is_discrete=True,
                     source='M2')
    # Init q learning Q_table
    Q = QLearningTable(actions=list(range(env.action_space_size)),
                       observations=[[0, 0]])
    # Training method
    update(env, Q)
    test_env = TradingEnv(test_data,
                          init_capital=100,
                          is_discrete=True,
                          source='M2')
    print(tabulate(Q.q_table, tablefmt="markdown", headers="keys"))
    test(test_env, Q)
    return
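The test helper called throughout these examples is not shown. A plausible greedy-evaluation loop, assuming the environment follows the same reset/step interface used during training and that the Q table rows are keyed by str(observation), is:

def test_sketch(test_env, Q):
    # Hypothetical evaluation pass: always take the greedy (Q-maximizing) action.
    # The reset() method and the str(observation) row key are assumptions.
    observation = test_env.reset()
    while True:
        action = Q.q_table.loc[str(observation)].idxmax()
        observation, reward, done = test_env.step(action)
        if done:
            break
    print('final reward', reward)
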
Example #13
import numpy as np  # needed for np.argmax below; GrowUp and QLearningTable come from the example's own modules

def update():
    # The original snippet starts mid-function; the enclosing def, episode loop,
    # and reset call below are reconstructed, and the episode count is illustrative.
    for episode in range(100):
        # env.reset() is assumed to return the initial observation for the episode
        observation = env.reset()
        while True:
            # RL choose action based on observation
            action = RL.choose_action(observation)
            # RL take action and get next observation and reward
            observation_, reward, done = env.step(action)

            # RL learn from this transition
            RL.learn(observation, action, reward, observation_)
            # swap observation
            observation = observation_
            # break while loop when end of this episode
            if done:
                #RL.epsilon += 0.001
                break
        print(reward)

    print(RL.q_table)
    print(reward)
    G = GrowUp()
    print("test")
    for i in range(len(RL.q_table) - 1):
        G.step(np.argmax(RL.q_table[i]))
    print(G.score)


if __name__ == "__main__":
    env = GrowUp()
    RL = QLearningTable(actions=list(range(env.n_actions)))

    update()
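
QLearningTable.learn is not shown in the snippet above; the standard tabular Q-learning update it presumably applies is Q(s, a) += alpha * (r + gamma * max_a' Q(s', a') - Q(s, a)). A minimal dictionary-based sketch (the function name and the alpha/gamma values are illustrative):

def q_learning_update(q_table, state, action, reward, next_state, actions,
                      alpha=0.1, gamma=0.9):
    # q_table maps (state, action) -> value; actions is the list of legal actions.
    current = q_table.get((state, action), 0.0)
    best_next = max(q_table.get((next_state, a), 0.0) for a in actions)
    q_table[(state, action)] = current + alpha * (reward + gamma * best_next - current)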