Example #1
    # Separate training data
    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio', 'close_ma5_ratio',
        'volume_ma5_ratio', 'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio', 'close_ma60_ratio',
        'volume_ma60_ratio', 'close_ma120_ratio', 'volume_ma120_ratio'
    ]
    training_data = training_data[features_training_data]

    # Start reinforcement learning
    policy_learner = PolicyLearner(stock_code=stock_code,
                                   chart_data=chart_data,
                                   training_data=training_data,
                                   min_trading_unit=1,
                                   max_trading_unit=2,
                                   delayed_reward_threshold=.2,
                                   lr=.001)
    policy_learner.fit(balance=10000000,
                       num_epoches=1000,
                       discount_factor=0,
                       start_epsilon=.5)

    # Save the policy network to a file
    model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % stock_code)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
    policy_learner.policy_network.save_model(model_path)
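The moving-average ratio features selected above (`close_ma5_ratio`, `volume_ma120_ratio`, ...) are produced earlier by the project's `data_manager` helpers, which the excerpt does not show. A minimal sketch of how such features are typically derived with pandas; the function name and exact formulas here are assumptions, not the project's actual code:

    import pandas as pd

    def build_ma_ratio_features(chart_data: pd.DataFrame) -> pd.DataFrame:
        # Hypothetical stand-in for data_manager.preprocess / build_training_data.
        data = chart_data.copy()
        for window in (5, 10, 20, 60, 120):
            close_ma = data['close'].rolling(window).mean()
            volume_ma = data['volume'].rolling(window).mean()
            data['close_ma%d_ratio' % window] = (data['close'] - close_ma) / close_ma
            data['volume_ma%d_ratio' % window] = (data['volume'] - volume_ma) / volume_ma
        # Day-over-day ratios such as close_lastclose_ratio.
        data['close_lastclose_ratio'] = data['close'].pct_change()
        data['volume_lastvolume_ratio'] = data['volume'].pct_change()
        return data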
Example #2
    # Filter by date range
    training_data = training_data[(training_data['date'] >= '2018-01-01') &
                                  (training_data['date'] <= '2018-01-31')]
    training_data = training_data.dropna()

    # Separate chart data
    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    chart_data = training_data[features_chart_data]

    # Separate training data
    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio',
        'close_ma5_ratio', 'volume_ma5_ratio',
        'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio',
        'close_ma60_ratio', 'volume_ma60_ratio',
        'close_ma120_ratio', 'volume_ma120_ratio'
    ]
    training_data = training_data[features_training_data]

    # Start non-learning investment simulation
    policy_learner = PolicyLearner(
        stock_code=stock_code, chart_data=chart_data, training_data=training_data,
        min_trading_unit=1, max_trading_unit=3)
    policy_learner.trade(balance=10000000,
                         model_path=os.path.join(
                             settings.BASE_DIR,
                             'models/{}/model_{}.h5'.format(stock_code, model_ver)))
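Example #2 reuses an already-trained network for a non-learning simulation via `trade()`. The method itself is not shown; a plausible sketch of its shape, assuming (as the call sites in these examples suggest) that it loads the saved network and replays a single epoch with exploration and weight updates disabled:

    # Sketch of PolicyLearner.trade() (method body fragment; the real
    # implementation may differ):
    def trade(self, model_path=None, balance=2000000):
        if model_path is None:
            return
        self.policy_network.load_model(model_path=model_path)
        self.fit(balance=balance, num_epoches=1, learning=False)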
Example #3
    parser.add_argument('COIN', type=str, help="coin type?")
    parser.add_argument('BALANCE', type=int, help="initial balance?")

    args = parser.parse_args()
    MODEL = args.MODEL
    COIN = args.COIN
    BALANCE = args.BALANCE

    log_dir = os.path.join(settings.BASE_DIR, 'logs/%s' % COIN)
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)  # FileHandler fails if the log directory is missing
    timestr = settings.get_time_str()
    file_handler = logging.FileHandler(filename=os.path.join(
        log_dir, "%s_%s.log" % (COIN, timestr)),
                                       encoding='utf-8')
    stream_handler = logging.StreamHandler()
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
                        handlers=[file_handler, stream_handler],
                        level=logging.DEBUG)

    policy_learner = PolicyLearner(coin_code=COIN,
                                   coin_chart=None,
                                   training_data=None,
                                   min_trading_unit=1,
                                   max_trading_unit=3)

    policy_learner.trade(balance=BALANCE,
                         model_path=os.path.join(
                             settings.BASE_DIR,
                             'models/{}/model_{}.h5'.format(COIN, MODEL)))
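The excerpt above uses `parser` and `args.MODEL` before defining them, so an argparse preamble along these lines must precede it (a hedged reconstruction; the help string is a guess):

    import argparse
    import logging
    import os

    parser = argparse.ArgumentParser()
    parser.add_argument('MODEL', type=str, help="model version (timestamp)?")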
Example #4
        'close_lastclose_ratio', 'volume_lastvolume_ratio', 'close_ma5_ratio',
        'volume_ma5_ratio', 'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio', 'close_ma60_ratio',
        'volume_ma60_ratio', 'close_ma120_ratio', 'volume_ma120_ratio'
    ]
    training_data = training_data[features_training_data]

    print("coin_len", len(coin_chart))
    print("train_len", len(training_data))
    print(FROM, TO)

    policy_learner = PolicyLearner(coin_code=COIN,
                                   coin_chart=coin_chart,
                                   training_data=training_data,
                                   model_ver=model_ver,
                                   min_trading_unit=0.001,
                                   max_trading_unit=0.01,
                                   delayed_reward_threshold=.3,
                                   start_date=FROM,
                                   end_date=TO,
                                   lr=.09)

    print("policy learner start")

    policy_learner.fit(balance=1000000,
                       num_epoches=10,
                       discount_factor=0,
                       start_epsilon=.5)

    model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % COIN)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
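Example #4 stops right after creating the model directory. Mirroring the save step in Examples #1 and #12, the continuation presumably looks like this (`timestr` assumed to come from `settings.get_time_str()` as in Example #3):

    model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
    policy_learner.policy_network.save_model(model_path)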
Example #5
    log_dir = os.path.join(settings.BASE_DIR, 'logs/%s' % STOCK_CODE)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    file_handler = logging.FileHandler(filename=os.path.join(
        log_dir, "%s_%s.log" % (STOCK_CODE, timestr)), encoding='utf-8')
    stream_handler = logging.StreamHandler()  # send logging output to stdout
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
                        handlers=[file_handler, stream_handler], level=logging.DEBUG)

    # Start reinforcement learning
    if LEARNING:
        chart_data, data = prepare_data(STOCK_CODE, MARKET_CODE, TRAINING_START_DATE, TRAINING_END_DATE)

        policy_learner = PolicyLearner(
            stock_code=STOCK_CODE, chart_data=chart_data, training_data=data,
            min_trading_unit=MIN_TRADING_UNIT, max_trading_unit=MAX_TRADING_UNIT,
            delayed_reward_threshold=DELAYED_REWARD_THRESHOLD, lr=LEARNING_RATE)
            
        policy_learner.fit(balance=INITIAL_BALANCE, num_epoches=NUM_EPOCHS,
                           max_memory=MAX_MEMORY, discount_factor=DISCOUNT_FACTOR,
                           start_epsilon=START_EPSILON, learning=LEARNING)

        # Save the policy network to a file
        model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % STOCK_CODE)
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)
        #model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
        model_path = os.path.join(model_dir, 'model_%s.h5' % STOCK_CODE)
        policy_learner.policy_network.save_model(model_path)

    if SIMULATION:
        chart_data, data = prepare_data(STOCK_CODE, MARKET_CODE, SIMULATION_START_DATE, SIMULATION_END_DATE)
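The SIMULATION branch is cut off after `prepare_data`. A hedged continuation mirroring the trade pattern used elsewhere in these examples; the constructor arguments and model filename are assumptions:

        policy_learner = PolicyLearner(
            stock_code=STOCK_CODE, chart_data=chart_data, training_data=data,
            min_trading_unit=MIN_TRADING_UNIT, max_trading_unit=MAX_TRADING_UNIT)
        policy_learner.trade(balance=INITIAL_BALANCE,
                             model_path=os.path.join(
                                 settings.BASE_DIR,
                                 'models/%s/model_%s.h5' % (STOCK_CODE, STOCK_CODE)))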
Example #6
        #'close_ma10_ratio', 'volume_ma10_ratio',
        #'close_ma20_ratio', 'volume_ma20_ratio',
        #'close_ma60_ratio', 'volume_ma60_ratio',
        #'close_ma120_ratio', 'volume_ma120_ratio'
    ]
    training_data = training_data[features_training_data]

    # Start reinforcement learning
    policy_learner = PolicyLearner(
        stock_code=stock_code,
        chart_data=chart_data,
        training_data=training_data,
        policy_model_path=os.path.join(
            settings.BASE_DIR,
            'models/{}/model_{}.h5'.format(model_code, policy_model_ver)),
        value_model_path=os.path.join(
            settings.BASE_DIR,
            'models/{}/model_{}.h5'.format(model_code, value_model_ver)),
        lr=0.00000001,
        discount_factor=0,
        start_epsilon=0,
        num_past_input=119,
        load_weight_and_learn=False)

    # This is the key part
    policy_learner.fit(balance=1, num_epoches=1000)

    # Save the policy network to a file
    model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % model_code)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
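Example #6 passes both a policy and a value network path, which points to an actor-critic style learner. How PolicyLearner restores the two networks is not shown; a sketch assuming the `.h5` files are full saved Keras models (if they hold only weights, `model.load_weights` would be needed instead):

    from keras.models import load_model

    policy_net = load_model(policy_model_path)  # assumption: full saved models
    value_net = load_model(value_model_path)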
Example #7
        features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
        chart_data = training_data[features_chart_data]

        # Separate training data
        features_training_data = [
            'high_low_ratio', 'open_close_ratio',
            'high_open_ratio', 'low_open_ratio',
            'high_close_ratio', 'low_close_ratio',
            'close_lastclose_ratio', 'volume_lastvolume_ratio'
            ]
        training_data = training_data[features_training_data]

        # Start non-learning investment simulation
        if isFirst:
            policy_learner = PolicyLearner(
                stock_code=stock_code, chart_data=chart_data, training_data=training_data,
                lr=0.00000001, discount_factor=0, start_epsilon=0, num_past_input=119)
            isFirst = False
        else:
            policy_learner.stock_code = stock_code
            policy_learner.chart_data = chart_data
            policy_learner.training_data = training_data
        policy_learner.trade(balance=20000,
                             model_path=os.path.join(
                                 settings.BASE_DIR,
                                 'models/{}/model_{}.h5'.format(model_code, model_ver)))
        buy_probs.append(policy_learner.action[0])

    buy_probs = np.array(buy_probs)
    if np.max(buy_probs) > 0.5:  # place a buy
        depth = client.get_order_book(symbol='{}BTC'.format(learned_coins[np.argmax(buy_probs)]))
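`client.get_order_book` is the python-binance depth endpoint; it returns a dict whose 'bids' and 'asks' entries are lists of [price, quantity] strings, best price first. A short sketch of reading the best ask from the depth fetched above:

        best_ask_price = float(depth['asks'][0][0])  # [price, qty] as strings
        best_ask_qty = float(depth['asks'][0][1])
        print(best_ask_price, best_ask_qty)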
Example #8
    def learnFunc(self):
        if self.code is None or self.df is None:
            return
        self.change_value.emit(ZERO)
        # Data preprocessing
        code = self.code
        chart_data = self.df
        prep_data = data_manager.preprocess(chart_data)
        training_data = data_manager.build_training_data(prep_data)
        training_data = training_data.dropna()

        # Separate chart data
        feature_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
        chart_data = training_data[feature_chart_data]

        # emit
        self.change_value.emit(TWENTY_FIVE)

        # Separate training data
        features_training_data = [
            'open_lastclose_ratio',
            'high_close_ratio',
            'low_close_ratio',
            'close_lastclose_ratio',
            'volume_lastvolume_ratio',
            'close_ma5_ratio',
            'volume_ma5_ratio',
            'close_ma10_ratio',
            'volume_ma10_ratio',
            'close_ma20_ratio',
            'volume_ma20_ratio',
            'close_ma60_ratio',
            'volume_ma60_ratio',
            'close_ma120_ratio',
            'volume_ma120_ratio',
        ]
        training_data = training_data[features_training_data]

        # Save the policy network to a file
        self.createFolder('model')
        mdir = os.path.join(settings.BASE_DIR, 'model')
        self.createFolder(os.path.join(mdir, code))

        model_dir = os.path.join(mdir, code)
        model_path = os.path.join(model_dir, 'model%s.h5' % code)

        # If model_path does not exist, train a model and save it there;
        # if model_path exists, load that model and predict.
        print(model_path)

        # emit
        self.change_value.emit(FIFTY)

        if not os.path.isfile(model_path):
            start_time = time.time()
            policy_learner = PolicyLearner(stock_code=code,
                                           chart_data=chart_data,
                                           training_data=training_data,
                                           fig=self.fig,
                                           canvas=self.canvas,
                                           min_trading_unit=1,
                                           max_trading_unit=2,
                                           delayed_reward_threshold=0.2,
                                           lr=0.001)
            policy_learner.fit(balance=10000000,
                               num_epoches=200,
                               discount_factor=0,
                               start_epsilon=0.5)
            end_time = time.time()
            policy_learner.policy_network.save_model(model_path)
            print("LearningTime: {} sec".format(end_time - start_time))
        else:
            start_time = time.time()
            policy_learner = PolicyLearner(stock_code=code,
                                           chart_data=chart_data,
                                           training_data=training_data,
                                           fig=self.fig,
                                           canvas=self.canvas,
                                           min_trading_unit=1,
                                           max_trading_unit=2)
            end_time = time.time()
            print("LearningTime: {} sec".format(end_time - start_time))
            policy_learner.trade(balance=1000000,
                                 model_path=os.path.join(
                                     model_dir, 'model%s.h5' % (code)))

        # emit
        self.change_value.emit(A_HUNDRED)
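Example #8 is a Qt worker method: `change_value` and the progress constants (ZERO, TWENTY_FIVE, FIFTY, A_HUNDRED) live outside the excerpt. A hedged sketch of the scaffolding it implies, using PyQt5; the class name and constant values are assumptions:

    from PyQt5.QtCore import QThread, pyqtSignal

    ZERO, TWENTY_FIVE, FIFTY, A_HUNDRED = 0, 25, 50, 100

    class LearnWorker(QThread):
        change_value = pyqtSignal(int)  # progress percentage consumed by the GUI

        def run(self):
            self.learnFunc()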
Example #9
    training_data = data_manager.build_training_data(prep_data)

    training_data = training_data[(training_data['date'] >= '2018-07-01')
                                  & (training_data['date'] <= '2018-10-01')]
    training_data = training_data.dropna()

    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    coin_chart = training_data[features_chart_data]

    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio', 'close_ma5_ratio',
        'volume_ma5_ratio', 'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio', 'close_ma60_ratio',
        'volume_ma60_ratio', 'close_ma120_ratio', 'volume_ma120_ratio'
    ]

    training_data = training_data[features_training_data]

    policy_learner = PolicyLearner(coin_code=coin_code,
                                   coin_chart=coin_chart,
                                   training_data=training_data,
                                   min_trading_unit=1,
                                   max_trading_unit=3)

    policy_learner.trade(balance=10000000,
                         model_path=os.path.join(
                             settings.BASE_DIR, 'models/{}/model_{}.h5'.format(
                                 coin_code, model_ver)))
    # exploration=True)
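`trade()` here depends on a previously saved `.h5` model. A small pre-check fails fast when the referenced file was never written; a hypothetical addition that reuses the same path convention:

    model_path = os.path.join(settings.BASE_DIR,
                              'models/{}/model_{}.h5'.format(coin_code, model_ver))
    if not os.path.isfile(model_path):
        raise FileNotFoundError(model_path)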
Example #10
    # Date range filtering
    training_data = training_data[(training_data['date'] >= '2019-01-01')
                                  & (training_data['date'] <= '2019-12-31')]
    training_data = training_data.dropna()

    # Chart Data Separation
    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    chart_data = training_data[features_chart_data]

    # Training data separation
    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio', 'close_ma5_ratio',
        'volume_ma5_ratio', 'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio'
    ]
    training_data = training_data[features_training_data]

    # Start non-training investment simulation
    policy_learner = PolicyLearner(stock_code=stock_code,
                                   chart_data=chart_data,
                                   training_data=training_data,
                                   min_trading_unit=1,
                                   max_trading_unit=3,
                                   delayed_reward_threshold=reward,
                                   tax=tax)
    policy_learner.trade(balance=bal,
                         model_path=os.path.join(
                             settings.BASE_DIR, 'models/{}/model_{}.h5'.format(
                                 stock_code, model_ver)))
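`reward`, `tax`, `bal`, and `model_ver` are free variables in this excerpt. A hypothetical CLI preamble that could supply them; the defaults are placeholders, not values from the source:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('model_ver', type=str)
    parser.add_argument('--reward', type=float, default=0.05)
    parser.add_argument('--tax', type=float, default=0.003)
    parser.add_argument('--bal', type=int, default=10000000)
    args = parser.parse_args()
    reward, tax, bal, model_ver = args.reward, args.tax, args.bal, args.model_ver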
Example #11
        'close_lastclose_ratio', 'volume_lastvolume_ratio',
        'close_ma5_ratio', 'volume_ma5_ratio',
        'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio',
        'close_ma60_ratio', 'volume_ma60_ratio',
        'close_ma120_ratio', 'volume_ma120_ratio',
        'ema12','ema26','dn','mavg','up','pctB','macd','signal','cci'
        ]
        training_data = training_data[features_training_data]
        # print(training_data[:3])
        training_data = training_data.dropna(axis=1)
        chart_data=chart_data.dropna(axis=1)
        chart_data = chart_data.loc[:'2018-06-30 23:00:00']
        training_data = training_data.loc[:'2018-06-30 23:00:00']
        drt = 0.2  # delayed reward threshold
        policy_learner = PolicyLearner(
            stock_code=stock_code, chart_data=chart_data, training_data=training_data,
            delayed_reward_threshold=drt, lr=.1)
        policy_learner.fit(balance=10000, num_epoches=10,
                           discount_factor=0, start_epsilon=.5)
        

        model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % stock_code)
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)
        model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
        policy_learner.policy_network.save_model(model_path)
        
        
        
        buycnt = 0
        sellcnt = 0
        confmat = [[0, 0], [0, 0]]
        class TestStrategy(bt.Strategy):
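            # The excerpt ends as this class opens; what follows is only a
            # minimal plausible body (the original is not shown). backtrader
            # calls next() once per bar, which is where buycnt / sellcnt /
            # confmat would be filled in.
            def __init__(self):
                self.dataclose = self.datas[0].close  # close-price line

            def next(self):
                pass  # per-bar trading logic goes here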
Example #12
    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    coin_chart = training_data[features_chart_data]

    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio', 'close_ma5_ratio',
        'volume_ma5_ratio', 'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio', 'close_ma60_ratio',
        'volume_ma60_ratio', 'close_ma120_ratio', 'volume_ma120_ratio'
    ]
    training_data = training_data[features_training_data]

    policy_learner = PolicyLearner(coin_code=coin_code,
                                   coin_chart=coin_chart,
                                   training_data=training_data,
                                   min_trading_unit=0.01,
                                   max_trading_unit=3,
                                   delayed_reward_threshold=.1,
                                   lr=.001)

    policy_learner.fit(balance=10000000,
                       num_epoches=5,
                       discount_factor=0,
                       start_epsilon=.5)

    model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % coin_code)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
    policy_learner.policy_network.save_model(model_path)
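`timestr` names the saved model but is defined outside the excerpt; in Example #3 it comes from `settings.get_time_str()`. A hypothetical version of that helper (the actual format may differ):

    import datetime

    def get_time_str():
        return datetime.datetime.now().strftime('%Y%m%d%H%M%S')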
Example #13
    coin_chart = data_manager.load_chart_data(
        os.path.join(settings.BASE_DIR,
                     'data/chart_data/{}.csv'.format(coin_code)))
    prep_data = data_manager.preprocess_min(coin_chart)
    training_data = data_manager.build_training_data(prep_data)

    start_date = '2018-10-13 00:00:00'
    end_date = '2018-10-14 00:00:00'
    training_data = training_data[(training_data['date'] >= start_date) &
                                  (training_data['date'] < end_date)]
    training_data = training_data.dropna()

    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    coin_chart = training_data[features_chart_data]

    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio',
        'close_ma5_ratio', 'volume_ma5_ratio',
        'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio',
        'close_ma60_ratio', 'volume_ma60_ratio',
        'close_ma120_ratio', 'volume_ma120_ratio']

    training_data = training_data[features_training_data]

    policy_learner = PolicyLearner(
        coin_code=COIN, coin_chart=coin_chart, training_data=training_data,
        min_trading_unit=0.001, max_trading_unit=0.01,
        start_date=start_date, end_date=end_date)

    policy_learner.trade(balance=BALANCE,
                         model_path=os.path.join(
                             settings.BASE_DIR,
                             'models/{}/model_{}.h5'.format(COIN, MODEL)))
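Example #13 starts from `data_manager.load_chart_data`, which is not shown. A plausible sketch, assuming a headerless OHLCV CSV in the column order used throughout these examples:

    import pandas as pd

    def load_chart_data(fpath):
        chart_data = pd.read_csv(fpath, thousands=',', header=None)
        chart_data.columns = ['date', 'open', 'high', 'low', 'close', 'volume']
        return chart_data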