Example #1
    # Separate the training data (feature columns)
    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio', 'close_ma5_ratio',
        'volume_ma5_ratio', 'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio', 'close_ma60_ratio',
        'volume_ma60_ratio', 'close_ma120_ratio', 'volume_ma120_ratio'
    ]
    training_data = training_data[features_training_data]

    # Start reinforcement learning
    policy_learner = PolicyLearner(stock_code=stock_code,
                                   chart_data=chart_data,
                                   training_data=training_data,
                                   min_trading_unit=1,
                                   max_trading_unit=2,
                                   delayed_reward_threshold=.2,
                                   lr=.001)
    policy_learner.fit(balance=10000000,
                       num_epoches=1000,
                       discount_factor=0,
                       start_epsilon=.5)

    # Save the policy network to a file
    model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % stock_code)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
    policy_learner.policy_network.save_model(model_path)
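
A minimal sketch of how the model saved above might later be reloaded for trading. The trade() call mirrors the usage shown in Example #5; the import path and balance are assumptions:

    from policy_learner import PolicyLearner  # import path is an assumption

    # Rebuild the learner around the same data, then trade with the saved model.
    policy_learner = PolicyLearner(stock_code=stock_code,
                                   chart_data=chart_data,
                                   training_data=training_data,
                                   min_trading_unit=1,
                                   max_trading_unit=2)
    policy_learner.trade(balance=10000000,
                         model_path=model_path)  # the .h5 file saved above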
Example #2
        chart_data=chart_data,
        training_data=training_data,
        policy_model_path=os.path.join(
            settings.BASE_DIR,
            'models/{}/model_{}.h5'.format(model_code, policy_model_ver)),
        value_model_path=os.path.join(
            settings.BASE_DIR,
            'models/{}/model_{}.h5'.format(model_code, value_model_ver)),
        lr=0.00000001,
        discount_factor=0,
        start_epsilon=0,
        num_past_input=119,
        load_weight_and_learn=False)

    # This is the key step
    policy_learner.fit(balance=1, num_epoches=1000)

    # Save the policy and value networks to files
    model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % model_code)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    model_path = os.path.join(model_dir, 'model_policy_%s.h5' % timestr)
    policy_learner.policy_network.save(model_path,
                                       include_optimizer=False,
                                       overwrite=True)
    #policy_learner.policy_network_obj.save_model(model_path)
    model_path = os.path.join(model_dir, 'model_value_%s.h5' % timestr)
    policy_learner.value_network.save(model_path,
                                      include_optimizer=False,
                                      overwrite=True)
    #policy_learner.value_network_obj.save_weights(model_path)
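
Because these networks are saved with Keras's standard save(), they can be reloaded with load_model. A minimal sketch, assuming the TensorFlow-bundled Keras; compile=False mirrors include_optimizer=False above, and the path variables stand in for the files just written:

    from tensorflow.keras.models import load_model

    # Reload the saved networks for inference only; the optimizer state
    # was not saved, so skip compiling.
    policy_network = load_model(policy_model_path, compile=False)
    value_network = load_model(value_model_path, compile=False)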
Example #3
                                  (training_data['date'] <= '2018-12-31')]
    training_data = training_data.dropna()

    # Chart data separation
    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    chart_data = training_data[features_chart_data]

    # Training data separation
    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio',
        'close_ma5_ratio', 'volume_ma5_ratio',
        'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio'
    ]
    training_data = training_data[features_training_data]

    # Start reinforcement learning
    policy_learner = PolicyLearner(
        stock_code=stock_code, chart_data=chart_data, training_data=training_data,
        min_trading_unit=1, max_trading_unit=2, delayed_reward_threshold=reward, lr=.0001, tax=tax)
    policy_learner.fit(balance=bal, num_epoches=500,
                       discount_factor=0, start_epsilon=.5, monkey=monkey)

    # Save Policy Neural Network to File
    model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % stock_code)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
    policy_learner.policy_network.save_model(model_path)
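
Note that the first line of this example is truncated: the date filter opens on a line that was cut off. A hypothetical reconstruction of the full expression (the start date is an assumption, not from the source):

    # Hypothetical reconstruction -- the actual lower bound is not shown above.
    training_data = training_data[(training_data['date'] >= '2017-01-01') &
                                  (training_data['date'] <= '2018-12-31')]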
Example #4
    stream_handler = logging.StreamHandler()  # send logging output to the console (stderr by default)
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
                        handlers=[file_handler, stream_handler], level=logging.DEBUG)

    # Start reinforcement learning
    if LEARNING:
        chart_data, data = prepare_data(STOCK_CODE, MARKET_CODE, TRAINING_START_DATE, TRAINING_END_DATE)

        policy_learner = PolicyLearner(
            stock_code=STOCK_CODE, chart_data=chart_data, training_data=data,
            min_trading_unit=MIN_TRADING_UNIT, max_trading_unit=MAX_TRADING_UNIT,
            delayed_reward_threshold=DELAYED_REWARD_THRESHOLD, lr=LEARNING_RATE)
            
        policy_learner.fit(balance=INITIAL_BALANCE, num_epoches=NUM_EPOCHS, max_memory=MAX_MEMORY,
                           discount_factor=DISCOUNT_FACTOR, start_epsilon=START_EPSILON, learning=LEARNING_RATE)

        # Save the policy network to a file
        model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % STOCK_CODE)
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)
        #model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
        model_path = os.path.join(model_dir, 'model_%s.h5' % STOCK_CODE)
        policy_learner.policy_network.save_model(model_path)

    if SIMULATION:
        chart_data, data = prepare_data(STOCK_CODE, MARKET_CODE, SIMULATION_START_DATE, SIMULATION_END_DATE)

        policy_learner = PolicyLearner(
            stock_code=STOCK_CODE, chart_data=chart_data, training_data=data,
            min_trading_unit=MIN_TRADING_UNIT, max_trading_unit=MAX_TRADING_UNIT)
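
The SIMULATION branch is cut off after the constructor. Judging from the trade() call in Example #5, it presumably continues along these lines (a sketch only; the model path and arguments are assumptions):

        # Sketch: reload the model saved by the learning branch and run it
        # without further training. Argument names follow Example #5.
        model_path = os.path.join(settings.BASE_DIR,
                                  'models/%s/model_%s.h5' % (STOCK_CODE, STOCK_CODE))
        policy_learner.trade(balance=INITIAL_BALANCE, model_path=model_path)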
Example #5
    def learnFunc(self):
        if self.code is None or self.df is None:
            return
        self.change_value.emit(ZERO)
        # Preprocess the data
        code = self.code
        chart_data = self.df
        prep_data = data_manager.preprocess(chart_data)
        training_data = data_manager.build_training_data(prep_data)
        training_data = training_data.dropna()

        # Separate the chart data
        feature_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
        chart_data = training_data[feature_chart_data]

        # Report progress: 25%
        self.change_value.emit(TWENTY_FIVE)

        # Separate the training data
        features_training_data = [
            'open_lastclose_ratio',
            'high_close_ratio',
            'low_close_ratio',
            'close_lastclose_ratio',
            'volume_lastvolume_ratio',
            'close_ma5_ratio',
            'volume_ma5_ratio',
            'close_ma10_ratio',
            'volume_ma10_ratio',
            'close_ma20_ratio',
            'volume_ma20_ratio',
            'close_ma60_ratio',
            'volume_ma60_ratio',
            'close_ma120_ratio',
            'volume_ma120_ratio',
        ]
        training_data = training_data[features_training_data]

        # Prepare the directory for saving the policy network
        self.createFolder('model')
        mdir = os.path.join(settings.BASE_DIR, 'model')
        self.createFolder(os.path.join(mdir, code))

        model_dir = os.path.join(mdir, code)
        model_path = os.path.join(model_dir, 'model%s.h5' % code)

        # If model_path does not exist: train a new model and save it there.
        # If model_path exists: load that model and use it to trade.
        print(model_path)

        # Report progress: 50%
        self.change_value.emit(FIFTY)

        if not os.path.isfile(model_path):
            start_time = time.time()
            policy_learner = PolicyLearner(stock_code=code,
                                           chart_data=chart_data,
                                           training_data=training_data,
                                           fig=self.fig,
                                           canvas=self.canvas,
                                           min_trading_unit=1,
                                           max_trading_unit=2,
                                           delayed_reward_threshold=0.2,
                                           lr=0.001)
            policy_learner.fit(balance=10000000,
                               num_epoches=200,
                               discount_factor=0,
                               start_epsilon=0.5)
            end_time = time.time()
            policy_learner.policy_network.save_model(model_path)
            print("LearningTime: {} sec".format(end_time - start_time))
        else:
            start_time = time.time()
            policy_learner = PolicyLearner(stock_code=code,
                                           chart_data=chart_data,
                                           training_data=training_data,
                                           fig=self.fig,
                                           canvas=self.canvas,
                                           min_trading_unit=1,
                                           max_trading_unit=2)
            end_time = time.time()
            print("LearningTime: {} sec".format(end_time - start_time))
            policy_learner.trade(balance=1000000,
                                 model_path=os.path.join(
                                     model_dir, 'model%s.h5' % (code)))

        # Report progress: 100%
        self.change_value.emit(A_HUNDRED)
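
The snippet relies on a createFolder helper defined elsewhere on the class. A minimal sketch of what it presumably does (hypothetical, not shown in the source):

    def createFolder(self, directory):
        # Create the directory if it does not exist yet.
        if not os.path.isdir(directory):
            os.makedirs(directory)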
Example #6
    # Separate the training data
    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio', 'close_ma5_ratio',
        'volume_ma5_ratio', 'close_ma10_ratio', 'volume_ma10_ratio',
        'close_ma20_ratio', 'volume_ma20_ratio', 'close_ma60_ratio',
        'volume_ma60_ratio', 'close_ma120_ratio', 'volume_ma120_ratio'
    ]
    training_data = training_data[features_training_data]

    # Start reinforcement learning
    policy_learner = PolicyLearner(stock_code=stock_code,
                                   chart_data=chart_data,
                                   training_data=training_data,
                                   min_trading_unit=1,
                                   max_trading_unit=2,
                                   delayed_reward_threshold=.2,
                                   lr=.001)
    policy_learner.fit(balance=10000000,
                       num_epoches=100,
                       discount_factor=0,
                       start_epsilon=.5,
                       index_change_rate=index_change_rate)

    # Save the policy network to a file
    model_dir = os.path.join(settings.BASE_DIR, 'models/%s' % stock_code)
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    model_path = os.path.join(model_dir, 'model_%s.h5' % timestr)
    policy_learner.policy_network.save_model(model_path)
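
This variant passes an extra index_change_rate argument to fit(), presumably the day-over-day change rate of a market index. One plausible way to build such a series with pandas (a sketch; the file name and column are assumptions):

    import pandas as pd

    # Hypothetical: daily change rate of a market index (e.g. KOSPI),
    # covering the same date range as the training data.
    index_data = pd.read_csv('market_index.csv')  # placeholder file
    index_change_rate = index_data['close'].pct_change().fillna(0).values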