Example #1
    def __init__(self):
        stock_code = '005930'
        start_date = '2010-03-01'
        end_date = '2015-03-04'

        chart_data = data_manager.load_chart_data(
            os.path.join(settings.BASE_DIR,
                         'data/chart_data/{}.csv'.format(stock_code)))
        prep_data = data_manager.preprocess(chart_data)
        training_data = data_manager.build_training_data(prep_data)
        
        # Period filtering
        training_data = training_data[(training_data['date'] >= start_date) &
                                      (training_data['date'] <= end_date)]
        training_data = training_data.dropna()
        
        # Split off the chart data
        features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
        chart_data = training_data[features_chart_data].copy()

        # Convert the date strings to epoch milliseconds so every column is numeric
        chart_data['date'] = pd.to_datetime(chart_data.date).astype(np.int64) / 1000000
        data = torch.from_numpy(chart_data.values)

        # Keep the date, close, and volume columns
        self.data = torch.stack([data[:, 0], data[:, 4], data[:, 5]], dim=1).float()

        # z-score normalize each column
        self.data = self.data - self.data.mean(dim=0)
        self.data = self.data / self.data.std(dim=0)

        self.count_max = self.data.size(0)
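The constructor above z-score normalizes the selected date, close, and volume columns with PyTorch. A minimal, self-contained sketch of the same normalization pattern, using synthetic data whose shapes and scales are made up purely for illustration:

import torch

# synthetic stand-ins for the date, close, and volume columns
data = torch.rand(100, 3) * torch.tensor([1e9, 50000.0, 1e6])
data = data - data.mean(dim=0)
data = data / data.std(dim=0)
print(data.mean(dim=0))  # ~0 for every column
print(data.std(dim=0))   # ~1 for every column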
Example #2
    def _pre_process(self):
        chart_data = data_manager.load_chart_data('test.csv')
        print("chart_data:", chart_data.head())

        prep_data = data_manager.preprocess(chart_data)
        print("prep_data:", prep_data)

        training_data = data_manager.build_training_data(prep_data)
        print("training_data:", training_data)
Example #3
    def reset(self):
        stock_code = np.random.choice([
            '005930', '000270', '000660', '005380', '005490', '009240',
            '009540'
        ])

        self.prev_action = 0
        self.count = 0
        self.balance = self.init_money
        self.num_stocks = 0
        self.sum_action = 0

        chart_data = data_manager.load_chart_data(
            os.path.join('./', 'data/chart_data/{}.csv'.format(stock_code)))
        prep_data = data_manager.preprocess(chart_data)
        training_data = data_manager.build_training_data(prep_data)

        # Period filtering
        start = random.randint(self.view_seq,
                               (len(training_data) - self.count_max - 200))

        training_data = training_data[start - self.view_seq:start +
                                      self.count_max + 200]

        #        training_data = training_data[(training_data['date'] >= self.start_date) &
        #                                      (training_data['date'] <= self.end_date)]
        training_data = training_data.dropna()

        # Split off the chart data
        features_chart_data = [
            'date', 'open', 'high', 'low', 'close', 'volume'
        ]
        # .copy() avoids pandas' SettingWithCopyWarning on the next assignment
        self.chart_data = training_data[features_chart_data].copy()
        self.chart_data['date'] = pd.to_datetime(self.chart_data.date).astype(
            np.int64) / 1e12

        # Split off the training data
        features_training_data = [
            'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
            'close_lastclose_ratio', 'volume_lastvolume_ratio',
            'close_ma5_ratio', 'volume_ma5_ratio', 'close_ma10_ratio',
            'volume_ma10_ratio', 'close_ma20_ratio', 'volume_ma20_ratio',
            'close_ma60_ratio', 'volume_ma60_ratio', 'close_ma120_ratio',
            'volume_ma120_ratio'
        ]
        training_data = training_data[features_training_data]

        self.data = torch.from_numpy(training_data.values).float()

        state = self.data[self.count:self.count + self.view_seq].view(1, -1)
        state = torch.cat(
            [state, torch.Tensor([self.sum_action]).view(1, -1)], dim=1)
        return state
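The state returned above flattens view_seq rows of the 15 feature columns into a single row and appends sum_action, so its width is view_seq * 15 + 1. A minimal sketch of the same reshaping, with illustrative sizes:

import torch

view_seq, num_features = 5, 15
data = torch.zeros(100, num_features)  # stand-in for the feature matrix
state = data[0:view_seq].view(1, -1)   # flatten to (1, 75)
state = torch.cat([state, torch.Tensor([[0.0]])], dim=1)  # append sum_action -> (1, 76)
print(state.shape)  # torch.Size([1, 76])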
Example #4
    def __init__(self, stock_list, start, end):

        self.stock_list = stock_list
        self.start_date = start
        self.end_date = end

        data_base = []

        for stock_code in self.stock_list:
            # Prepare the stock data
            chart_data = data_manager.load_chart_data(
                os.path.join(settings.BASE_DIR,
                             'data/chart_data/{}.csv'.format(stock_code)))
            prep_data = data_manager.preprocess(chart_data)
            training_data = data_manager.build_training_data(prep_data)

            # Period filtering
            training_data = training_data[
                (training_data['date'] >= self.start_date)
                & (training_data['date'] <= self.end_date)]
            training_data = training_data.dropna()

            # Split off the chart data
            features_chart_data = [
                'date', 'open', 'high', 'low', 'close', 'volume'
            ]
            # .copy() avoids pandas' SettingWithCopyWarning on the next assignment
            chart_data = training_data[features_chart_data].copy()
            chart_data['date'] = pd.to_datetime(chart_data.date).values.astype(
                np.int64)

            # Change the chart index to date
            #    chart_data.set_index('date', inplace=True)

            chart_data = torch.from_numpy(chart_data.values)
            data_base.append(chart_data)

        data_base = torch.cat(data_base, dim=1).float()
        scaled_data = (data_base - data_base.mean(dim=0))
        self.scaled_data = scaled_data / scaled_data.std()

        self.max_count = self.scaled_data.size(0)
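One detail worth noting in this example: scaled_data.std() with no dim argument returns a single scalar over all elements, so every column is divided by the same factor, whereas Example #1 scales each column by its own standard deviation. A small sketch of the difference on a synthetic tensor:

import torch

t = torch.rand(10, 3)
print(t.std())       # one scalar across all 30 elements
print(t.std(dim=0))  # one value per column, as in Example #1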
Example #5
    # Logging
    log_dir = os.path.join(settings.BASE_DIR, 'logs/%s' % stock_code)
    timestr = settings.get_time_str()
    # FileHandler raises if the directory is missing, so create it first
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    file_handler = logging.FileHandler(filename=os.path.join(
        log_dir, "%s_%s.log" % (stock_code, timestr)), encoding='utf-8')
    stream_handler = logging.StreamHandler()
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
        handlers=[file_handler, stream_handler], level=logging.DEBUG)

    # Prepare the stock data
    chart_data = data_manager.load_chart_data(
        os.path.join(settings.BASE_DIR,
                     'data/chart_data/{}.csv'.format(stock_code)))
    prep_data = data_manager.preprocess(chart_data)
    training_data = data_manager.build_training_data(prep_data)

    # Period filtering
    training_data = training_data[(training_data['date'] >= '2018-01-01') &
                                  (training_data['date'] <= '2018-01-31')]
    training_data = training_data.dropna()

    # Split off the chart data
    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    chart_data = training_data[features_chart_data]

    # Split off the training data
    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
        'close_lastclose_ratio', 'volume_lastvolume_ratio',
Example #6
def main():
    # Directory Settings
    DATASET_DIR = './dataset/'
    EXPORT_DIR = './export/crnn_simple_cnn_frame/'

    # Parameter Settings
    MODE = 'frame'
    DEVICE = 1  # 0 : cpu, 1 : gpu0, 2 : gpu1, ...
    NUM_CLASS = 25  # 0 : Silence, 1 - 12: Major, 13 - 24: Minor, Don't change this parameter
    EPOCH = 200
    BATCH_SIZE = 32
    SEQ_LENGTH = 128
    LEARN_RATE = 0.001

    parser = argparse.ArgumentParser()
    parser.add_argument('--export_dir',
                        type=str,
                        default=EXPORT_DIR,
                        help='export directory')
    parser.add_argument('--mode',
                        type=str,
                        default=MODE,
                        help='which mode? frame or beatsync')
    parser.add_argument('--device',
                        type=int,
                        default=DEVICE,
                        help='which device? 0 : cpu, over 1 : gpu')
    parser.add_argument('--epoch',
                        type=int,
                        default=EPOCH,
                        help='how many epoch?')
    parser.add_argument('--batch_size',
                        type=int,
                        default=BATCH_SIZE,
                        help='how many batch?')
    parser.add_argument('--seq_length',
                        type=int,
                        default=SEQ_LENGTH,
                        help='how much sequence length?')
    parser.add_argument('--learn_rate',
                        type=float,
                        default=LEARN_RATE,
                        help='learning rate')

    args = parser.parse_args()
    EXPORT_DIR = args.export_dir
    MODE = args.mode
    DEVICE = args.device
    EPOCH = args.epoch
    BATCH_SIZE = args.batch_size
    SEQ_LENGTH = args.seq_length
    LEARN_RATE = args.learn_rate

    # Preprocess
    x, y, info_test = data_manager.preprocess(DATASET_DIR,
                                              BATCH_SIZE,
                                              SEQ_LENGTH,
                                              mode=MODE)
    total_batch = float(x.train.shape[0] + x.test.shape[0] + x.valid.shape[0])
    print('Data Loaded\n' + 'Train Ratio : ' +
          str(round(100 * x.train.shape[0] / total_batch, 2)) +
          '%, Test Ratio : ' +
          str(round(100 * x.test.shape[0] / total_batch, 2)) +
          '%, Valid Ratio : ' +
          str(round(100 * x.valid.shape[0] / total_batch, 2)) + '%')

    acc_train = np.zeros(EPOCH)
    acc_valid = np.zeros(EPOCH)
    loss_train = np.zeros(EPOCH)
    loss_valid = np.zeros(EPOCH)

    # Train
    print('\n--------- Training Start ---------')
    model = model_archive.CRNN_simple_cnn(x.train.shape[-1], NUM_CLASS)
    #model = model_archive.CRNN(16, 1, 25, 64)
    #model = torch.load("./export/crnn4/model.pth")
    wrapper = Wrapper(model, LEARN_RATE)  #(12, 25, 0.0001)

    for e in range(EPOCH):
        _, acc_train[e], loss_train[e] = wrapper.run_model(
            x.train, y.train, DEVICE, 'train')
        _, acc_valid[e], loss_valid[e] = wrapper.run_model(
            x.valid, y.valid, DEVICE, 'eval')

        #if wrapper.early_stop(loss_valid[e]): break

        print('Epoch [' + str(e + 1).zfill(3) + '/' + str(EPOCH) + ']' +
              ' acc : ' + str(round(acc_train[e], 4)) + ' - val_acc : ' +
              str(round(acc_valid[e], 4)) + ' | loss : ' +
              str(round(loss_train[e], 4)) + ' - val_loss : ' +
              str(round(loss_valid[e], 4)))
    print('-------- Training Finished -------')

    # Test
    pred_test, _, _ = wrapper.run_model(x.test, y.test, DEVICE,
                                        'eval')  #pred_test: (426848,)

    chroma_test = data_manager.batch_dataset(
        info_test.chroma, BATCH_SIZE
    )  #ndarray(songs X num_of_batches_for_each_song, batch_size, seq_len, 12)
    chord_test = data_manager.batch_dataset(info_test.chord, BATCH_SIZE)
    chroma_test = chroma_test.reshape(
        chroma_test.shape[0] * chroma_test.shape[1], chroma_test.shape[-1])
    chord_test = chord_test.reshape(chord_test.shape[0] * chord_test.shape[1])

    acc_test, pred_test = data_manager.frame_accuracy(chord_test,
                                                      pred_test,
                                                      mode=MODE)
    print('\nTest Accuracy : ' + str(round(100 * acc_test, 2)) + '%')

    # Export
    wrapper.export(EXPORT_DIR, chroma_test, chord_test, pred_test)
    print('Exported files to ' + os.path.abspath(EXPORT_DIR))
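The main() above follows a common argparse idiom: defaults live in module-level constants, and command-line flags override them. A condensed, self-contained sketch of that idiom (the flag name and values here are illustrative, not part of the original script):

import argparse

EPOCH = 200  # default, overridable from the command line
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=EPOCH, help='how many epochs?')
args = parser.parse_args(['--epoch', '50'])  # equivalent to passing --epoch 50 on the CLI
EPOCH = args.epoch
print(EPOCH)  # prints 50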
Example #7
    # make sure the directory the log file is written to actually exists
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    file_handler = logging.FileHandler(filename=os.path.join(
        log_dir, "%s_%s.log" % (COIN, timestr)),
                                       encoding='utf-8')
    stream_handler = logging.StreamHandler()
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
                        handlers=[file_handler, stream_handler],
                        level=logging.DEBUG)

    coin_chart = data_manager.load_chart_data(
        os.path.join(settings.BASE_DIR, 'data/chart_data/{}.csv'.format(COIN)))
    print("coin chart get")
    prep_data = data_manager.preprocess(coin_chart)
    training_data = data_manager.build_training_data(prep_data)

    #    start_date = '2018-05-24 00:00:00'
    #    end_date = '2018-07-10 00:00:00'

    training_data = training_data[(training_data['date'] >= FROM)
                                  & (training_data['date'] < TO)]

    training_data = training_data.dropna()

    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    coin_chart = training_data[features_chart_data]

    features_training_data = [
        'open_lastclose_ratio', 'high_close_ratio', 'low_close_ratio',
Example #8
def main():
    # Directory Settings
    DATASET_DIR = './dataset/'
    EXPORT_DIR = './export/CNN_range/'

    # Parameter Settings
    MODE = 'beatsync'
    DEVICE = 1  # 0 : cpu, 1 : gpu0, 2 : gpu1, ...
    NUM_CLASS = 25  # 0 : Silence, 1 - 12: Major, 13 - 24: Minor, Don't change this parameter
    EPOCH = 60
    BATCH_SIZE = 128
    LEARN_RATE = 0.01
    SEQ_LENGTH = 10

    parser = argparse.ArgumentParser()
    parser.add_argument('--export_dir',
                        type=str,
                        default=EXPORT_DIR,
                        help='export directory')
    parser.add_argument('--mode',
                        type=str,
                        default=MODE,
                        help='which mode? frame or beatsync')
    parser.add_argument('--device',
                        type=int,
                        default=DEVICE,
                        help='which device? 0 : cpu, over 1 : gpu')
    parser.add_argument('--epoch',
                        type=int,
                        default=EPOCH,
                        help='how many epoch?')
    parser.add_argument('--batch_size',
                        type=int,
                        default=BATCH_SIZE,
                        help='how many batch?')
    parser.add_argument('--learn_rate',
                        type=float,
                        default=LEARN_RATE,
                        help='learning rate')
    parser.add_argument('--seq_length',
                        type=int,
                        default=SEQ_LENGTH,
                        help='CNN sequence length')
    args = parser.parse_args()
    EXPORT_DIR = args.export_dir
    MODE = args.mode
    DEVICE = args.device
    EPOCH = args.epoch
    BATCH_SIZE = args.batch_size
    LEARN_RATE = args.learn_rate
    SEQ_LENGTH = args.seq_length

    # Preprocess
    x, y, info_test = data_manager.preprocess(DATASET_DIR,
                                              BATCH_SIZE,
                                              SEQ_LENGTH,
                                              mode=MODE)
    total_batch = float(x.train.shape[0] + x.test.shape[0] + x.valid.shape[0])
    print('Data Loaded\n' + 'Train Ratio : ' +
          str(round(100 * x.train.shape[0] / total_batch, 2)) +
          '%, Test Ratio : ' +
          str(round(100 * x.test.shape[0] / total_batch, 2)) +
          '%, Valid Ratio : ' +
          str(round(100 * x.valid.shape[0] / total_batch, 2)) + '%')

    acc_train = np.zeros(EPOCH)
    acc_valid = np.zeros(EPOCH)
    loss_train = np.zeros(EPOCH)
    loss_valid = np.zeros(EPOCH)

    # Train
    print('\n--------- Training Start ---------')
    wrapper = Wrapper(x.train.shape[-1], NUM_CLASS, LEARN_RATE)
    #wrapper.model.cuda(device=DEVICE-1)
    # x = minibatch x batchsize x chroma // y = minibatch x batchsize

    for e in range(EPOCH):
        shuff_train = np.arange(x.train.shape[0])
        np.random.shuffle(shuff_train)
        shuff_valid = np.arange(x.valid.shape[0])
        np.random.shuffle(shuff_valid)
        _, acc_train[e], loss_train[e] = wrapper.run_model(
            x.train[shuff_train], y.train[shuff_train], DEVICE, 'train')
        _, acc_valid[e], loss_valid[e] = wrapper.run_model(
            x.valid[shuff_valid], y.valid[shuff_valid], DEVICE, 'eval')
        #_, acc_train[e], loss_train[e] = wrapper.run_model(x.train, y.train, DEVICE, 'train')
        #_, acc_valid[e], loss_valid[e] = wrapper.run_model(x.valid, y.valid, DEVICE, 'eval')
        if wrapper.early_stop(loss_valid[e]): break

        print('Epoch [' + str(e + 1).zfill(3) + '/' + str(EPOCH) + ']' +
              ' acc : ' + str(round(acc_train[e], 4)) + ' - val_acc : ' +
              str(round(acc_valid[e], 4)) + ' | loss : ' +
              str(round(loss_train[e], 4)) + ' - val_loss : ' +
              str(round(loss_valid[e], 4)))
    print('-------- Training Finished -------')

    # Test
    pred_test, _, _ = wrapper.run_model(x.test, y.test, DEVICE, 'eval')

    chroma_test = data_manager.batch_dataset(info_test.chroma, BATCH_SIZE)
    chord_test = data_manager.batch_dataset(info_test.chord, BATCH_SIZE)
    chroma_test = chroma_test.reshape(
        chroma_test.shape[0] * chroma_test.shape[1], chroma_test.shape[-1])
    chord_test = chord_test.reshape(chord_test.shape[0] * chord_test.shape[1])

    acc_test, pred_test = data_manager.frame_accuracy(chord_test,
                                                      pred_test,
                                                      info_test,
                                                      BATCH_SIZE,
                                                      mode=MODE)
    print('\nTest Accuracy : ' + str(round(100 * acc_test, 2)) + '%')

    # Export
    wrapper.export(EXPORT_DIR, chroma_test, chord_test, pred_test, acc_test)
    print('Exported files to ' + os.path.abspath(EXPORT_DIR))
Example #9
    def learnFunc(self):
        if self.code is None or self.df is None:
            return
        self.change_value.emit(ZERO)
        # Data preprocessing
        code = self.code
        chart_data = self.df
        prep_data = data_manager.preprocess(chart_data)
        training_data = data_manager.build_training_data(prep_data)
        training_data = training_data.dropna()

        # Split off the chart data
        feature_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
        chart_data = training_data[feature_chart_data]

        # emit
        self.change_value.emit(TWENTY_FIVE)

        # Split off the training data
        feature_training_data = [
            'open_lastclose_ratio',
            'high_close_ratio',
            'low_close_ratio',
            'close_lastclose_ratio',
            'volume_lastvolume_ratio',
            'close_ma5_ratio',
            'volume_ma5_ratio',
            'close_ma10_ratio',
            'volume_ma10_ratio',
            'close_ma20_ratio',
            'volume_ma20_ratio',
            'close_ma60_ratio',
            'volume_ma60_ratio',
            'close_ma120_ratio',
            'volume_ma120_ratio',
        ]
        training_data = training_data[feature_training_data]

        # Save the policy network to a file
        self.createFolder('model')
        mdir = os.path.join(settings.BASE_DIR, 'model')
        self.createFolder(os.path.join(mdir, code))

        model_dir = os.path.join(mdir, code)
        model_path = os.path.join(model_dir, 'model%s.h5' % code)

        # If model_path does not exist, train a model in that directory and save it
        # If model_path exists, load that model and use it for prediction
        print(model_path)

        # emit
        self.change_value.emit(FIFTY)

        if not os.path.isfile(model_path):
            start_time = time.time()
            policy_learner = PolicyLearner(stock_code=code,
                                           chart_data=chart_data,
                                           training_data=training_data,
                                           fig=self.fig,
                                           canvas=self.canvas,
                                           min_trading_unit=1,
                                           max_trading_unit=2,
                                           delayed_reward_threshold=0.2,
                                           lr=0.001)
            policy_learner.fit(balance=10000000,
                               num_epoches=200,
                               discount_factor=0,
                               start_epsilon=0.5)
            end_time = time.time()
            policy_learner.policy_network.save_model(model_path)
            print("LearningTime: {} sec".format(end_time - start_time))
        else:
            start_time = time.time()
            policy_learner = PolicyLearner(stock_code=code,
                                           chart_data=chart_data,
                                           training_data=training_data,
                                           fig=self.fig,
                                           canvas=self.canvas,
                                           min_trading_unit=1,
                                           max_trading_unit=2)
            end_time = time.time()
            print("LearningTime: {} sec".format(end_time - start_time))
            policy_learner.trade(balance=1000000,
                                 model_path=os.path.join(
                                     model_dir, 'model%s.h5' % (code)))

        # emit
        self.change_value.emit(A_HUNDRED)
Example #10
    file_handler = logging.FileHandler(filename=os.path.join(
        log_dir, "%s_%s.log" % (stock_code, timestr)),
                                       encoding='utf-8')
    stream_handler = logging.StreamHandler()
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
                        handlers=[file_handler, stream_handler],
                        level=logging.DEBUG)

    # Prepare the stock data needed for reinforcement learning
    # 1) Load the data from a CSV file
    # chart_data = data_manager.load_chart_data_fromCSV(os.path.join(settings.BASE_DIR, 'data/chart_data/{}.csv'.format(stock_code)))
    # 2) Load the data from the database
    chart_data = data_manager.load_chart_data_fromDB(stock_code)
    prep_data = data_manager.preprocess(
        chart_data)  # preprocess the loaded chart data, ready for building training data
    training_data = data_manager.build_training_data(
        prep_data)  # add the columns that make up the training data
    # This training_data now contains the chart data columns, the columns added
    # during preprocessing, and the training feature columns.

    # Period filtering
    training_data = training_data[(training_data['date'] >= '2017-01-01')
                                  & (training_data['date'] <= '2017-12-31')]
    training_data = training_data.dropna()

    # Split the data into the chart data and the training data that reinforcement
    # learning needs --> carve training_data, which has many features, into the
    # required parts: the DOHLCV chart data and the training data with its 15 features.
    # Split off the chart data
    features_chart_data = ['date', 'open', 'high', 'low', 'close', 'volume']
    chart_data = training_data[features_chart_data]

    # Split off the training data
Example #11
            def __init__(self):
                stock_code = 'BTCUSDT'
                global timestr  # use the module-level timestr directly
                model_ver = timestr
                self.dataclose = self.datas[0].close
                log_dir = os.path.join(settings.BASE_DIR, 'logs/%s' % stock_code)
                timestr = settings.get_time_str()
                if not os.path.exists(log_dir):
                    os.makedirs(log_dir)
                file_handler = logging.FileHandler(filename=os.path.join(
                    log_dir, "%s_%s.log" % (stock_code, timestr)), encoding='utf-8')
                stream_handler = logging.StreamHandler()
                file_handler.setLevel(logging.DEBUG)
                stream_handler.setLevel(logging.INFO)
                logging.basicConfig(format="%(message)s",
                        handlers=[file_handler, stream_handler], level=logging.DEBUG)

    
                chart_data = data_manager.load_chart_data(
                    os.path.join(settings.BASE_DIR,
                                 '{}'.format(stock_code)))
                prep_data = data_manager.preprocess(chart_data)
                training_data = data_manager.build_training_data(prep_data)

                training_data = training_data.loc['2018-07-01 01:00:00':]

                features_chart_data = ['o_t', 'open', 'high', 'low', 'close', 'volume']
                chart_data = training_data[features_chart_data]


                features_training_data = [
                    'high_close_ratio', 'low_close_ratio',
                    'close_lastclose_ratio', 'volume_lastvolume_ratio',
                    'close_ma5_ratio', 'volume_ma5_ratio',
                    'close_ma10_ratio', 'volume_ma10_ratio',
                    'close_ma20_ratio', 'volume_ma20_ratio',
                    'close_ma60_ratio', 'volume_ma60_ratio',
                    'close_ma120_ratio', 'volume_ma120_ratio',
                    'ema12', 'ema26', 'dn', 'mavg', 'up', 'pctB',
                    'macd', 'signal', 'cci'
                ]
                training_data = training_data[features_training_data]
                #print (training_data[:3])   
                training_data = training_data.dropna(axis=1)
                chart_data=chart_data.dropna(axis=1)
                #chart_data = chart_data.loc[:1530352800000]
                #training_data = training_data.loc[:1530352800000]
                delayed_reward_threshold = .001
                lr = 0.1
                self.TRADING_TAX = 0
                self.TRADING_CHARGE = 0
                self.stock_code = stock_code  
                self.chart_data = chart_data
                self.environment = Environment(chart_data)  
                self.agent = Agent(self.environment, delayed_reward_threshold=delayed_reward_threshold)
                self.training_data = training_data  
                self.sample = None
                self.pvdata = []
                self.training_data_idx = -1
                self.num_features = self.training_data.shape[1] #+ self.agent.STATE_DIM
                self.policy_network = PolicyNetwork(
                    input_dim=self.num_features, output_dim=self.agent.NUM_ACTIONS, lr=lr)
                model_path = os.path.join(
                    settings.BASE_DIR,
                    'models/{}/model_{}.h5'.format(stock_code, model_ver))
                self.policy_network.load_model(model_path=model_path)
                self.agent.set_balance(self.broker.getcash())
                self.epsilon = 0
                self.num_stocks = 0
                df = pd.read_csv('out.csv')
                self.stopit = df.loc[7692]['c_p']  # 2903 4135 7692
Example #12
from data_manager import load_data, preprocess, get_train_data
from environment import Environment

ticker = 'MSFT'
start_date = '2010-01-01'

df = load_data(ticker, start_date)
df['Date'] = df['Date'].astype('str')
# print(df.head())
predf = preprocess(df)
# print(predf.head())

print(Environment(predf).observe())
print(Environment(predf).get_price())
Example #13
def main():
    # Directory Settings
    DATASET_DIR = './dataset/'
    EXPORT_DIR = './export/result/'

    # Parameter Settings
    MODE = 'beatsync'
    DEVICE = 1  # 0 : cpu, 1 : gpu0, 2 : gpu1, ...
    NUM_CLASS = 25  # 0 : Silence, 1 - 12: Major, 13 - 24: Minor, Don't change this parameter
    EPOCH = 100
    BATCH_SIZE = 128
    LEARN_RATE = 0.001
    SEQ_LENGTH = 10

    parser = argparse.ArgumentParser()
    parser.add_argument('--export_dir',
                        type=str,
                        default=EXPORT_DIR,
                        help='export directory')
    parser.add_argument('--mode',
                        type=str,
                        default=MODE,
                        help='which mode? frame or beatsync')
    parser.add_argument('--device',
                        type=int,
                        default=DEVICE,
                        help='which device? 0 : cpu, over 1 : gpu')
    parser.add_argument('--epoch',
                        type=int,
                        default=EPOCH,
                        help='how many epoch?')
    parser.add_argument('--batch_size',
                        type=int,
                        default=BATCH_SIZE,
                        help='how many batch?')
    parser.add_argument('--learn_rate',
                        type=float,
                        default=LEARN_RATE,
                        help='learning rate')
    parser.add_argument('--seq_length',
                        type=int,
                        default=SEQ_LENGTH,
                        help='CNN sequence length')
    args = parser.parse_args()
    EXPORT_DIR = args.export_dir
    MODE = args.mode
    DEVICE = args.device
    EPOCH = args.epoch
    BATCH_SIZE = args.batch_size
    LEARN_RATE = args.learn_rate
    SEQ_LENGTH = args.seq_length

    # Preprocess
    x, y, info_test = data_manager.preprocess(DATASET_DIR,
                                              BATCH_SIZE,
                                              SEQ_LENGTH,
                                              mode=MODE)
    total_batch = float(x.train.shape[0] + x.test.shape[0] + x.valid.shape[0])
    print('Data Loaded\n' + 'Train Ratio : ' +
          str(round(100 * x.train.shape[0] / total_batch, 2)) +
          '%, Test Ratio : ' +
          str(round(100 * x.test.shape[0] / total_batch, 2)) +
          '%, Valid Ratio : ' +
          str(round(100 * x.valid.shape[0] / total_batch, 2)) + '%')

    # Train
    print('\n--------- Training Start ---------')
    wrapper = Wrapper(x.train.shape[-1], NUM_CLASS, LEARN_RATE)
    #wrapper.model.cuda(device=DEVICE-1)
    # x = minibatch x batchsize x chroma // y = minibatch x batchsize

    # Load model
    model = torch.load('export/model_56.489192.pth')
    wrapper.model = model

    # Test
    pred_test, _, _ = wrapper.run_model(x.test, y.test, DEVICE, 'eval')

    chroma_test = data_manager.batch_dataset(info_test.chroma, BATCH_SIZE)
    chord_test = data_manager.batch_dataset(info_test.chord, BATCH_SIZE)
    chroma_test = chroma_test.reshape(
        chroma_test.shape[0] * chroma_test.shape[1], chroma_test.shape[-1])
    chord_test = chord_test.reshape(chord_test.shape[0] * chord_test.shape[1])

    acc_test, pred_test = data_manager.frame_accuracy(chord_test,
                                                      pred_test,
                                                      info_test,
                                                      BATCH_SIZE,
                                                      mode=MODE)
    print('\nTest Accuracy : ' + str(round(100 * acc_test, 2)) + '%')

    # Export
    wrapper.export(EXPORT_DIR, chroma_test, chord_test, pred_test, acc_test)
    print('Exported files to ' + os.path.abspath(EXPORT_DIR))