Example #1
 def run(self):
     """
     | End of the processing pipeline.
     """
     print("---" + __class__.__name__ + ": run")
     with self.output().open("w") as target:
         exp_data = pd.read_pickle(self.intermediate_folder +
                                   mu.convert_date_to_str(self.end_date) +
                                   '_exp_data.pkl')
         # Run the prediction
         pred_df = self.skproc.proc_predict_sk_model(exp_data)
         print("End_baoz_predict run: pred_df", pred_df.shape)
         import_df = self.skproc.create_import_data(pred_df)
         if self.export_mode:
             print("export data")
             import_df.to_pickle(self.intermediate_folder +
                                 self.skproc.version_str + '/' +
                                 'export_data.pkl')
             if self.skproc.version_str == "win":
                 analyze_df = self.skproc.eval_pred_data(import_df)
                 print(analyze_df)
         self.skproc.import_data(import_df)
         Output.post_slack_text(dt.now().strftime("%Y/%m/%d %H:%M:%S") +
                                " finish predict job:" +
                                self.skproc.version_str)
         print("{task} says: task finished".format(task=self.__class__.__name__))
Example #2
 def run(self):
     """
     Download the JRDB data files and move them into the working folder.
     """
     print("----" + __class__.__name__ + ": run")
     Output.post_slack_text(dt.now().strftime("%Y/%m/%d %H:%M:%S") +
                            " start download jrdb file")
     with self.output().open("w") as target:
         download = JrdbDownload()
         download.procedure_download()
         download.move_file()
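
The run() methods in these examples all open self.output() for writing, which is the Luigi task pattern: output() returns a Target, and writing the marker file signals completion to the scheduler. Below is a minimal sketch, assuming Luigi, of the kind of task class such a run() body plugs into; the class name is borrowed from the log prefix in Example #1, while the parameters and the marker path are illustrative guesses, not the project's actual definitions.

import luigi

class End_baoz_predict(luigi.Task):
    # Assumed parameters, inferred from the attributes referenced in run()
    intermediate_folder = luigi.Parameter()
    end_date = luigi.DateParameter()
    export_mode = luigi.BoolParameter(default=False)

    def output(self):
        # run() opens this target via self.output().open("w") as a completion marker
        return luigi.LocalTarget(self.intermediate_folder + "end_baoz_predict_done.txt")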
Example #3
 def run(self):
     """
     Fetch the explanatory-variable data from SK_DATA_MODEL based on the given exp_data_name, and save it as a pickle file.
     """
     print("----" + __class__.__name__ + ": run")
     Output.post_slack_text(dt.now().strftime("%Y/%m/%d %H:%M:%S") +
                            " start predict job:" + self.skproc.version_str)
     with self.output().open("w") as target:
         print("------ Prediction data differs per model, so build it for the specified model")
         predict_df = self.skproc.create_predict_data()
         print("Sub_get_exp_data run: predict_df", predict_df.shape)
         predict_df.to_pickle(self.intermediate_folder +
                              mu.convert_date_to_str(self.end_date) +
                              '_exp_data.pkl')
         print("{task} says: task finished".format(task=self.__class__.__name__))
Example #4
 def run(self):
     # Run feature creation: load the full learning dataset and execute the SkModel feature-creation step
     print("---" + __class__.__name__ + ": run")
     Output.post_slack_text(dt.now().strftime("%Y/%m/%d %H:%M:%S") +
                            " start Sub_create_feature_select_data job:" +
                            self.skproc.version_str)
     with self.output().open("w") as target:
         file_name = self.intermediate_folder + "_learning.pkl"
         with open(file_name, 'rb') as f:
             learning_df = pickle.load(f)
             learning_df = self.skproc.merge_learning_df(learning_df)
             self.skproc.create_featrue_select_data(learning_df)
         Output.post_slack_text(
             dt.now().strftime("%Y/%m/%d %H:%M:%S") +
             " finish Sub_create_feature_select_data job:" +
             self.skproc.version_str)
         print("{task} says: task finished".format(task=self.__class__.__name__))
Example #5
    def run(self):
        # Use SkModel to build the learning data; after building the full dataset, create per-racetrack data
        print("----" + __class__.__name__ + ": run")
        Output.post_slack_text(dt.now().strftime("%Y/%m/%d %H:%M:%S") +
                               " start Sub_get_learning_data job:" +
                               self.skproc.version_str)
        with self.output().open("w") as target:
            print("------ Build learning_df")
            self.skproc.set_learning_df()
            print("------ Save the training data")
            self.skproc.learning_df.to_pickle(self.intermediate_folder +
                                              '_learning.pkl')

            Output.post_slack_text(dt.now().strftime("%Y/%m/%d %H:%M:%S") +
                                   " finish Sub_get_learning_data job:" +
                                   self.skproc.version_str)
            print("{task} says: task finished".format(task=self.__class__.__name__))
Example #6
 def run(self):
     # Train per objective variable and per racetrack code, and store the resulting models in the intermediate folder
     print("---" + __class__.__name__ + ": run")
     Output.post_slack_text(dt.now().strftime("%Y/%m/%d %H:%M:%S") +
                            " start End_baoz_learning job:" +
                            self.skproc.version_str)
     with self.output().open("w") as target:
         file_name = self.intermediate_folder + "_learning.pkl"
         with open(file_name, 'rb') as f:
             df = pickle.load(f)
             # Run the training
             df = self.skproc.merge_learning_df(df)
             self.skproc.proc_learning_sk_model(df)
         Output.post_slack_text(dt.now().strftime("%Y/%m/%d %H:%M:%S") +
                                " finish End_baoz_learning job:" +
                                self.skproc.version_str)
         print("{task} says: task finished".format(task=self.__class__.__name__))
Example #7
from modules.report import Report
from modules.output import Output
from modules.import_to_cosmosdb import Import_to_CosmosDB

from datetime import datetime as dt
from datetime import timedelta

n = 0
start_date = (dt.now() + timedelta(days=n)).strftime('%Y/%m/%d')
end_date = (dt.now() + timedelta(days=n)).strftime('%Y/%m/%d')
mock_flag = False
output = Output()
rep = Report(start_date, end_date, mock_flag)

post_text = ''
now_time = dt.now()

def export_to_dropbox():
    start_date = dt.now().strftime('%Y/%m') + '/01'
    end_date = dt.now().strftime('%Y/%m/%d')
    rep = Report(start_date, end_date, mock_flag)
    rep.export_bet_df()
    rep.export_race_df()
    rep.export_raceuma_df()


current_text = rep.get_current_text()

if rep.check_flag:
    bet_text = rep.get_todays_bet_text()
    post_text += current_text
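
The excerpt ends after assembling post_text. The other examples push status messages to Slack with Output.post_slack_text, so presumably the assembled report text is sent the same way; the snippet below is purely an illustrative guess at that final step, not code from the project.

if post_text:
    # Illustrative only: send the assembled report text to Slack,
    # mirroring the Output.post_slack_text calls in the other examples.
    Output.post_slack_text(now_time.strftime("%Y/%m/%d %H:%M:%S") + " " + post_text)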
Example #8
 def run(self):
     print("---" + __class__.__name__ + ": run")
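     # Load the betting-target series and condition dataframe produced by the
     # upstream model step, then build the score, mark, comment, vote and PBI files.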
     with self.output().open("w") as target:
         target_sr = pd.read_pickle(self.dict_path +
                                    'model/kaime/target_sr.pkl')
         cond_df = pd.read_pickle(self.dict_path +
                                  'model/kaime/cond_df.pkl')
         to = Output(self.start_date, self.end_date, self.term_start_date,
                     self.term_end_date, self.test_flag, target_sr, cond_df)
         to.set_pred_df()
         to.set_result_df()
         to.create_raceuma_score_file()
         to.create_main_mark_file()
         to.create_raceuma_mark_file()
         to.create_result_race_comment_file()
         to.create_result_raceuma_comment_file()
         to.create_target_mark_df()
         to.create_vote_file()
         to.create_pbi_file()
         to.create_pbi_result_file()
Example #9
    if args.pdbs:
        pdbs = args.pdbs

    if args.depth_files:
        depth_files = args.depth_files

    if args.out:
        out = args.out[0]

    exp_list = Reference(referee).get_residues()

    # Obtain models from the input and run scoring;
    # scores are collected in the models dictionary.

    models = {}

    if pdbs and not depth_files:
        for pdb in pdbs:
            depth = Depth(pdb, None, depth_path)
            models[pdb.split('.pdb')[0]] = Score(depth, exp_list).score_mono()
    elif depth_files:
        for depth_file in depth_files:
            depth = Depth(None, depth_file, depth_path)
            models[depth_file.split('-residue.depth')[0]] = Score(
                depth, exp_list).score_mono()
    else:
        print("Please specify pdbs or depth files, use -h flag for help.")
        sys.exit()

    Output(models, out)
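
The excerpt above reads pdbs, depth_files and out from an already-parsed args object. Below is a minimal sketch, assuming argparse, of the option definitions it seems to expect; the flag names are inferred from the attribute names, the help strings are guesses, and other options used elsewhere in the script (such as the reference structure) are omitted.

import argparse

parser = argparse.ArgumentParser(description="Score structure models by residue depth")
parser.add_argument("--pdbs", nargs="+", help="PDB files to score")
# argparse maps --depth-files to args.depth_files automatically
parser.add_argument("--depth-files", nargs="+", help="precomputed *-residue.depth files")
parser.add_argument("--out", nargs=1, help="output file for the collected scores")
args = parser.parse_args()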