    def act_modify_data(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            self.y_train[self.y_train > 4] = 4
            self.y_valid[self.y_valid > 4] = 4
            self.y_test[self.y_test > 4] = 4
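The three assignments above cap every label greater than 4 at 4 in place. A standalone sketch of the same operation, assuming the labels are numpy arrays (as act_register_data further down builds them with np.array):

import numpy as np

y = np.array([0, 2, 5, 7, 3])
y[y > 4] = 4                                  # in-place form used above
print(y)                                      # [0 2 4 4 3]
# np.clip gives the same result without mutating the input:
print(np.clip(np.array([0, 2, 5, 7, 3]), a_min=None, a_max=4))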
    def act_run_instant_match(is_test=False):
        if is_test is True:
            _TPI(CLASS_NAME, locals())
        else:
            MACRO.move_click_sleep((-174, 104), 1, 1)
            MACRO.move_click_sleep((-114, 180), 1, 1)
            MACRO.press_sleep('space', 2)
Example #3
    def __init__(self, *args, **kwargs):
        _TPI(self, locals())
        super(TabNetBase, self).__init__(*args, **kwargs)
        ACT = self.env.act
        MATCH = self.env.match_loader

        self.X_train, self.X_valid, self.X_test = None, None, None
        self.y_train, self.y_valid, self.y_test = None, None, None
        self.cat_idxs, self.cat_dims, self.cat_emb_dim = MATCH.get_categorical(
        )
        self.ai = None

        self._scenario_tactics = None
        self._scenario_matches = None

        self._scenario_learn_from_file = list([[
            1,
            # [self.epochs,
            [
                1,
                # [len(MATCH),
                [
                    1,
                    (self.act_register_data,
                     dict(data=MATCH.act_get(is_flat=True))),
                    self.act_modify_data,
                    self.act_init_ai,
                    # self.act_load_game,
                    self.act_run_ai_with_learn,
                    # self.act_test
                ]
            ],
        ]])

        self.set_mode(self.mode)
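The runner that consumes these nested lists is not shown in this section; the shape of _scenario_learn_from_file suggests each block is [repeat_count, item, ...], where an item is a bare callable, a (callable, kwargs) pair, or another nested block. A purely hypothetical interpreter sketch under that assumption:

def run_scenario(block):
    """Hypothetical walker for one [repeat_count, item, ...] block."""
    count, items = block[0], block[1:]
    for _ in range(count):
        for item in items:
            if isinstance(item, list):        # nested block
                run_scenario(item)
            elif isinstance(item, tuple):     # (callable, kwargs) pair
                func, kwargs = item
                func(**kwargs)
            else:                             # bare callable
                item()

# Example: prints "step" three times (outer 1 repeat x inner 3 repeats).
run_scenario([1, [3, (print, dict(end="step\n"))]])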
    def act_modify_data(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            self.y_train = self.y_train.reshape(-1, 1)
            self.y_valid = self.y_valid.reshape(-1, 1)
            self.y_test = self.y_test.reshape(-1, 1)
Example #5
    def act_modify_data(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            MATCH = self.env.match_loader
            for i in range(len(self.y_train)):
                result = MATCH.train_plus[i] - MATCH.train_minus[i]

                if result < 0:
                    result = 0
                elif result == 0:
                    result = 1
                else:
                    result = 2
                self.y_train[i] = result

            for i in range(len(self.y_valid)):
                result = MATCH.valid_plus[i] - MATCH.valid_minus[i]
                if result < 0:
                    result = 0
                elif result == 0:
                    result = 1
                else:
                    result = 2
                self.y_valid[i] = result

            for i in range(len(self.y_test)):
                result = MATCH.test_plus[i] - MATCH.test_minus[i]
                if result < 0:
                    result = 0
                elif result == 0:
                    result = 1
                else:
                    result = 2
                self.y_test[i] = result
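The three loops above encode the difference between the plus and minus columns as a three-class label: 0 for a negative difference, 1 for zero, 2 for a positive one. A vectorized sketch of the same mapping, assuming the plus/minus columns are plain numeric sequences:

import numpy as np

def encode_result(plus, minus):
    """Map the plus/minus difference to 0 (loss), 1 (draw), 2 (win)."""
    diff = np.asarray(plus) - np.asarray(minus)
    return (np.sign(diff) + 1).astype(int)

print(encode_result([2, 1, 0], [0, 1, 3]))    # [2 1 0]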
    def act_move_to_statistics_window(is_test=False):
        if is_test is True:
            _TPI(CLASS_NAME, locals())
        else:
            MACRO.press_sleep('space', 1.4)
            MACRO.press_sleep('space', 1.4)
            MACRO.press_sleep('space', 2.8)
    def set_mode(self, mode):
        _TPI(self, locals())
        assert mode in available_modes, "NOT valid mode"

        def set_scenario(_self, actions: list = None):
            _self.scenario = copy.deepcopy(actions)

        def set_action_as_test(scenario) -> list:
            return_value = list()
            for item in scenario:
                unit = list()
                unit.append(item[0])
                actions = item[1:]
                for action in actions:
                    # Bare callables (functions or bound methods) vs. (callable, kwargs) pairs.
                    if callable(action):
                        unit.append((action, dict(is_test=True)))
                    else:
                        action[1]['is_test'] = True
                        unit.append((action[0], action[1]))

                return_value.append(unit)
            return return_value

        if mode == "TACTICS":
            assert len(
                self._scenario_tactics) > 0, "Scenario of tactics is empty."
            set_scenario(self, self._scenario_tactics)
            self.is_testing_scenario = False
        elif mode == "MATCHES":
            assert len(
                self._scenario_matches) > 0, "Scenario of matches is empty."
            set_scenario(self, self._scenario_matches)
            self.is_testing_scenario = False
        elif mode == "TEST_TACTICS":
            assert len(
                self._scenario_tactics) > 0, "Scenario of tactics is empty."
            # test_actions: list = set_action_as_test(self._scenario_tactics)
            set_scenario(self, self._scenario_tactics)
            self.is_testing_scenario = True
        elif mode == "TEST_MATCHES":
            assert len(
                self._scenario_matches) > 0, "Scenario of matches is empty."
            # test_actions: list = set_action_as_test(self._scenario_matches)
            set_scenario(self, self._scenario_matches)
            self.is_testing_scenario = True
        elif mode == "TESTING":
            raise NotImplementedError("NOT Implemented")
        elif mode == "LEARN_FROM_FILE":
            assert len(self._scenario_learn_from_file
                       ) > 0, "Scenario of 'learn from file' is empty."
            set_scenario(self, self._scenario_learn_from_file)
            self.is_testing_scenario = False
        elif mode == "TEST_LEARN_FROM_FILE":
            assert len(self._scenario_learn_from_file
                       ) > 0, "Scenario of 'learn from file' is empty."
            set_scenario(self, self._scenario_learn_from_file)
            self.is_testing_scenario = True
        elif mode == "PREDICT_DATA":
            raise NotImplementedError("NOT Implemeneted")
Example #8
    def act_load_game(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            save = self.save_name + ".zip"
            if os.path.isfile(save):
                print("Load Network")
                self.ai.load_model(save)

    def act_get_lineup(is_test=False):
        if is_test is True:
            _TPI(CLASS_NAME, locals())
        else:
            # result: str = MACRO.mouse_ocr((973, 209), (1015, 627))
            lineup = MACRO.mouse_ocr((1371, 200), (1452, 624))
            # return result.replace('F', 'R')
            return lineup
    def __init__(self, mode=None, epochs=1, players=1):
        _TPI(self, locals())
        assert mode is not None, "Running mode is not specified"
        self.mode = mode
        self.env = Environment()
        self.scenario = list()
        self.epochs = epochs
        self.batches = players
        self.is_testing_scenario = False
Example #11
    def act_register_data(self, data, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            self.X_train = np.array(self.env.match_loader.train_players)
            self.y_train = np.array(self.env.match_loader.train_plus)
            self.X_valid = np.array(self.env.match_loader.valid_players)
            self.y_valid = np.array(self.env.match_loader.valid_plus)
            self.X_test = np.array(self.env.match_loader.test_players)
            self.y_test = np.array(self.env.match_loader.test_plus)

    def __init__(self,
                 csv_field: str = None,
                 csv_keeper: str = None,
                 players: int = 1):
        _TPI(self, locals())
        self.Actions = Actions
        self.data_loader = DataLoaderFM(files=(ConstantFile.file_fd(),
                                               ConstantFile.file_gk()),
                                        players=players)
        self.match_loader = MatchLoader()
Example #13
    def act_test(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            predictions = self.ai.predict(self.X_test)
            y_true = self.y_test
            test_score = mean_squared_error(y_pred=predictions, y_true=y_true)
            # np.savetxt("predict.txt", predictions, delimiter=',', fmt='%d')
            # np.savetxt("true.txt", y_true, delimiter=',', fmt='%d')
            print(test_score)
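mean_squared_error here is presumably sklearn.metrics.mean_squared_error. A self-contained sketch of the same scoring step on dummy arrays, with the RMSE added since it reads more naturally in the units of the target:

import numpy as np
from sklearn.metrics import mean_squared_error

y_true = np.array([[3.0], [1.0], [0.0]])
y_pred = np.array([[2.5], [1.5], [0.5]])
mse = mean_squared_error(y_true=y_true, y_pred=y_pred)
print(mse)            # 0.25
print(np.sqrt(mse))   # 0.5 (RMSE)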
Example #14
    def act_init_ai(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            MATCH = self.env.match_loader
            self.ai = TabNetRegressor(n_steps=10,
                                      input_dim=MATCH.count_cols *
                                      MATCH.count_players,
                                      cat_dims=self.cat_dims,
                                      cat_emb_dim=self.cat_emb_dim,
                                      cat_idxs=self.cat_idxs)
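get_categorical() is not shown in this section; in pytorch-tabnet those three values describe the categorical columns of the input matrix: their positions (cat_idxs), their cardinalities (cat_dims), and the embedding width used for each (cat_emb_dim). A minimal sketch with a made-up column layout; the exact constructor arguments vary slightly between pytorch-tabnet releases:

from pytorch_tabnet.tab_model import TabNetRegressor

# Hypothetical layout: columns 0 and 3 are categorical with 11 and 30 levels.
cat_idxs = [0, 3]
cat_dims = [11, 30]
cat_emb_dim = 2   # one shared embedding width (a per-column list also works)

model = TabNetRegressor(n_steps=10,
                        cat_idxs=cat_idxs,
                        cat_dims=cat_dims,
                        cat_emb_dim=cat_emb_dim)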
Example #15
    def act_run_ai_with_learn(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            self.ai.fit(X_train=self.X_train,
                        y_train=self.y_train,
                        X_valid=self.X_valid,
                        y_valid=self.y_valid,
                        max_epochs=self.epochs,
                        patience=500,
                        batch_size=512,
                        drop_last=False)
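The X_valid=/y_valid= keywords match older pytorch-tabnet releases; in newer 2.x releases the validation data is passed through eval_set instead. A self-contained sketch of that form on tiny synthetic data, with 2-D targets matching the reshape step above:

import numpy as np
from pytorch_tabnet.tab_model import TabNetRegressor

rng = np.random.default_rng(0)
X_train, X_valid = rng.normal(size=(64, 8)), rng.normal(size=(16, 8))
y_train = X_train[:, :1] + 0.1 * rng.normal(size=(64, 1))
y_valid = X_valid[:, :1] + 0.1 * rng.normal(size=(16, 1))

model = TabNetRegressor(n_steps=3)
model.fit(X_train=X_train, y_train=y_train,
          eval_set=[(X_valid, y_valid)],      # replaces X_valid=/y_valid= in 2.x
          max_epochs=2, patience=5,
          batch_size=32, drop_last=False)
print(model.predict(X_valid).shape)           # (16, 1)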
    def __init__(self, ai_name=None, mode=None, epochs=1, players=1):
        _TPI(self, locals())
        assert ai_name is not None, "You didn't select any ai model."
        assert mode is not None, "You didn't select any mode."

        def get_network_available(name):
            assert name in available_networks, "Available : {} / Inp : {}".format(
                str(available_networks.keys()), name)
            return available_networks[name]

        self.ai_network: AI_Base = get_network_available(ai_name)(
            mode=mode, epochs=epochs, players=players)
    def act_load_save_file(save_name=None, is_test=False):
        if is_test is True:
            _TPI(CLASS_NAME, locals())
        else:
            MACRO.cmd_key('ctrl', 'o')
            time.sleep(2)
            pyperclip.copy(save_name[:len(save_name) - 1])
            MACRO.cmd_key('ctrl', 'v')
            pyautogui.typewrite(")")
            MACRO.press_sleep('enter', 0.1)

            # Detect that loading has finished
            MACRO.screen_check(33 + 279, 1920 - 1652, [29, 22, 15, 255])
Example #18
    def __init__(self, files, players):
        _TPI(self, locals())
        self.length = ConstantData.length()

        if self.length == 1 and isinstance(files, str):
            # A single path may arrive as a bare string; wrap it so the length
            # check and the read loop below treat it as one file.
            files = (files,)
        assert self.length == len(
            files), "INVALID count of dataloader files, or wrong file path"

        self.data = list()
        print(files)
        for i in range(self.length):
            self.data.append(pd.read_csv(files[i]))
Example #19
    def __init__(self, *args, **kwargs):
        _TPI(self, locals())
        super(RNNv0, self).__init__(*args, **kwargs)

        ACT = self.env.act
        DATA = self.env.data

        self._scenario_predict_data = list([[1, self.act_output_console]])

        self.rnn_plus = RNNmulti()
        self.rnn_minus = RNNmulti()

        self.plus = 0
        self.minus = 0
Example #20
    def __init__(self, *args, **kwargs):
        _TPI(self, locals())
        super(TestModel, self).__init__(*args, **kwargs)

        ACT = self.env.act
        MATCH = self.env.match_loader

        self._scenario_tactics = list([
            [1, ACT.act_activate_window],
            [
                self.epochs,
                [1, ACT.act_move_to_statistics_window],
                [
                    self.batches, ACT.act_get_lineup,
                    self.act_get_players_data, self.act_run_ai_with_learn,
                    ACT.act_change_tactics, ACT.act_run_instant_match
                ],
                [1, ACT.act_wait_match_finished],
                [1, ACT.act_close_match_result_window],
                [1, ACT.act_get_matches_result],
                [1, self.act_ai_learn],
                [1, ACT.act_load_save_file],
                [1, ACT.act_wait_save_is_loaded],
            ],
        ])

        self._scenario_matches = list(
            [ACT.act_activate_window, ACT.act_move_to_statistics_window])

        self._scenario_learn_from_file = list([
            # [self.epochs,
            [
                4,
                [
                    len(MATCH),
                    # MATCH.act_get,
                    self.act_get_players_data,
                    self.act_run_ai_with_learn,
                    self.act_ai_learn,
                    MATCH.act_next
                ]
            ]
        ])

        self.set_mode(self.mode)
    def act_get(self, ind=None, is_flat=False, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            if ind is None:
                filename = self.files[self.index]
            else:
                filename = self.files[ind]

            with open(self.filepath + filename, 'r') as f:
                data_list = json.load(f)
                result = list()
                tactics = list()
                plus = list()
                minus = list()

                for data in data_list:
                    one_data = list()
                    my_team = data['m']
                    for player in my_team:
                        player.insert(0, 0)
                        if is_flat:
                            one_data += player
                        else:
                            one_data.append(player)

                    your_team = data['o']
                    for player in your_team:
                        player.insert(0, 1)
                        if is_flat:
                            one_data += player
                        else:
                            one_data.append(player)

                    result.append(one_data)

                    tactics.append(data['t'])
                    plus.append(data['s'])
                    minus.append(data['l'])

                return result, tactics, plus, minus
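act_get assumes each file holds one JSON array of match records, with the two line-ups under 'm' and 'o' (one list of stats per player), the tactic under 't', and two score-like fields under 's' and 'l' that the loader exposes as plus and minus. A minimal, made-up record showing the flattening that is_flat=True performs, with a 0/1 team flag prepended to every player:

import json

record = {
    "m": [[7, 12, 3], [6, 10, 4]],   # my team: one stat list per player
    "o": [[5, 11, 2], [8, 9, 5]],    # opposing team
    "t": 2,                          # tactic id
    "s": 1,                          # "plus" score
    "l": 0,                          # "minus" score
}
data = json.loads(json.dumps([record]))[0]

one_data = []
for player in data["m"]:
    one_data += [0] + player         # 0 marks my team
for player in data["o"]:
    one_data += [1] + player         # 1 marks the opponent
print(one_data)  # [0, 7, 12, 3, 0, 6, 10, 4, 1, 5, 11, 2, 1, 8, 9, 5]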
    def act_next(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            self.index += 1
Example #23
    def act_modify_data(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            pass
Example #24
    def act_save_model(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            print(self.save_name)
            self.ai.save_model(self.save_name)

    def run(self):
        _TPI(self, locals())
        self.ai_network.run(self.ai_network.scenario)

    def get_players_stat(self, team):
        _TPI(self, locals())
        result = None
        return result

    def act_register_data(self, data, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            raise NotImplementedError("ERROR")

    def act_save_match_result(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            raise NotImplementedError("ERROR")

    def data(self):
        _TPI(self, locals())

    def act_ai_learn(self, is_test=False):
        if is_test is True:
            _TPI(self, locals())
        else:
            raise NotImplementedError("ERROR")