# Assumed imports for these snippets (project-level dependencies such as
# StateUtil, Replayer, LineModel, CmdAction, etc. come from the surrounding
# modules; JSON is assumed to be `import json as JSON`):
from time import gmtime, strftime


def guess_action_cal_reward(state_path, output_path):
    with open(state_path, 'r') as state_file, open(output_path, 'w') as output:
        lines = state_file.readlines()

        # Segment the log into consistent runs of states: reset whenever the
        # tick restarts (a new battle) or goes backwards
        state_logs = []
        prev_state = None
        for line in lines:
            cur_state = StateUtil.parse_state_log(line)
            if cur_state.tick == StateUtil.TICK_PER_STATE:
                print("clear")
                prev_state = None
            elif prev_state is not None and prev_state.tick >= cur_state.tick:
                print("clear")
                prev_state = None
            if prev_state is not None:
                state_logs.append(prev_state)
            prev_state = cur_state
        if prev_state is not None:
            state_logs.append(prev_state)

        # Guess the player's actions
        for idx in range(1, len(state_logs) - 1):
            prev_state = state_logs[idx - 1]
            cur_state = state_logs[idx]
            next_state = state_logs[idx + 1]
            if cur_state.tick >= 55044:
                db = 1  # debug breakpoint anchor for a specific tick
            hero = prev_state.get_hero("27")
            line_index = 1
            near_enemy_heroes = StateUtil.get_nearby_enemy_heros(prev_state, hero.hero_name, StateUtil.LINE_MODEL_RADIUS)
            near_enemy_units = StateUtil.get_nearby_enemy_units(prev_state, hero.hero_name, StateUtil.LINE_MODEL_RADIUS)
            nearest_enemy_tower = StateUtil.get_nearest_enemy_tower(prev_state, hero.hero_name, StateUtil.LINE_MODEL_RADIUS)
            near_enemy_units_in_line = StateUtil.get_units_in_line(near_enemy_units, line_index)
            nearest_enemy_tower_in_line = StateUtil.get_units_in_line([nearest_enemy_tower], line_index)
            if len(near_enemy_heroes) != 0 or len(near_enemy_units_in_line) != 0 or len(nearest_enemy_tower_in_line) != 0:
                player_action = Replayer.guess_player_action(prev_state, cur_state, next_state, "27", "28")
                action_str = StateUtil.build_command(player_action)
                print('player action analysis: ' + str(action_str) + ' tick: ' + str(prev_state.tick)
                      + ' prev_pos: ' + hero.pos.to_string()
                      + ', cur_pos: ' + cur_state.get_hero(hero.hero_name).pos.to_string())
                prev_state.add_action(player_action)

        # Test the reward computation
        state_logs_with_reward = LineModel.update_rewards(state_logs)
        for state_with_reward in state_logs_with_reward:
            # Write the result to file
            state_encode = state_with_reward.encode()
            state_json = JSON.dumps(state_encode)
            output.write(strftime("%Y-%m-%d %H:%M:%S", gmtime()) + " -- " + state_json + "\n")
            output.flush()
        print(len(state_logs))
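# --- Usage sketch (illustrative): how the offline replay pipeline above could
# be invoked. The paths are placeholders, not paths from this project; the log
# file is expected to hold one state log line per frame, as consumed above.
def _run_replay_example():
    guess_action_cal_reward(
        state_path='logs/battle.state.log',     # placeholder input log
        output_path='out/battle.reward.log',    # placeholder output with rewards
    )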
def upgrade_skills(self, state_info, hero_name):
    # If any skills can be upgraded, prefer upgrading skill 3
    hero = state_info.get_hero(hero_name)
    skills = StateUtil.get_skills_can_upgrade(hero)
    if len(skills) > 0:
        skillid = 3 if 3 in skills else skills[0]
        update_cmd = CmdAction(hero.hero_name, CmdActionEnum.UPDATE, skillid, None, None, None, None, None, None)
        update_str = StateUtil.build_command(update_cmd)
        return update_str
def guess_hero_actions(self, state_index, real_heros=None):
    prev_state = self.state_cache[state_index - 1]
    cur_state = self.state_cache[state_index]
    next_state = self.state_cache[state_index + 1]

    # If necessary, update this frame with the real (human) players' action info
    if real_heros is not None:
        for hero_name in real_heros:
            hero_action = Replayer.guess_player_action(prev_state, cur_state, next_state, hero_name, '28')
            cur_state.add_action(hero_action)
            action_str = StateUtil.build_command(hero_action)
            print('player action analysis: ' + str(action_str) + ' tick: ' + str(cur_state.tick))
def add_other_hero_action(input_data, hero_info, action_cmd, debug=False):
    # Ignore the hero's own action
    if hero_info.hero_name == action_cmd.hero_name:
        return input_data

    # Ignore non-offensive actions
    if action_cmd.action != CmdActionEnum.ATTACK and action_cmd.action != CmdActionEnum.CAST:
        return input_data

    # Ignore actions that do not come from a teammate
    # (the self case was already handled above)
    friends, opponents = TeamBattleUtil.get_friend_opponent_heros(TeamBattleInput.HERO_LIST, hero_info.hero_name)
    if action_cmd.hero_name not in friends:
        return input_data

    # Update the input vector: locate the acting teammate's 89-wide feature
    # block, then the slot for the target hero and (for casts) the skill used
    hero_index = friends.index(action_cmd.hero_name) + 1
    tgt_hero_index = (TeamBattleInput.TEAM_A.index(action_cmd.tgtid)
                      if action_cmd.tgtid in TeamBattleInput.TEAM_A
                      else TeamBattleInput.TEAM_B.index(action_cmd.tgtid))
    if action_cmd.action == CmdActionEnum.ATTACK:
        change_index = hero_index * 89 + 15 + tgt_hero_index
    else:
        change_index = hero_index * 89 + 20 + (int(action_cmd.skillid) - 1) * 23 + 18 + tgt_hero_index

    prev_value = input_data[change_index]
    if prev_value != 0 and debug:
        print("add_other_hero_action", "must be something wrong", "prev_value not zero")
    input_data[change_index] = 1

    if debug:
        debug_action_str = StateUtil.build_command(action_cmd)
        print("add_other_hero_action", "add_hero_info", hero_info.hero_name, "hero_index", hero_index,
              "tgt_hero_index", tgt_hero_index, "action_cmd.action", action_cmd.action,
              "action_cmd_skill", action_cmd.skillid, "change_index", change_index, "cmd", debug_action_str)
    return input_data
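# --- Worked example (illustrative) of the offset arithmetic in
# add_other_hero_action. The 89-wide per-hero block and the 15/20/23/18
# offsets come from TeamBattleInput's feature layout; the index values below
# are made up for the check.
def _offset_example():
    hero_index = 2       # acting teammate: friends.index(...) + 1
    tgt_hero_index = 3   # target hero slot within its team

    # Plain attack: one slot per possible target inside the hero block
    attack_index = hero_index * 89 + 15 + tgt_hero_index
    assert attack_index == 196          # 178 + 15 + 3

    # Skill cast: each skill owns a 23-wide sub-block starting at offset 20
    skillid = 2
    cast_index = hero_index * 89 + 20 + (skillid - 1) * 23 + 18 + tgt_hero_index
    assert cast_index == 242            # 178 + 20 + 23 + 18 + 3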
def build_response(self, state_cache, state_index, hero_name):
    action_strs = []
    restart = False
    prev_hero = None

    # For a model-controlled hero, analyse the current frame
    if self.real_hero != hero_name:
        state_info = state_cache[state_index]
        prev_hero = state_cache[state_index - 1].get_hero(hero_name) if len(state_cache) >= 2 else None
    # With a real player we need some history, so analyse the frame 3 steps back
    elif len(state_cache) > 3:
        state_info = state_cache[state_index - 3]
        next1_state_info = state_cache[state_index - 2]
        next2_state_info = state_cache[state_index - 1]
        next3_state_info = state_cache[state_index]
    else:
        return action_strs, False

    # Decide whether to buy equipment
    buy_action = EquipUtil.buy_equip(state_info, hero_name)
    if buy_action is not None:
        buy_str = StateUtil.build_command(buy_action)
        action_strs.append(buy_str)

    # If any skills can be upgraded, prefer upgrading skill 3
    hero = state_info.get_hero(hero_name)
    skills = StateUtil.get_skills_can_upgrade(hero)
    if len(skills) > 0:
        skillid = 3 if 3 in skills else skills[0]
        update_cmd = CmdAction(hero.hero_name, CmdActionEnum.UPDATE, skillid, None, None, None, None, None, None)
        update_str = StateUtil.build_command(update_cmd)
        action_strs.append(update_str)

    # Recall logic: if the hero is recalling and has not been interrupted,
    # keep recalling and return nothing
    if prev_hero is not None:
        if hero.hero_name in self.hero_strategy and self.hero_strategy[hero.hero_name] == ActionEnum.town_ing \
                and prev_hero.hp <= hero.hp \
                and not StateUtil.if_hero_at_basement(hero):
            if not hero.skills[6].canuse:
                print(self.battle_id, hero.hero_name, 'recalling, keep recalling')
                return action_strs, False
            else:
                print(self.battle_id, hero.hero_name, 'recall failed')
                town_action = CmdAction(hero.hero_name, CmdActionEnum.CAST, 6, hero.hero_name, None, None, None, None, None)
                action_str = StateUtil.build_command(town_action)
                action_strs.append(action_str)
                return action_strs, False
        if hero.hp <= 0:
            self.hero_strategy[hero.hero_name] = None
            return action_strs, False

    # # HP-restore logic
    # if prev_hero is not None and hero.hero_name in self.hero_strategy and \
    #         self.hero_strategy[hero.hero_name] == ActionEnum.hp_restore:
    #     if StateUtil.cal_distance2(prev_hero.pos, hero.pos) < 100:
    #         print(self.battle_id, hero_name, 'reached the HP-restore spot', 'hp gained', hero.hp - prev_hero.hp)
    #         del self.hero_strategy[hero_name]
    #         if hero == self.model1_hero:
    #             self.model1_hp_restore = time.time()
    #         else:
    #             self.model2_hp_restore = time.time()

    # Retreat logic
    # TODO could even use movement skills to retreat
    if prev_hero is not None and hero.hero_name in self.hero_strategy \
            and self.hero_strategy[hero.hero_name] == ActionEnum.retreat_to_town:
        if StateUtil.cal_distance2(prev_hero.pos, hero.pos) < 100:
            print(self.battle_id, hero_name, 'start recalling')
            self.hero_strategy[hero.hero_name] = ActionEnum.town_ing
            town_action = CmdAction(hero.hero_name, CmdActionEnum.CAST, 6, hero.hero_name, None, None, None, None, None)
            action_str = StateUtil.build_command(town_action)
            action_strs.append(action_str)
        else:
            print(self.battle_id, hero_name, 'still retreating', StateUtil.cal_distance2(prev_hero.pos, hero.pos))
        return action_strs, False

    # After killing the enemy hero and clearing nearby minions,
    # start the retreat-and-recall logic
    if prev_hero is not None:
        if hero.hero_name in self.hero_strategy and self.hero_strategy[hero.hero_name] == ActionEnum.town_ing \
                and prev_hero.hp <= hero.hp \
                and not StateUtil.if_hero_at_basement(hero):
            if not hero.skills[6].canuse:
                return action_strs, False
            else:
                town_action = CmdAction(hero.hero_name, CmdActionEnum.CAST, 6, hero.hero_name, None, None, None, None, None)
                action_str = StateUtil.build_command(town_action)
                action_strs.append(action_str)
        if hero.hp <= 0:
            self.hero_strategy[hero.hero_name] = None
            return action_strs, False

    # Check the surroundings
    near_enemy_heroes = StateUtil.get_nearby_enemy_heros(state_info, hero.hero_name, StateUtil.LINE_MODEL_RADIUS)
    near_enemy_units = StateUtil.get_nearby_enemy_units(state_info, hero.hero_name, StateUtil.LINE_MODEL_RADIUS)
    nearest_enemy_tower = StateUtil.get_nearest_enemy_tower(state_info, hero.hero_name, StateUtil.LINE_MODEL_RADIUS + 3)
    nearest_friend_units = StateUtil.get_nearby_friend_units(state_info, hero.hero_name, StateUtil.LINE_MODEL_RADIUS)
    line_index = 1
    near_enemy_units_in_line = StateUtil.get_units_in_line(near_enemy_units, line_index)
    nearest_enemy_tower_in_line = StateUtil.get_units_in_line([nearest_enemy_tower], line_index)

    # After a kill, recall to restore HP. Overall: with no minions nearby start
    # the retreat logic, recall once the retreat point is reached, and rejoin
    # the lane with the wave at full HP.
    # While standing in the fountain, set the strategy layer to lane-farming.
    if len(near_enemy_units_in_line) == 0 and len(near_enemy_heroes) == 0:
        if (hero_name == self.model1_hero and self.model2_just_dead == 1 and not StateUtil.if_hero_at_basement(hero)) \
                or (hero_name == self.model2_hero and self.model1_just_dead == 1 and not StateUtil.if_hero_at_basement(hero)):
            if hero.hp / float(hero.maxhp) > 0.8:
                if hero_name == self.model1_hero:
                    self.model2_just_dead = 0
                else:
                    self.model1_just_dead = 0
            else:
                print(self.battle_id, hero_name, 'chose to retreat')
                self.hero_strategy[hero_name] = ActionEnum.retreat_to_town
                retreat_pos = StateUtil.get_retreat_pos(state_info, hero, line_index=1)
                action = CmdAction(hero_name, CmdActionEnum.MOVE, None, None, retreat_pos, None, None, -1, None)
                action_str = StateUtil.build_command(action)
                action_strs.append(action_str)
                if hero_name == self.model1_hero:
                    self.model2_just_dead = 0
                else:
                    self.model1_just_dead = 0
                return action_strs, False

    if StateUtil.if_hero_at_basement(hero):
        if hero_name == self.model1_hero:
            self.model2_just_dead = 0
        else:
            self.model1_just_dead = 0
        if hero.hp < hero.maxhp:
            if hero_name in self.hero_strategy:
                del self.hero_strategy[hero_name]
            return action_strs, False

    # # When low on HP with no enemies around, go behind the tower to heal
    # if hero.hp / float(hero.maxhp) < 0.9 and hero not in self.hero_strategy:
    #     print('heal condition', self.battle_id, hero_name, time.time(), self.model1_hp_restore, self.model2_hp_restore)
    #     if hero == self.model1_hero and time.time() - self.model1_hp_restore > LineTrainerPPO.HP_RESTORE_GAP:
    #         print(self.battle_id, hero_name, 'chose to heal')
    #         self.hero_strategy[hero_name] = ActionEnum.hp_restore
    #     elif hero == self.model2_hero and time.time() - self.model2_hp_restore > LineTrainerPPO.HP_RESTORE_GAP:
    #         print(self.battle_id, hero_name, 'chose to heal')
    #         self.hero_strategy[hero_name] = ActionEnum.hp_restore
    #
    #     if self.hero_strategy[hero_name] == ActionEnum.hp_restore:
    #         restore_pos = StateUtil.get_hp_restore_place(state_info, hero)
    #         action = CmdAction(hero_name, CmdActionEnum.MOVE, None, None, restore_pos, None, None, -1, None)
    #         action_str = StateUtil.build_command(action)
    #         action_strs.append(action_str)
    #         return action_strs, False

    # Decide the current action according to strategy.
    # When laning, first locate the minion wave and move towards the frontmost minions.
    # If there is danger nearby (enemy units), switch to the laning model.
    # Nearby minions or towers only count if they belong to the specified lane.
    if (len(near_enemy_units_in_line) == 0 and len(nearest_enemy_tower_in_line) == 0
            and (len(near_enemy_heroes) == 0 or StateUtil.if_in_line(hero, line_index, 4000) == -1)) \
            or (len(nearest_friend_units) == 0 and len(near_enemy_units_in_line) == 0
                and len(near_enemy_heroes) == 0 and len(nearest_enemy_tower_in_line) == 1):
        # Follow the wave or the tower, preferring the tower
        self.hero_strategy[hero.hero_name] = ActionEnum.line_1
        # print("strategy layer: no enemies from the specified lane nearby, start farming the lane " + hero.hero_name)
        front_soldier = StateUtil.get_frontest_soldier_in_line(state_info, line_index, hero.team)
        first_tower = StateUtil.get_first_tower(state_info, hero)
        if front_soldier is None or (hero.team == 0 and first_tower.pos.x > front_soldier.pos.x) \
                or (hero.team == 1 and first_tower.pos.x < front_soldier.pos.x):
            # Follow the tower if it is ahead of the wave
            follow_tower_pos = StateUtil.get_tower_behind(first_tower, hero, line_index=1)
            move_action = CmdAction(hero.hero_name, CmdActionEnum.MOVE, None, None, follow_tower_pos, None, None, None, None)
            action_str = StateUtil.build_command(move_action)
            action_strs.append(action_str)
        else:
            # Move to the frontmost minion wave
            move_action = CmdAction(hero.hero_name, CmdActionEnum.MOVE, None, None, front_soldier.pos, None, None, None, None)
            action_str = StateUtil.build_command(move_action)
            action_strs.append(action_str)
    else:
        if self.real_hero != hero_name:
            # Use the model to decide
            # print("use the laning model to decide hero %s's action" % hero.hero_name)
            self.hero_strategy[hero.hero_name] = ActionEnum.line_model

            # Laning currently involves only two heroes
            rival_hero = '28' if hero.hero_name == '27' else '27'
            action, explorer_ratio, action_ratios = self.get_action(state_info, hero_name, rival_hero)

            # Consider a fixed (scripted) policy instead. Once the policy is
            # chosen, the next n actions all come from it (e.g. to keep
            # attacking a low-HP enemy). A policy returning None means the
            # policy run is interrupted.
            if self.policy_ratio > 0 and (
                    0 < self.cur_policy_act_idx_map[hero_name] < self.policy_continue_acts
                    or random.uniform(0, 1) <= self.policy_ratio):
                policy_action = LineTrainerPolicy.choose_action(state_info, action_ratios, hero_name, rival_hero,
                                                                near_enemy_units, nearest_friend_units)
                if policy_action is not None:
                    policy_action.vpred = action.vpred
                    action = policy_action
                    self.cur_policy_act_idx_map[hero_name] += 1
                    print("hero " + hero_name + " using policy, policy action count idx "
                          + str(self.cur_policy_act_idx_map[hero_name]))
                    if self.cur_policy_act_idx_map[hero_name] >= self.policy_continue_acts:
                        self.cur_policy_act_idx_map[hero_name] = 0
                else:
                    # Policy interrupted, reset the counter
                    if self.cur_policy_act_idx_map[hero_name] > 0:
                        print("hero " + hero_name + " policy interrupted, counter reset")
                        self.cur_policy_act_idx_map[hero_name] = 0

            action_str = StateUtil.build_command(action)
            action_strs.append(action_str)

            # If the hero casts recall, update its state; later frames decide
            # whether to wait for the recall to finish
            if action.action == CmdActionEnum.CAST and int(action.skillid) == 6:
                print("hero %s cast recall" % hero_name)
                self.hero_strategy[hero.hero_name] = ActionEnum.town_ing

            # If retreat was chosen, mark it specially; this affects later behaviour
            if action.action == CmdActionEnum.RETREAT:
                print("hero %s chose to retreat, retreat point %s" % (hero_name, action.tgtpos.to_string()))
                self.hero_strategy[hero.hero_name] = ActionEnum.retreat
                self.retreat_pos = action.tgtpos

            # If batch training has finished, drop the unused training samples
            # and restart the game
            if action.action == CmdActionEnum.RESTART:
                restart = True
            else:
                # Save the action into the state frame
                state_info.add_action(action)
        else:
            # We still need the model to produce a vpred
            rival_hero = '28' if hero.hero_name == '27' else '27'
            action, explorer_ratio, action_ratios = self.get_action(state_info, hero_name, rival_hero)

            # Guess the real player's action
            guess_action = Replayer.guess_player_action(state_info, next1_state_info, next2_state_info,
                                                        next3_state_info, hero_name, rival_hero)
            guess_action.vpred = action.vpred
            action_str = StateUtil.build_command(guess_action)
            action_str['tick'] = state_info.tick
            print('guessed player action: ' + JSON.dumps(action_str))

            # Save the action into the state frame
            state_info.add_action(guess_action)
    return action_strs, restart
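# --- Standalone sketch (illustrative names, not the project's API) of the
# policy-mixing gate used in build_response above: once the scripted policy is
# sampled with probability policy_ratio, it keeps control for
# policy_continue_acts consecutive decisions before the counter resets.
import random

def pick_action_source(policy_ratio, continue_acts, counter):
    """Return ('policy' or 'model', updated run counter)."""
    in_policy_run = 0 < counter < continue_acts
    if policy_ratio > 0 and (in_policy_run or random.uniform(0, 1) <= policy_ratio):
        counter += 1
        if counter >= continue_acts:
            counter = 0     # run finished; the next decision samples again
        return 'policy', counter
    return 'model', 0       # not chosen (or the policy run was interrupted)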
def buy_equip(self, state_info, hero_name):
    # Decide whether to buy equipment
    buy_action = EquipUtil.buy_equip(state_info, hero_name)
    if buy_action is not None:
        buy_str = StateUtil.build_command(buy_action)
        return buy_str
def build_response(self, raw_state_str):
    self.save_raw_log(raw_state_str)
    prev_state_info = self.state_cache[-1] if len(self.state_cache) > 0 else None
    response_strs = []

    # Parse the request sent by the client
    obj = JSON.loads(raw_state_str)
    raw_state_info = StateInfo.decode(obj)

    # On restart the client sends this message: {"wldstatic":{"ID":9051},"wldruntime":{"State":0}}
    if raw_state_info.tick == -1:
        return {"ID": raw_state_info.battleid, "tick": -1}

    if raw_state_info.tick <= StateUtil.TICK_PER_STATE and (prev_state_info is None or prev_state_info.tick > raw_state_info.tick):
        print("clear")
        prev_state_info = None
        self.state_cache = []
        self.battle_started = -1
        self.battle_heroes_cache = []
        self.dead_heroes = []
        self.dead_heroes_cache = []
        self.data_inputs = []
        self.rebooting = False
    elif prev_state_info is None and raw_state_info.tick > StateUtil.TICK_PER_STATE:
        # If this is not the first frame, just restart the game.
        # Occasionally the first frame has no tick (i.e. -1); in that case the
        # battle can only be restarted as well.
        print("battle_id", self.battle_id, "tick", raw_state_info.tick, 'not the first frame, restarting the game')
        action_strs = [StateUtil.build_action_command('27', 'RESTART', None)]
        rsp_obj = {"ID": raw_state_info.battleid, "tick": raw_state_info.tick, "cmd": action_strs}
        rsp_str = JSON.dumps(rsp_obj)
        return rsp_str

    state_info = StateUtil.update_state_log(prev_state_info, raw_state_info)
    hero = state_info.get_hero("27")
    if hero is None or hero.hp is None:
        # Corner case: if the hero cannot be found, restart directly
        print("battle_id", self.battle_id, "tick", state_info.tick, 'hero not found, restarting the game')
        action_strs = [StateUtil.build_action_command('27', 'RESTART', None)]
        rsp_obj = {"ID": raw_state_info.battleid, "tick": raw_state_info.tick, "cmd": action_strs}
        rsp_str = JSON.dumps(rsp_obj)
        return rsp_str

    # Pre-battle preparation
    if len(self.state_cache) == 0:
        # On the first frame, grant gold and levels
        for hero in self.heros:
            add_gold_cmd = CmdAction(hero, CmdActionEnum.ADDGOLD, None, None, None, None, None, None, None)
            add_gold_cmd.gold = 3000
            add_gold_str = StateUtil.build_command(add_gold_cmd)
            response_strs.append(add_gold_str)

            add_lv_cmd = CmdAction(hero, CmdActionEnum.ADDLV, None, None, None, None, None, None, None)
            add_lv_cmd.lv = 9
            add_lv_str = StateUtil.build_command(add_lv_cmd)
            response_strs.append(add_lv_str)
    elif len(self.state_cache) > 1:
        # From the second frame on, upgrade skills and buy equipment;
        # this may take several frames
        for hero in self.heros:
            upgrade_cmd = self.upgrade_skills(state_info, hero)
            if upgrade_cmd is not None:
                response_strs.append(upgrade_cmd)
            buy_cmd = self.buy_equip(state_info, hero)
            if buy_cmd is not None:
                response_strs.append(buy_cmd)

    for hero in self.heros:
        # Check whether the hero died
        if prev_state_info is not None:
            dead = StateUtil.if_hero_dead(prev_state_info, state_info, hero)
            if dead == 1 and hero not in self.dead_heroes:
                print("battle_id", self.battle_id, "tick", state_info.tick, "hero died", hero)
                self.dead_heroes.append(hero)

    # First require all heroes to stand inside the team-battle circle, then
    # start model inference; from that point every action is model-decided.
    # Invalid actions must be filtered out, and moves that would leave the
    # battle circle blocked.
    # TODO once the battle starts, pull back heroes that skill-dash out of the circle
    # Dead heroes are excluded here; they do not rejoin the battle.
    # The battle circle shrinks over time.
    battle_range = self.cal_battle_range(len(self.state_cache) - self.battle_started)
    heroes_in_range, heroes_out_range = TeamBattleTrainer.all_in_battle_range(
        state_info, self.heros, self.dead_heroes, battle_range)

    # Alive heroes
    battle_heros = list(heroes_in_range)
    battle_heros.extend(heroes_out_range)

    # Cache participation and deaths for later training
    self.battle_heroes_cache.append(battle_heros)
    self.dead_heroes_cache.append(list(self.dead_heroes))

    if state_info.tick >= 142560:
        debuginfo = True  # debug breakpoint anchor for a specific tick

    # The team battle has not started yet: some heroes are still outside the circle
    if len(heroes_out_range) > 0:
        if self.battle_started > -1:
            print('battle_id', self.battle_id, "battle already started, yet heroes are still outside the circle",
                  ','.join(heroes_out_range), "battle_range", battle_range)

        # Move to the two starting spots near the battle point;
        # after the battle has started, move to the battle centre instead
        for hero in heroes_out_range:
            start_point_x = randint(0, 8000)
            start_point_z = TeamBattleTrainer.BATTLE_CIRCLE_RADIUS_BATTLE_START * 1000 if self.battle_started == -1 else 0
            start_point_z += randint(-4000, 4000)
            if TeamBattleUtil.get_hero_team(hero) == 0:
                start_point_z *= -1
            start_point_z += TeamBattleTrainer.BATTLE_POINT_Z
            tgt_pos = PosStateInfo(start_point_x, 0, start_point_z)
            move_action = CmdAction(hero, CmdActionEnum.MOVE, None, None, tgt_pos, None, None, None, None)
            mov_cmd_str = StateUtil.build_command(move_action)
            response_strs.append(mov_cmd_str)
    # The team battle is under way
    elif not self.rebooting:
        if self.battle_started == -1:
            self.battle_started = len(self.state_cache)

        # Handle special cases, e.g. Dracula's ultimate sets hp to 1; patch the frame state
        state_info, _ = TeamBattlePolicy.modify_status_4_draculas_invincible(state_info, self.state_cache)

        # action_cmds, input_list, model_upgrade = self.get_model_actions(state_info, heroes_in_range)

        # Per team, get each team's actions
        team_a, team_b = TeamBattleUtil.get_teams(heroes_in_range)
        team_actions_a, input_list_a, model_upgrade_a = self.get_model_actions_team(state_info, team_a, heroes_in_range)
        team_actions_b, input_list_b, model_upgrade_b = self.get_model_actions_team(state_info, team_b, heroes_in_range)

        # If the model was upgraded mid-battle, restart the battle
        if (model_upgrade_a or model_upgrade_b) and self.battle_started < len(self.state_cache) + 1:
            print("battle_id", self.battle_id, "model upgraded, restarting the battle",
                  self.battle_started, len(self.state_cache))
            action_strs = [StateUtil.build_action_command('27', 'RESTART', None)]
            rsp_obj = {"ID": raw_state_info.battleid, "tick": raw_state_info.tick, "cmd": action_strs}
            rsp_str = JSON.dumps(rsp_obj)
            return rsp_str

        data_input_map = {}
        for action_cmd, data_input in zip(team_actions_a + team_actions_b, input_list_a + input_list_b):
            action_str = StateUtil.build_command(action_cmd)
            response_strs.append(action_str)
            state_info.add_action(action_cmd)
            data_input_map[action_cmd.hero_name] = data_input

        # Cache all model inputs for later training
        self.data_inputs.append(data_input_map)

    # Append this frame to the cache
    self.state_cache.append(state_info)

    # Add model actions to the training cache and compute rewards.
    # Note: rewards depend on later states, so this computation is delayed.
    last_x_index = 2
    if self.battle_started > -1 and len(self.data_inputs) >= last_x_index:
        if self.rebooting:
            # Testing showed the next frame may still arrive before the restart
            # takes effect; drop training in that case and keep restarting
            print("battle_id", self.battle_id, "tick", state_info.tick, "warn",
                  "restart requested but still receiving frames, keep restarting")
            response_strs = [StateUtil.build_action_command('27', 'RESTART', None)]
        else:
            state_index = len(self.state_cache) - last_x_index
            win, win_team, left_heroes = self.remember_replay_heroes(-last_x_index, state_index, battle_range)

            # End-of-battle condition: fight down to the last man
            # all_in_team = TeamBattleUtil.all_in_one_team(heroes_in_range)
            # if self.battle_started:
            #     if len(self.dead_heroes) >= 9 or (len(self.dead_heroes) >= 5 and all_in_team > -1):
            if win == 1:
                # Restart the game
                print('battle_id', self.battle_id, "restarting the game", "remaining heroes", ','.join(left_heroes))
                response_strs = [StateUtil.build_action_command('27', 'RESTART', None)]
                self.rebooting = True

    # battle_heros = self.search_team_battle(state_info)
    # if len(battle_heros) > 0:
    #     print("team battle heros", ';'.join(battle_heros))
    #
    # heros_need_model = []
    # for hero in self.heros:
    #     # Check whether the hero died
    #     if prev_state_info is not None:
    #         dead = StateUtil.if_hero_dead(prev_state_info, state_info, hero)
    #         if dead == 1 and hero not in self.dead_heroes:
    #             self.dead_heroes.append(hero)
    #
    #     # Revived heroes should not rejoin the team battle
    #     if hero in self.dead_heroes:
    #         continue
    #
    #     # near_enemy_heroes = StateUtil.get_nearby_enemy_heros(state_info, hero, TeamBattleTrainer.MODEL_RANGE)
    #     if hero not in battle_heros:
    #         # Move near the battle point, with some randomness
    #         rdm_delta_x = randint(0, 1000)
    #         rdm_delta_z = randint(0, 1000)
    #         tgt_pos = PosStateInfo(TeamBattleTrainer.BATTLE_POINT_X + rdm_delta_x, 0,
    #                                TeamBattleTrainer.BATTLE_POINT_Z + rdm_delta_z)
    #         move_action = CmdAction(hero, CmdActionEnum.MOVE, None, None, tgt_pos, None, None, None, None)
    #         mov_cmd_str = StateUtil.build_command(move_action)
    #         response_strs.append(mov_cmd_str)
    #     else:
    #         # Run model inference
    #         heros_need_model.append(hero)
    #
    # if len(heros_need_model) > 0:
    #     action_cmds = self.get_model_actions(state_info, heros_need_model)
    #     for action_cmd in action_cmds:
    #         action_str = StateUtil.build_command(action_cmd)
    #         response_strs.append(action_str)
    #         state_info.add_action(action_cmd)
    # TODO record model outputs for later training

    # Return the result to the game client
    rsp_obj = {"ID": state_info.battleid, "tick": state_info.tick, "cmd": response_strs}
    rsp_str = JSON.dumps(rsp_obj)
    print('battle_id', self.battle_id, 'response', rsp_str)
    return rsp_str
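# --- Hypothetical sketch of cal_battle_range, which is referenced above but
# not shown. The comments say the team-battle circle shrinks as frames pass;
# the radius constants and shrink rate here are assumptions, not project values.
def cal_battle_range_sketch(frames_since_start):
    START_RADIUS = 12.0       # circle radius when the battle begins (assumed)
    MIN_RADIUS = 4.0          # floor the circle never shrinks below (assumed)
    SHRINK_PER_FRAME = 0.05   # radius lost per cached frame (assumed)
    if frames_since_start < 0:    # battle_started == -1: battle not begun yet
        return START_RADIUS
    return max(MIN_RADIUS, START_RADIUS - SHRINK_PER_FRAME * frames_since_start)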
def get_model_actions(self, state_info, heros, debug=False):
    # The first hero picks first, then the second, and so on; later heroes see
    # the earlier heroes' actions added to their input.
    # TODO could instead order the picks by the model's max-Q values, so that
    # each hero's chosen action is the one actually executed.
    # For now the pick order is random.
    random_heros = list(heros)
    shuffle(random_heros)

    # Current battle range (the circle shrinks)
    battle_range = self.cal_battle_range(len(self.state_cache) - self.battle_started)

    action_cmds = []
    input_list = []
    model_upgrade = False
    for hero in random_heros:
        hero_info = state_info.get_hero(hero)
        data_input = TeamBattleInput.gen_input(state_info, hero)
        data_input = np.array(data_input)

        # Fold the already-chosen heroes' actions into the input
        for prev_action in action_cmds:
            data_input = TeamBattleInput.add_other_hero_action(data_input, hero_info, prev_action, debug)

        action_list, explor_value, vpreds, clear_cache = self.model_util.get_action_list(self.battle_id, hero, data_input)
        action_str = ' '.join(str("%.4f" % float(act)) for act in action_list)
        if debug:
            print("battle_id", self.battle_id, "tick", state_info.tick, "hero", hero,
                  "model action list", action_str)

        unaval_list = TeamBattleTrainer.list_unaval_actions(action_list, state_info, hero, heros, battle_range)
        unaval_list_str = ' '.join(str("%.4f" % float(act)) for act in unaval_list)
        if debug:
            print("battle_id", self.battle_id, "tick", state_info.tick, "hero", hero,
                  "model remove_unaval_actions", unaval_list_str)

        friends, opponents = TeamBattleUtil.get_friend_opponent_heros(heros, hero)
        action_cmd, max_q, selected = TeamBattleTrainer.get_action_cmd(action_list, unaval_list, state_info,
                                                                       hero, friends, opponents)
        if debug:
            print("battle_id", self.battle_id, "tick", state_info.tick, "hero", hero, "model get_action",
                  StateUtil.build_command(action_cmd), "max_q", max_q, "selected", selected)

        # If the model was upgraded, clear all cached training actions and restart the game
        if clear_cache:
            print('battle_id', self.battle_id, 'model upgraded, clearing the training cache')
            for hero_name in self.heros:
                self.model_caches[hero_name].clear_cache()
            model_upgrade = True

        action_cmds.append(action_cmd)
        input_list.append(data_input)
    return action_cmds, input_list, model_upgrade
def get_model_actions_team(self, state_info, team, battle_heroes, debug=False):
    # Heroes pick actions one after another; each later pick sees the earlier
    # picks folded into its input. Pick order follows the model's max-Q values,
    # so the action each hero chooses is the one actually executed.
    # first_hero = heroes[0]

    # Current battle range (the circle shrinks)
    battle_range = self.cal_battle_range(len(self.state_cache) - self.battle_started)

    # First build every hero's base input and the list of its invalid choices
    hero_input_map = {}
    hero_unavail_list_map = {}
    for hero in team:
        data_input = TeamBattleInput.gen_input(state_info, hero, battle_heroes)
        data_input = np.array(data_input)
        hero_input_map[hero] = data_input
        unaval_list = TeamBattleTrainer.list_unaval_actions(self.act_size, state_info, hero, battle_heroes, battle_range)
        unaval_list_str = ' '.join(str("%.4f" % float(act)) for act in unaval_list)
        hero_unavail_list_map[hero] = unaval_list
        if debug:
            print("battle_id", self.battle_id, "tick", state_info.tick, "hero", hero,
                  "model remove_unaval_actions", unaval_list_str)

    # Get each hero's recommended actions
    hero_recommend_list_map = {}
    for hero in team:
        friends, opponents = TeamBattleUtil.get_friend_opponent_heros(battle_heroes, hero)
        hero_info = state_info.get_hero(hero)
        recommend_list = TeamBattlePolicy.select_action_by_strategy(state_info, hero_info, friends, opponents)
        hero_recommend_list_map[hero] = recommend_list

    # Pick hero actions one by one: each round, the remaining hero with the
    # highest best-Q acts next
    action_cmds = []
    input_list = []
    left_heroes = list(team)
    model_upgrade = False
    while len(left_heroes) > 0:
        cur_max_q = -1
        chosen_hero = left_heroes[0]
        chosen_action_list = None
        chosen_data_input = hero_input_map[chosen_hero]
        for hero in left_heroes:
            # Fold the already-chosen heroes' actions into the input
            hero_info = state_info.get_hero(hero)
            data_input = hero_input_map[hero]
            for prev_action in action_cmds:
                data_input = TeamBattleInput.add_other_hero_action(data_input, hero_info, prev_action, debug)

            unaval_list = hero_unavail_list_map[hero]
            recommend_list = hero_recommend_list_map[hero]
            action_list, explor_value, vpreds, clear_cache = self.model_util.get_action_list(self.battle_id, hero, data_input)
            action_str = ' '.join(str("%.4f" % float(act)) for act in action_list)
            max_q = TeamBattleTrainer.get_max_q(action_list, unaval_list, recommend_list)
            if debug:
                print("battle_id", self.battle_id, "tick", state_info.tick, "candidate this round",
                      "hero", hero, "max_q", max_q, "model action list", action_str)

            # >= so that max_q == -1 is still accepted
            if max_q >= cur_max_q:
                cur_max_q = max_q
                chosen_hero = hero
                chosen_action_list = action_list
                chosen_data_input = data_input  # keep the chosen hero's input, not the loop leftover

            # If the model was upgraded, clear all cached training actions and restart the game
            if clear_cache:
                print('battle_id', self.battle_id, 'model upgraded, clearing the training cache')
                for hero_name in self.heros:
                    self.model_caches[hero_name].clear_cache()
                model_upgrade = True

        # Execute the chosen (max-Q) hero's action
        unaval_list = hero_unavail_list_map[chosen_hero]
        recommend_list = hero_recommend_list_map[chosen_hero]
        friends, opponents = TeamBattleUtil.get_friend_opponent_heros(battle_heroes, chosen_hero)
        action_cmd, max_q, selected = TeamBattleTrainer.get_action_cmd(chosen_action_list, unaval_list, recommend_list,
                                                                       state_info, chosen_hero, friends, opponents)
        if debug:
            print("battle_id", self.battle_id, "tick", state_info.tick, "hero", chosen_hero, "model get_action",
                  StateUtil.build_command(action_cmd), "max_q", max_q, "selected", selected)

        # Update the bookkeeping
        action_cmds.append(action_cmd)
        input_list.append(chosen_data_input)
        left_heroes.remove(chosen_hero)
    return action_cmds, input_list, model_upgrade
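# --- Distilled view (illustrative, not the project's API) of the ordering loop
# in get_model_actions_team: each round re-scores every remaining agent given
# the actions committed so far, and the agent with the highest best-Q commits next.
def order_by_max_q(agents, q_of):
    """Commit one agent per round, highest best-Q first.

    q_of(agent, committed) -> (best_action, q): the agent's best available
    action and its Q-value, given the (agent, action) pairs committed so far.
    """
    committed = []              # (agent, action) in execution order
    remaining = list(agents)
    while remaining:
        scored = [(q_of(a, committed), a) for a in remaining]
        (action, _q), best = max(scored, key=lambda t: t[0][1])
        committed.append((best, action))
        remaining.remove(best)
    return committed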
def build_response(self, state_info, prev_state_info, line_model, hero_names=None):
    battle_id = state_info.battleid
    tick = state_info.tick
    if tick >= 139062:
        db = 1  # debug breakpoint anchor for a specific tick
    action_strs = []
    if hero_names is None:
        hero_names = [hero.hero_name for hero in state_info.heros]
    for hero_name in hero_names:
        hero = state_info.get_hero(hero_name)
        prev_hero = prev_state_info.get_hero(hero.hero_name) if prev_state_info is not None else None

        # Check whether the game should be restarted
        # (the first tower on the lane was destroyed)

        # If any skills can be upgraded, prefer upgrading skill 3
        skills = StateUtil.get_skills_can_upgrade(hero)
        if len(skills) > 0:
            skillid = 3 if 3 in skills else skills[0]
            update_cmd = CmdAction(hero.hero_name, CmdActionEnum.UPDATE, skillid, None, None, None, None, None, None)
            update_str = StateUtil.build_command(update_cmd)
            action_strs.append(update_str)

        # Check the surroundings
        near_enemy_heroes = StateUtil.get_nearby_enemy_heros(state_info, hero.hero_name, StateUtil.LINE_MODEL_RADIUS)
        near_enemy_units = StateUtil.get_nearby_enemy_units(state_info, hero.hero_name, StateUtil.LINE_MODEL_RADIUS)
        nearest_enemy_tower = StateUtil.get_nearest_enemy_tower(state_info, hero.hero_name, StateUtil.LINE_MODEL_RADIUS + 3)

        # Recall logic: if the hero is recalling and has not been interrupted,
        # keep recalling and return nothing
        if prev_hero is not None:
            if self.hero_strategy[hero.hero_name] == ActionEnum.town_ing and prev_hero.hp <= hero.hp \
                    and not StateUtil.if_hero_at_basement(hero):
                if not hero.skills[6].canuse:
                    print('recalling, keep recalling')
                    continue
                else:
                    print('recall failed')
            if hero.hp <= 0:
                self.hero_strategy[hero.hero_name] = None
                continue

        # When low on HP with no enemy units around, recall to town
        # if len(near_enemy_heroes) == 0 and len(near_enemy_units) == 0 and nearest_enemy_tower is None:
        #     if hero.hp/float(hero.maxhp) < LineTrainer.TOWN_HP_THRESHOLD:
        #         print('strategy layer: recall')
        #         # If the hero is recalling but took damage last frame,
        #         # set the state to recalling and start the recall again
        #         if self.hero_strategy[hero.hero_name] == ActionEnum.town_ing:
        #             if prev_hero.hp > hero.hp:
        #                 town_action = CmdAction(hero.hero_name, CmdActionEnum.CAST, 6, hero.hero_name, None, None, None, None, None)
        #                 action_str = StateUtil.build_command(town_action)
        #                 action_strs.append(action_str)
        #         # If the hero is not recalling yet, set the state to recalling and start the recall
        #         elif self.hero_strategy[hero.hero_name] != ActionEnum.town_ing:
        #             self.hero_strategy[hero.hero_name] = ActionEnum.town_ing
        #             town_action = CmdAction(hero.hero_name, CmdActionEnum.CAST, 6, hero.hero_name, None, None, None, None, None)
        #             action_str = StateUtil.build_command(town_action)
        #             action_strs.append(action_str)
        #
        #         # Whatever happened above, the hero should now be recalling,
        #         # so skip all other operations
        #         continue

        # While in the fountain, stay until full HP (strategy layer: lane-farming)
        if StateUtil.if_hero_at_basement(hero):
            if hero.hp < hero.maxhp:
                continue

        # Retreat logic
        # TODO could even use movement skills to retreat
        if hero.hero_name in self.hero_strategy and self.hero_strategy[hero.hero_name] == ActionEnum.retreat:
            dist = StateUtil.cal_distance(hero.pos, self.retreat_pos)
            if dist <= 2:
                print('reached the retreat point')
                self.hero_strategy[hero.hero_name] = None
            elif prev_hero is not None and prev_hero.pos.to_string() == hero.pos.to_string():
                print('hero is stuck, cancelling the retreat')
                self.hero_strategy[hero.hero_name] = None
            else:
                print('still retreating ' + str(dist))
                continue

        # Decide the current action according to strategy.
        # When laning, first locate the minion wave and move towards the frontmost minions.
        # If there is danger nearby (enemy units), switch to the laning model.
        # Nearby minions or towers only count if they belong to the specified lane.
        line_index = 1
        near_enemy_units_in_line = StateUtil.get_units_in_line(near_enemy_units, line_index)
        nearest_enemy_tower_in_line = StateUtil.get_units_in_line([nearest_enemy_tower], line_index)
        if len(near_enemy_units_in_line) == 0 and len(nearest_enemy_tower_in_line) == 0 \
                and (len(near_enemy_heroes) == 0 or StateUtil.if_in_line(hero, line_index, 4000) == -1):
            self.hero_strategy[hero.hero_name] = ActionEnum.line_1
            # print("strategy layer: no enemies from the specified lane nearby, start farming the lane " + hero.hero_name)
            # Follow the minion wave
            front_soldier = StateUtil.get_frontest_soldier_in_line(state_info, line_index, hero.team)
            if front_soldier is None:
                action_str = StateUtil.build_action_command(hero.hero_name, 'HOLD', {})
                action_strs.append(action_str)
            else:
                # Move to the frontmost minion wave
                move_action = CmdAction(hero.hero_name, CmdActionEnum.MOVE, None, None, front_soldier.pos, None, None, None, None)
                action_str = StateUtil.build_command(move_action)
                action_strs.append(action_str)
        else:
            # Use the model to decide
            # print("use the laning model to decide hero %s's action" % hero.hero_name)
            self.hero_strategy[hero.hero_name] = ActionEnum.line_model
            enemies = []
            enemies.extend((h.hero_name for h in near_enemy_heroes))
            enemies.extend((unit.unit_name for unit in near_enemy_units))
            if nearest_enemy_tower is not None:
                enemies.append(nearest_enemy_tower.unit_name)
            # print('laning model decides, enemies nearby: ' + ', '.join(enemies))

            # Laning currently involves only two heroes
            rival_hero = '28' if hero.hero_name == '27' else '27'
            action = line_model.get_action(prev_state_info, state_info, hero.hero_name, rival_hero)
            action_str = StateUtil.build_command(action)
            action_strs.append(action_str)

            # If the hero casts recall, update its state; later frames decide
            # whether to wait for the recall to finish
            if action.action == CmdActionEnum.CAST and int(action.skillid) == 6:
                print("hero %s cast recall" % hero_name)
                self.hero_strategy[hero.hero_name] = ActionEnum.town_ing

            # If retreat was chosen, mark it specially; this affects later behaviour
            if action.action == CmdActionEnum.RETREAT:
                print("hero %s chose to retreat, retreat point %s" % (hero_name, action.tgtpos.to_string()))
                self.hero_strategy[hero.hero_name] = ActionEnum.retreat
                self.retreat_pos = action.tgtpos

            # Save the action into the state frame
            state_info.add_action(action)
    return action_strs
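# --- Summary sketch of the per-hero strategy state machine driven by
# hero_strategy in the trainers above. The member names mirror the ActionEnum
# values used in this file; the transition triggers are paraphrased from
# build_response.
#
#   line_1     -> line_model   enemies from the lane enter LINE_MODEL_RADIUS
#   line_model -> retreat      the model emits a RETREAT action
#   line_model -> town_ing     the model casts skill 6 (recall)
#   retreat    -> None         retreat point reached, or the hero is stuck
#   town_ing   -> None         hero died, recall finished, or recall interrupted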