Example #1
0
    def __init__(self, host, port):
        """Open a connection to the game server and reset movement tracking."""
        self.socket = GameSocket(host, port)
        self.state = State()
        self.is_moving_right = True  # begin by sweeping toward the right side
        self.steps = self.pre_action = 0
Example #2
0
    def __init__(self, id, estWood=-1, pEnergyToStep=-1, pStepToGold=-1,
                 strategy=-1):
        """Create a bot; -1 arguments are replaced by random strength/strategy."""
        self.state = State()
        self.info = PlayerInfo(id)

        if estWood == -1:
            # Roll a random strength profile for this bot.
            estWood = 5 + randrange(16)
            pEnergyToStep = (2 + randrange(9)) * 5
            pStepToGold = (1 + randrange(6)) * 50

        self.estWood = estWood
        self.pEnergyToStep = pEnergyToStep
        self.pStepToGold = pStepToGold
        self.tx = self.ty = -1  # no target tile selected yet

        if strategy == -1:
            # Pick a target-selection strategy uniformly from {-1, ..., 4}.
            strategy = np.random.choice(range(6)) - 1
        self.selectTargetOption = strategy
Example #3
0
    def __init__(self, host, port):
        """Connect to the server and remember the starting score."""
        self.socket = GameSocket(host, port)
        self.state = State()
        # Last observed score, kept for designing the reward function.
        self.score_pre = self.state.score
Example #4
0
    def __init__(self, id):
        """Set up player info and the movement-direction flags."""
        self.state = State()
        self.info = PlayerInfo(id)
        # Both sweep flags start cleared; their exact semantics are driven
        # by the movement logic elsewhere in the class.
        self.isMovingInc = False
        self.isHorizontalMoving = False
 def __init__(self, host, port):
     """Connect to the game server and cache the previous step's observations."""
     self.socket = GameSocket(host, port)
     self.state = State()
     # Previous position and energy, used by the reward computation.
     self.pre_x = self.pre_y = self.pre_energy = 0
     # Last score seen; the reward function compares against it.
     self.score_pre = self.state.score
    def __init__(self, host, port):
        """Connect to the server and initialise reward/decay bookkeeping."""
        self.socket = GameSocket(host, port)
        self.state = State()
        # Last score seen, used for designing the reward function.
        self.score_pre = self.state.score
        # Tunable constants; presumably weight how nearby cells influence
        # scoring — TODO confirm against their usage elsewhere in the class.
        self.decay = 27
        self.area_affect = 3
        self.affect_eff = 0.92
        # Lazily-populated views and the action chosen for the current step.
        self.view = None
        self.energy_view = None
        self.current_action = None
Example #7
0
    def __init__(self, id, estWood=-1, pEnergyToStep=-1, pStepToGold=-1):
        """Create a bot; -1 strength arguments are replaced with random values."""
        self.state = State()
        self.info = PlayerInfo(id)

        if estWood == -1:
            # Randomise the bot's strength profile.
            estWood = 5 + randrange(16)
            pEnergyToStep = (2 + randrange(9)) * 5
            pStepToGold = (1 + randrange(6)) * 50

        self.estWood = estWood
        self.pEnergyToStep = pEnergyToStep
        self.pStepToGold = pStepToGold
Example #8
0
    def __init__(self, host, port):
        """Connect to the server and initialise sweep/search bookkeeping."""
        self.socket = GameSocket(host, port)
        self.state = State()

        self.is_moving_right = True  # default sweep direction: rightward
        self.steps = 0
        self.pre_action = 0
        self.pre_x = self.pre_y = -1  # previous position; -1 = not set yet

        self.search_left_right = True
        # Coordinates of the largest gold pile found so far (-1 = none yet).
        self.largest_gold_x = self.largest_gold_y = -1
        self.left_or_right = 2
Example #9
0
    def __init__(self, host, port):
        """Connect to the game server and set up the DQN environment state."""
        self.socket = GameSocket(host, port)
        self.state = State()

        # Model input/output sizes.
        self.INPUTNUM = 198  # number of input values fed to the DQN model
        self.ACTIONNUM = 6   # number of actions output by the DQN model

        self.gameState = None
        self.reward = 0
        self.terminate = False

        # Previous score/energy, kept for designing the reward function.
        self.score_pre = self.state.score
        self.energy_pre = self.state.energy

        self.viewer = None
        self.steps_beyond_done = None
Example #10
0
 def __init__(self, id):
     """Build the TD3-driven bot and load its pretrained policy from disk."""
     self.state = State()
     self.info = PlayerInfo(id)
     self.limit = 2  # presumably the local-view radius — TODO confirm
     # Observation size: a (2*limit+1)^2 local window plus 3 + 3 extras.
     obs_size = (2 * self.limit + 1) ** 2 + 3 + 3
     # Obstacle type ids.
     self.TreeID = 1
     self.TrapID = 2
     self.SwampID = 3
     self.policy = newTD3_bot.TD3(
         state_dim=obs_size,
         action_dim=6,
         max_action=1.0,
     )
     self.policy.load("./models_newTD3_2/newTD3_Miner_0_2_get_state3")
 def __init__(self, id):
     """Build the DDPG-driven bot and load its pretrained policy from disk."""
     self.state = State()
     self.info = PlayerInfo(id)
     self.limit = 2  # presumably the local-view radius — TODO confirm
     # Observation size: a (2*limit+1)^2 local window plus 3 extras.
     obs_size = (2 * self.limit + 1) ** 2 + 3
     # Obstacle type ids.
     self.TreeID = 1
     self.TrapID = 2
     self.SwampID = 3
     self.policy = DDPG.DDPG(
         state_dim=obs_size,
         action_dim=6,
         max_action=1.0,
     )
     self.policy.load("./models_DDPG/DDPG_Miner_0_2")
Example #12
0
 def __init__(self, id):
     """Initialise the game state and this player's info record."""
     self.state = State()
     self.info = PlayerInfo(id)
Example #13
0
 def __init__(self, id):
     """Initialise the bot and precompute a move sequence over an empty grid."""
     self.state = State()
     self.info = PlayerInfo(id)
     self.path = None
     # Precompute moves on an all-zero 21x9 grid; self.idx walks through them.
     empty_grid = [[0] * 9 for _ in range(21)]
     self.grid = next_move(empty_grid)
     self.idx = 0
Example #14
0
    def __init__(self, id):
        """Initialise the bot's state, player info, and movement flags."""
        self.state = State()
        self.info = PlayerInfo(id)

        # NOTE(review): flags presumably drive the movement policy
        # (initial_flag marking the first step) — semantics not visible here.
        self.isMovingInc = False
        self.initial_flag = True