Example #1
    def __init__(self, aid, booksList):
        Agent.__init__(self, aid)

        self.booksList = booksList

        comportamento = ComportamentoAgenteLivraria(self)
        self.behaviours.append(comportamento)
Example #2
 def __init__(self):
     Agent.__init__(self)
     self.sequence = []
     self.last_sequence = deque(maxlen=10)
     self.actions = ['r', 'p', 's']
     self.values = {'r': 0, 'p': 1, 's': 2}
     self.markov = MarkovAgent()
Example #3
File: mc_agent.py Project: srjit/deep-rl
    def __init__(self, environment, gamma, No=100):
        Agent.__init__(self, environment, gamma, No)

        self.N = self.get_clear_tensor()

        self.Gs = np.zeros(
            [self.env.dealer_max_value + 1, self.env.agent_max_value + 1])
Example #4
	def __init__(self, player_id, own_dice_list):
		Agent.__init__(self, player_id, own_dice_list)
		self.num_each_fv = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
		for fv in self.own_dice_list:
			self.num_each_fv[fv] += 1
		self.pg = ProbGenerator((NUM_PLAYERS-1)*NUM_DICE)
		self.pg.calc()
Example #5
 def __init__(self, aid, booksList):
     Agent.__init__(self, aid)
     
     self.booksList = booksList
     
     comportamento = ComportamentoAgenteLivraria(self)
     self.behaviours.append(comportamento)
Example #6
    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.

        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        Agent.__init__(self, state_size, action_size, seed)
        # Q-Network
        self.qnetwork_local = getattr(models,
                                      cfgs.MODEL_TYPE)(self.action_size,
                                                       seed).to(device)
        self.qnetwork_target = getattr(models,
                                       cfgs.MODEL_TYPE)(self.action_size,
                                                        seed).to(device)
        if cfgs.OPTIMIZER == 'SGD':
            self.optimizer = optim.SGD(self.qnetwork_local.parameters(),
                                       lr=cfgs.LR,
                                       momentum=cfgs.MOMENTUM)
        elif cfgs.OPTIMIZER == 'ADAM':
            self.optimizer = optim.Adam(self.qnetwork_local.parameters(),
                                        lr=cfgs.LR)
        self.t_step = 0
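The qnetwork_local/qnetwork_target pair above is the standard DQN stabilization pattern: the target network is held nearly fixed while the local network learns. A minimal sketch of the soft update such agents commonly apply; the soft_update helper and the tau blending factor are assumptions, not part of this example:

def soft_update(local_model, target_model, tau=1e-3):
    # theta_target <- tau * theta_local + (1 - tau) * theta_target
    # (tau is an assumed hyperparameter, not taken from the example above)
    for t_param, l_param in zip(target_model.parameters(),
                                local_model.parameters()):
        t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data)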
Example #7
File: fare.py Project: timcondit/routes
 def __init__(self, name):
     '''DOCSTRING'''
     Agent.__init__(self, name)
     # Fare maintains its own SimEvent, but Taxi uses it (look for
     # fareBeingDriven.doneSignal.signal(self.name) in the Taxi's
     # cooperate() and compete() methods.)
     self.doneSignal = SimEvent()
Example #8
 def __init__(self, my_id, my_clan):
     Agent.__init__(self, my_id=my_id, my_clan=my_clan)
     self.true_performance = 0
     self.expected_performance = 0
     self.wanna_be_my_id = -1
     self.plans = [[]]
     self.feedback_tick = 5  # timespan of taking feedback from users (or customers)
Example #9
 def __init__(self, player_id, own_dice_list):
     Agent.__init__(self, player_id, own_dice_list)
     self.num_each_fv = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
     for fv in self.own_dice_list:
         self.num_each_fv[fv] += 1
     self.pg = ProbGenerator((NUM_PLAYERS - 1) * NUM_DICE)
     self.pg.calc()
Example #10
    def __init__(self, aid, booksList):
        Agent.__init__(self, aid)

        self.booksList = booksList

        behav_ = BookstoreAgentBehaviour(self)
        self.addBehaviour(behav_)
Example #11
	def __init__(self, name, fg, ms, opt):
		Agent.__init__(self, name, fg, ms, opt)

		self.f = self.fg.functions[self.name]
		self.neighbors = self.f.variables
		self.domains = {v:self.fg.variables[v].domain for v in self.neighbors}
		self.q = {v:{value:0 for value in self.domains[v]} for v in self.neighbors}
		self.terminated_neighbors = {v:False for v in self.neighbors}
Example #12
 def __init__(self, environment, ID):
     Agent.__init__(self, environment, ID)
     self.__threshold = 0.7  # TODO: change to normal distribution
     self.__ID = ID
     self._willingness = random.uniform(
         0, 100
     )  # I wasn't sure what to set the mean and standard deviation to
     self._strategy = "standard"
Example #13
 def __init__(self, my_id, my_clan):
     Agent.__init__(self, my_id=my_id, my_clan=my_clan)
     self.true_performance = 0
     self.expected_performance = 0
     self.wanna_be_my_id = -1
     self.plans = [[]]
     self.I = 0  #iteration index
     self.IN = 0  #total length of iteration
Example #14
    def __init__(self, exec_cap):
        Agent.__init__(self)

        # executor limit set to each job
        self.exec_cap = exec_cap

        # map for executor assignment
        self.exec_map = {}
Example #15
File: script_3.py Project: Brunovf1/pade-1
	def __init__(self, aid):
		Agent.__init__(self, aid)
		
		message = ACLMessage(ACLMessage.REQUEST)
		message.set_protocol(ACLMessage.FIPA_REQUEST_PROTOCOL)
		message.set_content('REQUEST')
		message.add_receiver('agent_participant_1')
		behaviour_1 = RequestInitiator(self, message)
		self.addBehaviour(behaviour_1)
Example #16
 def __init__(self, my_id, my_clan):
     Agent.__init__(self, my_id=my_id, my_clan=my_clan)
     self.true_performance = 0
     self.expected_performance = 0
     self.wanna_be_my_id = -1
     self.plans = [[]]
     self.feedback_tick = 5  # timespan of taking feedback from users (or customers)
     self.I = 0  #iteration index
     self.IN = 1  #total length of iteration
Example #17
 def __init__(self, name, x, y, dx, dy, isfriendly, model):
     Agent.__init__(self, name, x, y, model)
     self.dx = dx  #momentum in x direction
     self.dy = dy  #momentum in y direction
     self.isfriendly = isfriendly
     self.correct_identification = False
     self.epoch_counter = 0
     self.reply = self.generate_identification()
     self.in_range = False
Example #18
	def __init__(self, name, fg, ms, opt):
		Agent.__init__(self, name, fg, ms, opt)

		self.v = self.fg.variables[self.name]
		self.neighbors = self.v.functions
		self.domain = self.v.domain
		self.z = {value:0 for value in self.domain}
		self.r = {f:{value:0 for value in self.domain} for f in self.neighbors}
		self.z_queue = []
Example #19
	def __init__(self, aid):
		Agent.__init__(self, aid)
		
		message = ACLMessage(ACLMessage.REQUEST)
		message.set_protocol(ACLMessage.FIPA_REQUEST_PROTOCOL)
		message.set_content('REQUEST')
		message.add_receiver('agent_participant_1')
		comportamento_1 = RequestIniciante(self, message)
		self.addBehaviour(comportamento_1)
Example #20
 def __init__(self, aid, bookStores):
     Agent.__init__(self, aid)
     
     self.bookStores = bookStores
     self.bestPropose = None
     self.bestBookStore = None
     self.proposes = []
     self.messages = []
     self.sends = 0
     self.receives = 0
Example #21
	def __init__(self, player_id):
		Agent.__init__(self, player_id)
		#self.num_each_fv = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
		#for fv in self.own_dice_list:
		#	self.num_each_fv[fv] += 1
		self.pg = ProbGenerator(NUM_PLAYERS*NUM_DICE)
		self.pg.calc()
		self.good_bid_count = 0
		self.num_bids_made = 0
		self.bad_bid_count = 0
Example #22
 def __init__(self, aid, bookStores):
     Agent.__init__(self, aid)
     
     self.bookStores = bookStores
     self.bestPropose = None
     self.bestBookStore = None
     self.proposes = []
     self.messages = []
     self.sends = 0
     self.receives = 0
Example #23
 def __init__(self, player_id):
     Agent.__init__(self, player_id)
     #self.num_each_fv = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
     #for fv in self.own_dice_list:
     #	self.num_each_fv[fv] += 1
     self.pg = ProbGenerator(NUM_PLAYERS * NUM_DICE)
     self.pg.calc()
     self.good_bid_count = 0
     self.num_bids_made = 0
     self.bad_bid_count = 0
Example #24
File: script_4.py Project: tbfreitas/Pade
    def __init__(self, aid):
        Agent.__init__(self, aid)

        pedido = {'tipo': 'pedido', 'qtd': 100.0}
        message = ACLMessage(ACLMessage.CFP)
        message.set_protocol(ACLMessage.FIPA_CONTRACT_NET_PROTOCOL)
        message.set_content(dumps(pedido))
        message.add_receiver('participant_agent_1')
        message.add_receiver('participant_agent_2')
        behaviour = InitiatorProtocol(self, message)
        self.addBehaviour(behaviour)
Example #25
	def __init__(self, alpha = .1, beta = 100, gamma = .8):
		#sets up all the state space and stuff that applies to all agents
		Agent.__init__(self)
		#sets up the Q table, this will be indexed by (state, action) tuples
		self.q_table = dict()
		#sets up the learning and policy params
		self.alpha = alpha #the learning rate
		self.beta = beta #policy noise
		self.gamma = gamma #discount factor
		#initialize the previous action to 0 to avoid uninitialized-attribute errors
		self.last_action = 0
Example #26
 def __init__(self, aid):
     Agent.__init__(self, aid)
     
     pedido = {'tipo' : 'pedido', 'qtd' : 100.0}
     message = ACLMessage(ACLMessage.CFP)
     message.set_protocol(ACLMessage.FIPA_CONTRACT_NET_PROTOCOL)
     message.set_content(dumps(pedido))
     message.add_receiver('participant_agent_1')
     message.add_receiver('participant_agent_2')
     behaviour = InitiatorProtocol(self, message)
     self.addBehaviour(behaviour)
Example #27
 def __init__(self):
     Agent.__init__(self)
     self.bias = {'r': 0.35, 'p': 0.3233, 's': 0.3267}
     self.cumulative = []
     self.names = []
     for val, p in self.bias.items():
         if self.cumulative:
             self.cumulative.append(p + self.cumulative[-1])
         else:
             self.cumulative.append(p)
         self.names.append(val)
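Since cumulative holds the running sum of the action probabilities ([0.35, 0.6733, 1.0] for the bias above), drawing a biased move reduces to a binary search over it. A minimal sampling sketch; the sample_move helper is hypothetical, not part of the example:

import bisect
import random

def sample_move(cumulative, names):
    # pick the first action whose cumulative weight reaches a uniform draw
    r = random.random()
    return names[bisect.bisect_left(cumulative, r)]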
Example #28
    def __init__(self, environment, ID):

        Agent.__init__(self, environment, ID)
        self.__popular_time_slots_idx = []
        self.__popular_time_slots_preference = []
        self.__n_slots_consideration = 4  # The number of time slots that will be taken into consideration
        self.__n_votes = 1  # How many votes the agent should cast; this must always be greater than or equal to n_slots_consideration
        environment.rank_popularity_time_slots()
        self._willingness = random.randint(80, 100)
        self._strategy = "popular"
        self.__ID = ID
Example #29
 def __init__(self, *args, **kwargs):
     self.max_depth = 8
     if "max_depth" in kwargs:
         self.max_depth = kwargs["max_depth"] or self.max_depth
         del kwargs["max_depth"]
     self.max_width = 3
     if "max_width" in kwargs:
         self.max_width = kwargs["max_width"] or self.max_width
         del kwargs["max_width"]
     Agent.__init__(self, *args, **kwargs)
     self.ops = [None, BlackOps(), WhiteOps()]
Example #30
    def __init__(self, name, fg, ms, opt):
        Agent.__init__(self, name, fg, ms, opt)

        self.f = self.fg.functions[self.name]
        self.neighbors = self.f.variables
        self.domains = {v: self.fg.variables[v].domain for v in self.neighbors}
        self.q = {
            v: {value: 0
                for value in self.domains[v]}
            for v in self.neighbors
        }
        self.terminated_neighbors = {v: False for v in self.neighbors}
Example #31
    def __init__(self, city, location):
        '''
        Constructor
        '''
        Agent.__init__(self, city, location)
        self.type = random.randint(0, Citizen.__AGENT_MAX_TYPES)
        self.color = colorBlue if self.type else colorRed
        self.groundType = city.getLocationType(location[0], location[1])
        self.regions = None
        self.path = None
        # Euclidean alternative:
        # self.dist = lambda p1, p2: math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
        self.dist = lambda p1, p2: abs(p2[0] - p1[0]) + abs(p2[1] - p1[1])  # Manhattan distance
Example #32
    def __init__(self, states, actions, blim):
        Agent.__init__(self, states, actions)
        base = self.states
        shape = np.shape(base)
        temp = np.zeros((blim * shape[0], shape[1] + 1))

        c = 0
        for b in range(blim):
            for p in base:
                temp[c] = [b, p[0], p[1]]
                c += 1

        self.states = temp
Example #33
    def __init__(self, name, fg, ms, opt):
        Agent.__init__(self, name, fg, ms, opt)

        self.v = self.fg.variables[self.name]
        self.neighbors = self.v.functions
        self.domain = self.v.domain
        self.z = {value: 0 for value in self.domain}
        self.r = {
            f: {value: 0
                for value in self.domain}
            for f in self.neighbors
        }
        self.z_queue = []
Example #34
    def __init__(self, aid, bookStores, pedido):
        Agent.__init__(self, aid)

        self.bookStores = bookStores
        self.pedido = pedido

        cfp_message = ACLMessage(ACLMessage.CFP)
        cfp_message.set_protocol(ACLMessage.FIPA_CONTRACT_NET_PROTOCOL)
        for i in self.bookStores:
            cfp_message.add_receiver(i)
        cfp_message.set_content(dumps(self.pedido))

        comportamento = ComportamentoAgenteConsumidor(self, cfp_message)
        self.addBehaviour(comportamento)
Example #35
    def __init__(self, aid, bookStores, pedido):
        Agent.__init__(self, aid)

        self.bookStores = bookStores
        self.pedido = pedido

        cfp_message = ACLMessage(ACLMessage.CFP)
        cfp_message.set_protocol(ACLMessage.FIPA_CONTRACT_NET_PROTOCOL)
        for i in self.bookStores:
            cfp_message.add_receiver(i)
        cfp_message.set_content(dumps(self.pedido))

        comportamento = ComportamentoAgenteConsumidor(self, cfp_message)
        self.addBehaviour(comportamento)
Example #36
 def __init__(self,
              envir=hfo.HFOEnvironment(),
              action_set="low_level",
              seed=123):
     Agent.__init__(self,
                    env=envir,
                    agent_type="low_level_random_agent",
                    action_space=LowLevelActionSpace(),
                    state_space=NeuralStateSpace(),
                    feature_set=hfo.LOW_LEVEL_FEATURE_SET,
                    port=6000,
                    base="base_right",
                    goalie=True)
     self.seed = seed
Example #37
 def __init__(
     self,
     modelStanding,
     modelAnimationDict,
     turnRate,
     speed,
     agentList,
     name="",
     rangeFinderCount=13,
     collisionMask=BitMask32.allOff(),
     adjacencySensorThreshold=0,
     radarSlices=0,
     radarLength=0.0,
     scale=1.0,
     brain=None,
     massKg=0.1,
     collisionHandler=None,
     collisionTraverser=None,
     waypoints=None,
 ):
     Agent.__init__(
         self,
         modelStanding,
         modelAnimationDict,
         turnRate,
         speed,
         agentList,
         massKg,
         collisionMask,
         name,
         collisionHandler,
         collisionTraverser,
     )
     self.collisionMask = collisionMask
     self.adjacencySensorThreshold = adjacencySensorThreshold
     self.radarSlices = radarSlices
     self.radarLength = radarLength
     self.scale = scale
     self.brain = brain
     self.npcState = "retriveKey"
     self.waypoints = waypoints
     self.targetReached = False
     self.setScale(self.scale)
     self.currentTarget = None
     self.player = None
     self.bestPath = None
     self.key = None
     self.keyInHand = False
     self.hasFallen = False
     self.pathSmoothening = True
Example #38
 def __init__(self, name, x, y, model):
     Agent.__init__(self, name, x, y, model)
     #Kripke_model.__init__(self)
     self.agents = [t for t in model.turrets if t.name != self.name]
     self.tracked_planes = []
     self.turret_range = 4
     self.broadcasted_pos = False
     self.closest = False
     self.planecounters = {}  ## dict of planes with a counter for each plane
     self.max_message_count = 20
     self.max_epochs = self.model.numepochs
     self.shoot_plane = False
     self.shoot_commands = set([])
Example #39
 def __init__(self, aid, bookStores, order):
     Agent.__init__(self, aid)
 
     self.bookStores = bookStores
     self.order = order
     
     cfp_message = ACLMessage(ACLMessage.CFP)
     cfp_message.set_protocol(ACLMessage.FIPA_CONTRACT_NET_PROTOCOL)
     for i in self.bookStores:
         cfp_message.add_receiver(i)
     cfp_message.set_content(dumps(self.order))
     
     behav_ = ConsumerAgentBehaviour(self, cfp_message)
     self.behaviours.append(behav_)
Example #40
File: mc_control.py Project: srjit/deep-rl
    def __init__(self, environment, gamma, No=100):
        Agent.__init__(self, environment, gamma, No)

        # action value function - three dimensions
        # a) Dealer Sum Value
        # b) Agent Sum Value
        # c) Possible Actions 
        self.Q = self.get_clear_tensor()

        # N(s) - number of times a state has been visited
        # N(s,a) - number of times a state has been selected
        # and an action has been chosen
        self.Ns = self.get_clear_tensor()
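Given Q and the Ns visit counts, Monte Carlo control in this style typically updates each visited (state, action) pair toward the observed return with a 1/N step size. A minimal sketch of that update, assuming state indexes the tensors as a (dealer sum, agent sum) pair; the helper is an illustration, not code from the project:

def mc_update(Q, Ns, state, action, G):
    # incremental mean: Q(s,a) <- Q(s,a) + (1 / N(s,a)) * (G - Q(s,a))
    Ns[state][action] += 1
    alpha = 1.0 / Ns[state][action]
    Q[state][action] += alpha * (G - Q[state][action])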
Example #41
 def __init__(self, maxlen=20):
     Agent.__init__(self)
     self.seq_len = maxlen
     self.model = Sequential([
         Dense(150, input_dim=self.seq_len),
         Activation('relu'),
         Dense(50),
         Activation('relu'),
         Dense(3),
         Activation('softmax')
     ])
     self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
     self.last_sequence = deque(maxlen=self.seq_len)
     self.actions = ['r', 'p', 's']
     self.values = {'r': 0, 'p': 1, 's': 2}
     self.last_move = None
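The network above maps the last seq_len integer-encoded moves to a softmax over the three actions. A minimal prediction sketch, assuming last_sequence is already full; the predict_move helper is an assumption, not part of the example:

import numpy as np

def predict_move(agent):
    # shape (1, seq_len): one batch row of encoded moves
    x = np.array(agent.last_sequence, dtype=np.float32).reshape(1, -1)
    probs = agent.model.predict(x, verbose=0)[0]
    return agent.actions[int(np.argmax(probs))]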
Example #42
	def __init__(self, screen):
		pygame.sprite.Sprite.__init__(self)
		self.calmfile = "spr/e.png"
		self.waryfile = "spr/ea.png"
		self.firefile = "spr/es.png"
		self.kickfile = "spr/ek.png"
		self.deadfile = "spr/ed.png"
		self.alive = True
		self.shootstg = 0
		self.before = self.calmfile
		self.actchnce = 0
		self.acthigh = 0
		self.actlow = 0
		screen = pygame.display.get_surface()
		Agent.__init__(self, self.calmfile, self.waryfile, self.firefile, self.kickfile, screen)
		self.rect.x = 720
		self.rect.y = 350
Example #43
 def __init__(self, prefix="msg", suffix="", default="messageReceived", silent=False):
     """ Action agent constructor.
     @param prefix: Prefix for all action-specific handler function names.
     @type prefix: C{str}
     @param suffix: Suffix for all action-specific handler function names.
     @type suffix: C{str}
     @param default: Default handler function name.
     @type default: C{str}
     @param silent: Silent flag.
     @type silent: C{bool}
     """
     Agent.__init__(self)
     self.pattern = prefix + "%s" + suffix
     if hasattr(self, default):
         self.__defaultHandler = getattr(self, default)
     else:
         self.__defaultHandler = self.__default_handler
     self.silent = silent
     self.currentMessage = None
Example #44
    def __init__(self, policy_function, discount_factor, update_freq=1):
        """
        Parameters:
            policy_function (PolicyFunction): Policy function instance implementing the mapping from states to actions.
            discount_factor (float): Reward discount factor
            update_freq (int): Update frequency. Parameters are updated after every update_freq episodes
        """
        Agent.__init__(self, action_space=policy_function.action_space)

        if discount_factor < 0.0 or discount_factor > 1.0:
            raise ValueError("Discount factor should be between 0.0 and 1.0.")
        self.discount_factor = discount_factor

        if update_freq <= 0:
            raise ValueError("Update frequency should be positive.")
        self.update_freq = update_freq

        self.policy_function = policy_function
        self.trials_experienced = 0  # used for discounting reward
        self.episodes_experienced = 0  # used for updating parameters at the desired update_freq
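The discount_factor validated here is typically applied when turning an episode's reward sequence into per-step returns. A minimal sketch of that computation; the standalone helper below is assumed rather than taken from the project:

def discounted_returns(rewards, gamma):
    # G_t = r_t + gamma * G_{t+1}, accumulated right to left
    returns = [0.0] * len(rewards)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns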
Example #45
    def __init__(self, q_function, discount_factor, greed_eps):
        """
        Parameters:
            q_function (QFunction)
            discount_factor (float): Reward discount factor
            greed_eps (ParameterSchedule): Schedule for epsilon of the epsilon-greedy action-picking strategy (probability
                of picking a random, rather than the greedy, action).
        """
        Agent.__init__(self, action_space=q_function.action_space)
        if discount_factor < 0.0 or discount_factor > 1.0:
            raise ValueError("Discount factor should be between 0.0 and 1.0.")
        self.discount_factor = discount_factor
        self.greed_eps = greed_eps
        # keep track of episodes experienced (this is for example used by parameter schedules)
        self.episodes_experienced = 0

        self.q = q_function

        # We need to keep track of last state, action, and reward
        self.last_state = None
        self.last_action = None
        self.last_reward = 0.0
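greed_eps drives epsilon-greedy action selection: explore with probability epsilon, otherwise act greedily on the Q estimates. A minimal sketch, assuming per-action values are available as a mapping; the q_values argument is hypothetical:

import random

def epsilon_greedy(q_values, actions, eps):
    if random.random() < eps:
        return random.choice(actions)  # explore
    return max(actions, key=lambda a: q_values[a])  # exploit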
Example #46
	def __init__(self, aid):
		Agent.__init__(self, aid)
		comportamento_1 = RequestParticipante(self)
		self.addBehaviour(comportamento_1)
Example #47
 def __init__(self, aid, booksList):
     Agent.__init__(self, aid)
     
     self.booksList = booksList
Example #48
File: awareness.py Project: gregr/uriel
	def __init__(self, mind):
		"""Create an Awareness with 'mind' as the Agent program."""
		Agent.__init__(self, mind)
		self.spaces = {}	# spaces containing sectors viewed
		self.perspectives = WeakKeyDictionary()	# reference counted
		self._onSpaceDestroyed = MethodProxy(self.OnSpaceDestroyed)
Example #49
 def __init__(self, *args, **kwargs):
   self._activities = list()
   self._postponed_messages = list()
   Agent.__init__(self, *args, **kwargs)
Example #50
 def __init__(self, aid):
     Agent.__init__(self, aid)
Example #51
 def __init__(self, player_id):
     Agent.__init__(self, player_id)
     self.valid_bid_command = re.compile('(([0-9]+)[ ]+([0-9]+)|[c]+)')
Example #52
	def __init__(self, *arg):
		Agent.__init__(self, *arg)
Example #53
 def __init__(self, aid, power_values):
     Agent.__init__(self, aid)
     behaviour = ParticipantProtocol(self, power_values)
     self.addBehaviour(behaviour)
Example #54
	def __init__(self, x_init, y_init):
		Agent.__init__(self,x_init,y_init,"Prey")
Example #55
 def __init__(self):
     """ Scheduler constructor. """
     Agent.__init__(self)
     self.__alive = False
     self.activators = []