def add_customer(agent_id, time, belly_mean):
    # pick a random entrance and draw this customer's belly size (floored at 0.001)
    start_pos = random.choice(range(5))
    belly_size = max(np.random.normal(loc=belly_mean, scale=2) / 2, 0.001)
    entrance_occupied, _ = check_if_pos_empty(belly_size,
                                              parkEntrances[start_pos], np.nan)
    if entrance_occupied:
        # entrance is occupied: skip this customer and return the unchanged id
        return agent_id
    first_target = random.choice(attractions)

    customers[agent_id] = Agent(index=agent_id,
                                entrance=parkEntrances[start_pos],
                                entrancesStr=parkEntrancesStr[start_pos],
                                entranceTime=time,
                                firstTarget=first_target,
                                mapSize=mapSize,
                                belly=belly_size)
    # compute the path to the first target and register the new agent on the map
    path = ParkMap.get_path_to_next_pos(customers[agent_id])
    customers[agent_id].path = path
    customersInPark.append(agent_id)
    ParkMap.agentsLocation[agent_id] = customers[agent_id].pos
    return agent_id + 1  # next free id
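The helper relies on module-level state (customers, parkEntrances, attractions, ParkMap, check_if_pos_empty) defined elsewhere in the project. Its return-value convention, same id when the spawn fails and id + 1 on success, lets the caller treat the id as a running counter. A minimal, self-contained sketch of that pattern, with a hypothetical spawn() standing in for add_customer:

import random

def spawn(agent_id, blocked_prob=0.3):
    # stand-in for add_customer: sometimes the entrance is blocked and the
    # id comes back unchanged; otherwise the next free id is handed back
    if random.random() < blocked_prob:
        return agent_id
    return agent_id + 1

next_id = 0
for tick in range(100):   # one spawn attempt per simulation tick
    next_id = spawn(next_id)
print("customers created:", next_id)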
Example #2
init_holding = 1
initialcash = 20000
theta = 75
gene_length = 64
risk_coef = 0.5
num_strategies = 80

max_stockprice = 200
min_stockprice = 0.01
min_excess = 0.005
eta = 0.005
specialist_iterations = 10
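# NOTE: num_shares, init_price, dividend_startvalue, rho, noise_sd, d_bar and
# interest_rate are used below but not defined in this excerpt; purely
# illustrative values such as these would be needed to run it standalone:
num_shares = 1
init_price = 100.0
dividend_startvalue = 5.0
rho = 0.95          # autocorrelation of the dividend AR process
noise_sd = 0.25
d_bar = 10.0
interest_rate = 0.1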

the_market = Stock(num_shares, init_price, dividend_startvalue, rho, noise_sd,
                   d_bar, interest_rate)
agents = [
    Agent(init_holding, initialcash, num_strategies, theta, gene_length,
          risk_coef, num_shares, init_price, dividend_startvalue, rho,
          noise_sd, d_bar, interest_rate) for _ in range(100)
]
the_specialist = Specialist(max_stockprice, min_stockprice, min_excess, eta,
                            specialist_iterations, num_shares, init_price,
                            dividend_startvalue, rho, noise_sd, d_bar,
                            interest_rate)

# run 1000 trading periods: advance the dividend process, then let the
# specialist clear the market for the agents
for t in range(1000):
    the_market.advance_arprocess()
    the_specialist.clear_market(agents)

# 50-period moving average of the dividend series
ma = the_market.calculate_ma(query='dividend', period=50)
Example #3
File: file.py Project: ykeissar/iai
def main():
    # read the parameter file and split each line into tokens
    with open("parameters2.txt", "r") as f:
        flines = list(map(lambda x: x.replace('\n', '').split(), f.readlines()))
    n = int(flines[0][1])
    deadLine = float(flines[1][1])
    cDeadLine = deadLine
    graph = {}
    j = 2
    totalNumOfPpl = 0
    # read the n vertices: name and number of people at each
    for i in range(2, 2 + n):
        ppl = int(flines[i][1][1:]) if len(flines[i]) > 1 else 0
        graph[flines[i][0][1:]] = {"p": ppl, "e": []}
        j = j + 1
        totalNumOfPpl += ppl
    # read the edges (undirected): endpoints and weight
    for i in range(j + 1, len(flines)):
        v1 = "V" + flines[i][1]
        v2 = "V" + flines[i][2]
        w = int(flines[i][3][1:])
        graph[v1]["e"].append({"v": v2, "w": w, "blocked": False})
        graph[v2]["e"].append({"v": v1, "w": w, "blocked": False})

    print("please enter number of agents:")
    num_of_agents = int(input())
    print("please enter agents details: ")
    agents_details = input().split(',')
    agentDetails = list(map(lambda x: x.split(' '), agents_details))

    agentsList = list()
    for i in range(0, num_of_agents):
        agentsList.append(
            Agent(agentDetails[i][0], agentDetails[i][1], len(graph), L))
    # main loop
    print("Agents: \n", agentsList)
    print("Graph: \n", graph)
    while deadLine > 0 and totalNumOfPpl > 0 and not allTerminated(agentsList):
        for i in agentsList:
            if i.terminated:
                continue
            elif i.stepsLeft > 0:
                i.stepsLeft -= 1
            elif i.calcTime > 0:
                i.calcTime -= 1
            elif i.type == 's':
                saboAct(i, graph)
            else:
                # reached a vertex: evacuate its people
                verPpl = graph[i.currentPosition]["p"]
                totalNumOfPpl -= verPpl
                i.peopleEvacuated += verPpl
                graph[i.currentPosition]["p"] = 0
                if totalNumOfPpl == 0:
                    break
                # find the next vertex to travel to
                prevVer = i.currentPosition
                i.currentPosition = getNextStep(i, graph)
                print("Next step: ", i.currentPosition)
                if not i.currentPosition:
                    i.terminated = True
                else:
                    i.stepsLeft = getEdgeWeight(graph, prevVer,
                                                i.currentPosition)
                i.numOfActions += 1
        deadLine -= 1
    feedback = "Well Done!!! Agent: "
    if agentsList[0].peopleEvacuated == 0:
        feedback = "Too Bad.. You could've done better, "
    print(feedback, agentsList[0].type, "\nevacuated ",
          agentsList[0].peopleEvacuated, " people! And it took ",
          cDeadLine - deadLine, " rounds.")
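saboAct, getNextStep, getEdgeWeight and allTerminated are helpers defined elsewhere in the project. Only allTerminated has an obvious shape; a minimal sketch, assuming each agent exposes the terminated flag used in the loop above:

def allTerminated(agents):
    # True once every agent has set its terminated flag
    return all(agent.terminated for agent in agents)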
Example #4
if __name__ == '__main__':

    environment_name = 'CarRacing-v0'
    env = environment(environment_name, img_dim, num_stack, num_actions,
                      render, lr)
    num_states = img_dim
    print(env.env.action_space.shape)
    action_dim = env.env.action_space.shape[0]
    assert action_list.shape[1] == action_dim, \
        "length of Env action space does not match action buffer"
    num_actions = action_list.shape[0]
    # Setting random seeds with respect to python inbuilt random and numpy random
    random.seed(901)
    np.random.seed(1)
    agent = Agent(num_states, num_actions, img_dim, model_path)
    randomAgent = RandomAgent(num_actions)

    print(test_mode, train_mode)

    try:
        #Train agent
        if test_mode:
            if train_mode:
                print("Initialization with random agent. Fill memory")
                while randomAgent.exp < memory_size:
                    env.run(randomAgent)
                    print(randomAgent.exp, "/", memory_size)

                agent.memory = randomAgent.memory
                randomAgent = None
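The handoff agent.memory = randomAgent.memory pre-fills the learning agent's replay memory with experience gathered by a random policy before training starts. A self-contained sketch of that warm-up pattern (buffer size and transition contents here are illustrative, not the project's):

import random
from collections import deque

MEMORY_SIZE = 1000
replay = deque(maxlen=MEMORY_SIZE)

# collect transitions from a random policy until the buffer is full
while len(replay) < MEMORY_SIZE:
    state, action, reward, next_state = "s", random.randint(0, 3), 0.0, "s'"
    replay.append((state, action, reward, next_state))

print(len(replay), "transitions collected before learning begins")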
Example #5
# the action space
action_size = brain.vector_action_space_size
print('Number of actions:', action_size)

# the state space
state = env_info.vector_observations[0]
state_size = len(state)
print('States have length:', state_size)
#######################################

###########  Agent Setting  ###########
agent = Agent(state_size,
              action_size,
              seed=0,
              hidden_layers=hidden_layers,
              drop_p=drop_p,
              method=method,
              Dueling=Dueling)
print('-------- Model structure --------')
print('method :', method)
print(agent.qnetwork_local)
print('---------------------------------')
#######################################

scores = []  # list containing scores from each episode
scores_window = deque(maxlen=100)  # last 100 scores

eps = eps_start  # initialize epsilon
isFirst = True
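The snippet ends just before the episode loop that would use eps, scores and scores_window. A minimal sketch of how that bookkeeping typically looks in an epsilon-greedy training loop; the hyper-parameters and the placeholder per-episode score are illustrative, not taken from the project:

import random
from collections import deque

eps_start, eps_end, eps_decay = 1.0, 0.01, 0.995   # illustrative values
n_episodes = 300

scores = []
scores_window = deque(maxlen=100)   # rolling window of the last 100 scores
eps = eps_start

for episode in range(1, n_episodes + 1):
    score = random.random()                 # placeholder for one episode's return
    scores.append(score)
    scores_window.append(score)
    eps = max(eps_end, eps_decay * eps)     # decay the exploration rate
    if episode % 100 == 0:
        avg = sum(scores_window) / len(scores_window)
        print(f"episode {episode}  avg(100) {avg:.2f}  eps {eps:.3f}")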
Example #6
def wallet_to_agents(self, wallets):
    # wrap each wallet in an Agent instance
    return [Agent(wallet) for wallet in wallets]
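A toy usage of the conversion, with a stand-in Agent and a hypothetical Portfolio class standing in for whatever class defines the method in the project:

class Agent:
    def __init__(self, wallet):
        self.wallet = wallet

class Portfolio:
    def wallet_to_agents(self, wallets):
        return [Agent(wallet) for wallet in wallets]

agents = Portfolio().wallet_to_agents([100, 250, 40])
print([a.wallet for a in agents])   # -> [100, 250, 40]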