Code example #1
    def __init__(self, task):
        # Build the Gym environment for the requested task and initialise the base agent.
        self.env = gym.make(task)
        LearningAgent.__init__(self, self.env)
        # Linear policy parameters: one weight row per action over the task's features.
        self.nrFeatures = getNrFeaturesForTask(task)
        self.nrActions = self.env.action_space.n
        self.tF = np.zeros((self.nrActions, self.nrFeatures), 'float32')
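The `(nrActions, nrFeatures)` shape of `tF` suggests a linear model with one weight row per action. Below is a minimal sketch of how such a matrix could be used to pick a greedy action; the helper name, the zero-initialised weights, and the sizes are illustrative assumptions, not part of the original class.

import numpy as np

def greedy_action(tF, features):
    # Score each action as the dot product of its weight row with the
    # state features and return the index of the best-scoring action.
    scores = tF @ features          # shape: (nrActions,)
    return int(np.argmax(scores))

# Hypothetical usage: 3 actions, 8 features, all-zero weights as in the snippet.
tF = np.zeros((3, 8), 'float32')
state_features = np.random.rand(8).astype('float32')
print(greedy_action(tF, state_features))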
Code example #2
    def __init__(self, task, gamma):
        # Build the environment and initialise the base agent.
        env = gym.make(task)
        LearningAgent.__init__(self, env)
        # Discretise the observation space so a tabular Q-function can be used.
        self.discretizer = Discretizer(task)
        self.numStates, self.numActions = self.discretizer.getStateActionInfo()
        self.gamma = gamma
        self.Q = np.zeros((self.numStates, self.numActions))
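This variant keeps a dense Q-table over the discretised states together with a discount factor `gamma`. A sketch of the one-step Q-learning update such a table is usually paired with follows; the learning rate `alpha`, the function name, and the table sizes are assumptions, not taken from the project.

import numpy as np

def q_update(Q, s, a, r, s_next, gamma, alpha=0.1):
    # One-step Q-learning: move Q[s, a] toward the bootstrapped target.
    target = r + gamma * np.max(Q[s_next])
    Q[s, a] += alpha * (target - Q[s, a])

# Hypothetical usage on a 10-state, 4-action table with gamma = 0.99.
Q = np.zeros((10, 4))
q_update(Q, s=0, a=2, r=1.0, s_next=3, gamma=0.99)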
Code example #3
    def __init__(self, task):
        self.task = task
        self.env = gym.make(task)
        LearningAgent.__init__(self, self.env)
        self.nrFeatures = getNrFeaturesForTask(task)
        self.nrActions = getActionsForTask(task)
        self.tF = np.random.randn(self.nrActions, self.nrFeatures)
Code example #4
    def __init__(self, task):
        self.env = gym.make(task)
        LearningAgent.__init__(self, self.env)
        self.nrFeatures = getNrFeaturesForTask(task)
        self.nrActions = getActionsForTask(task)
        self.tF = np.zeros((self.nrActions, self.nrFeatures), 'float32')
        # self.pop (the population size) is expected to be defined elsewhere, e.g. as a class attribute.
        self.population = np.random.rand(self.pop, self.nrActions, self.nrFeatures)
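Compared with example #1, this constructor also allocates a `population` tensor of `self.pop` candidate weight matrices, each shaped like `tF`, which points at a population-based (e.g. evolutionary) search over policies. A sketch of how such a population might be scored; the helper and the dummy fitness function are hypothetical, and a real agent would use episode return as the fitness.

import numpy as np

def best_member(population, fitness_fn):
    # Score every candidate weight matrix and return the highest-scoring one.
    scores = [fitness_fn(member) for member in population]
    return population[int(np.argmax(scores))]

# Hypothetical usage: 5 candidates for a 3-action, 8-feature linear policy.
population = np.random.rand(5, 3, 8)
best_tF = best_member(population, fitness_fn=lambda w: float(w.sum()))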
Code example #5
File: poker.py  Project: marinthiercelin/projet_ai
def ask_player_mode(number, chips, bet_value, model):
    print "Player" + str(number) + " Mode : "
    print " 1 : Manual player (You play)"
    print " 2 : Random Agent"
    print " 3 : All-In Agent"
    print " 4 : Bucket_Agent"
    print " 5 : Bayesian Agent ( with pre-computed model )"
    print " 6 : Learning Agent "
    print " 7 : Regret-Minimization Agent"
    x = raw_input(" What mode ? [1-7] : ")
    gamer = None
    if x == "1":
        gamer = player("player" + str(number), chips)
    elif x == "2":
        gamer = random_agent("player" + str(number), chips)
    elif x == "3":
        gamer = All_In_Agent("player" + str(number), chips)
    elif x == "4":
        gamer = Agent_Bucket("player" + str(number), chips)
    elif x == "5":
        gamer = bayesian("player" + str(number), chips, list(model))
    elif x == "6":
        gamer = LearningAgent("player" + str(number), chips, 2)
    elif x == "7":
        gamer = Regret_Agent("player" + str(number), chips, bet_value)
    else:
        return False
    return gamer
Code example #6
File: main.py  Project: naveenr414/ijcai-rideshare
def run_epoch(envt,
              oracle,
              central_agent,
              value_function,
              DAY,
              is_training,
              agents_predefined=None,
              TRAINING_FREQUENCY: int = 1):

    # INITIALISATIONS
    Experience.envt = envt
    # Initialising agents
    if agents_predefined is not None:
        agents = deepcopy(agents_predefined)
    else:
        initial_states = envt.get_initial_states(envt.NUM_AGENTS, is_training)
        agents = [
            LearningAgent(agent_idx, initial_state)
            for agent_idx, initial_state in enumerate(initial_states)
        ]

    # ITERATING OVER TIMESTEPS
    print("DAY: {}".format(DAY))
    down_sample = 1
    if Settings.has_value("down_sample") and Settings.get_value("down_sample"):
        down_sample = Settings.get_value("down_sample")
    request_generator = envt.get_request_batch(DAY, downsample=down_sample)
    total_value_generated = 0
    num_total_requests = 0

    ret_dictionary = {
        'epoch_requests_completed': [],
        'epoch_requests_accepted': [],
        'epoch_dropoff_delay': [],
        'epoch_requests_seen': [],
        'epoch_requests_accepted_profit': [],
        'epoch_each_agent_profit': [],
        'epoch_locations_all': [],
        'epoch_locations_accepted': [],
    }

    while True:
        # Get new requests
        try:
            current_requests = next(request_generator)

            if Settings.has_value("print_verbose") and Settings.get_value(
                    "print_verbose"):
                print("Current time: {} or {} on DAY {}".format(
                    envt.current_time,
                    datetime.timedelta(seconds=envt.current_time), DAY))
                print("Number of new requests: {}".format(
                    len(current_requests)))
        except StopIteration:
            break

        ret_dictionary['epoch_locations_all'].append(
            [i.pickup for i in current_requests])

        for i in current_requests:
            envt.requests_region[envt.labels[i.pickup]] += 1

        # Get feasible actions
        feasible_actions_all_agents = oracle.get_feasible_actions(
            agents, current_requests)

        # Score feasible actions
        experience = Experience(deepcopy(agents), feasible_actions_all_agents,
                                envt.current_time, len(current_requests))
        scored_actions_all_agents = value_function.get_value([experience])

        # Choose actions for each agent
        scored_final_actions = central_agent.choose_actions(
            scored_actions_all_agents,
            is_training=is_training,
            epoch_num=envt.num_days_trained)

        # Assign final actions to agents
        for agent_idx, (action, _) in enumerate(scored_final_actions):
            agents[agent_idx].path = deepcopy(action.new_path)

            position = experience.agents[agent_idx].position.next_location
            time_driven = 0
            for request in action.requests:
                time_driven += envt.get_travel_time(request.pickup,
                                                    request.dropoff)

            time_to_request = sum([
                envt.get_travel_time(position, request.pickup)
                for request in action.requests
            ])

            envt.driver_utilities[agent_idx] += max(
                (time_driven - time_to_request), 0)

        # Calculate reward for selected actions
        rewards = []
        locations_served = []
        for action, _ in scored_final_actions:
            reward = len(action.requests)
            locations_served += [request.pickup for request in action.requests]
            for request in action.requests:
                envt.success_region[envt.labels[request.pickup]] += 1
            rewards.append(reward)
            total_value_generated += reward

        if Settings.has_value("print_verbose") and Settings.get_value(
                "print_verbose"):
            print("Reward for epoch: {}".format(sum(rewards)))

        profits, agent_profits = get_profit_distribution(scored_final_actions)
        for i, j in agent_profits:
            envt.driver_profits[i] += j

        # Update
        if (is_training):
            # Update replay buffer
            value_function.remember(experience)

            # Update value function every TRAINING_FREQUENCY timesteps
            if ((int(envt.current_time) // int(envt.EPOCH_LENGTH)) %
                    TRAINING_FREQUENCY == TRAINING_FREQUENCY - 1):
                value_function.update(central_agent)

        # Sanity check
        for agent in agents:
            assert envt.has_valid_path(agent)

        # Writing statistics to logs
        value_function.add_to_logs(
            'rewards_day_{}'.format(envt.num_days_trained), sum(rewards),
            envt.current_time)
        avg_capacity = sum([agent.path.current_capacity
                            for agent in agents]) / envt.NUM_AGENTS
        value_function.add_to_logs(
            'avg_capacity_day_{}'.format(envt.num_days_trained), avg_capacity,
            envt.current_time)

        epoch_dictionary = {}
        for agent in agents:
            agent_dictionary = get_statistics_next_epoch(agent, envt)
            if epoch_dictionary == {}:
                epoch_dictionary = agent_dictionary
            else:
                for key in agent_dictionary:
                    epoch_dictionary[key] += agent_dictionary[key]

        # Simulate the passing of time
        envt.simulate_motion(agents, current_requests)
        num_total_requests += len(current_requests)

        ret_dictionary['epoch_requests_completed'].append(
            epoch_dictionary['requests_served'])
        ret_dictionary['epoch_dropoff_delay'].append(
            epoch_dictionary['total_delivery_delay'])
        ret_dictionary['epoch_requests_accepted'].append(sum(rewards))
        ret_dictionary['epoch_requests_seen'].append(len(current_requests))
        ret_dictionary['epoch_requests_accepted_profit'].append(sum(profits))
        ret_dictionary['epoch_each_agent_profit'].append(agent_profits)
        ret_dictionary['epoch_locations_accepted'].append(locations_served)

        if Settings.has_value("print_verbose") and Settings.get_value(
                "print_verbose"):
            print("Requests served {}".format(
                np.sum(ret_dictionary["epoch_requests_completed"])))
            print("Requests accepted {}".format(sum(rewards)))

    # Printing statistics for current epoch
    print('Number of requests accepted: {}'.format(total_value_generated))
    print('Number of requests seen: {}'.format(num_total_requests))

    ret_dictionary['total_requests_accepted'] = total_value_generated

    return ret_dictionary
Code example #7
File: main.py  Project: naveenr414/ijcai-rideshare
                        max_test_score = test_score if test_score > max_test_score else max_test_score

            envt.num_days_trained += 1
            if value_num in NEURAL_VALUE_FUNCTIONS:
                value_function.model.save('../models/{}_{}.h5'.format(
                    num_agents, envt.num_days_trained))

    # Reset the driver utilities
    envt.reset()
    central_agent.reset()

    for day in TEST_DAYS:
        initial_states = envt.get_initial_states(envt.NUM_AGENTS,
                                                 is_training=False)
        agents = [
            LearningAgent(agent_idx, initial_state)
            for agent_idx, initial_state in enumerate(initial_states)
        ]

        epoch_data = run_epoch(envt,
                               oracle,
                               central_agent,
                               value_function,
                               day,
                               is_training=False,
                               agents_predefined=agents)
        total_requests_served = epoch_data['total_requests_accepted']
        print("(TEST) DAY: {}, Requests: {}\n\n".format(
            day, total_requests_served))

        # Write our pickled results
Code example #8
from Agent import *
from SensorAgent import SensorAgent
from KinematicsAgent import KinematicsAgent
from ControllingAgent import ControllingAgent
from SupervisorAgent import SupervisorAgent
from LearningAgent import LearningAgent

#Agent initialization
sensorAgent = SensorAgent(AgentType.SensorAgent)
kinematicsAgent = KinematicsAgent(AgentType.KinematicsAgent)
controllingAgent = ControllingAgent(AgentType.ControllingAgent)
supervisorAgent = SupervisorAgent(AgentType.SupervisorAgent)
learningAgent = LearningAgent(AgentType.LearningAgent)

#Adding peer agents
#Sensor
sensorAgent.AddPeerAgent(learningAgent)

#Kinematics
kinematicsAgent.AddPeerAgent(learningAgent)
kinematicsAgent.AddPeerAgent(controllingAgent)

#Controlling
controllingAgent.AddPeerAgent(kinematicsAgent)

#Supervisor
supervisorAgent.AddPeerAgent(learningAgent)

#Learning
learningAgent.AddPeerAgent(sensorAgent)
learningAgent.AddPeerAgent(kinematicsAgent)
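The snippet only shows the wiring between agents; `AddPeerAgent` itself lives in the project's Agent base class. A minimal, hypothetical sketch of the peer-list pattern these calls imply (not the project's actual implementation):

class Agent(object):
    # Hypothetical stand-in for the project's Agent base class:
    # each agent keeps a list of peers it is allowed to exchange messages with.
    def __init__(self, agent_type):
        self.agent_type = agent_type
        self.peers = []

    def AddPeerAgent(self, peer):
        # Register another agent as a communication peer, avoiding duplicates.
        if peer not in self.peers:
            self.peers.append(peer)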
Code example #9
def run_epoch(envt,
              oracle,
              central_agent,
              value_function,
              DAY,
              is_training,
              agents_predefined=None,
              TRAINING_FREQUENCY: int = 1):

    # INITIALISATIONS
    Experience.envt = envt
    # Initialising agents
    if agents_predefined is not None:
        agents = deepcopy(agents_predefined)
    else:
        initial_states = envt.get_initial_states(envt.NUM_AGENTS, is_training)
        agents = [LearningAgent(agent_idx, initial_state) for agent_idx, initial_state in enumerate(initial_states)]

    # ITERATING OVER TIMESTEPS
    print("DAY: {}".format(DAY))
    request_generator = envt.get_request_batch(DAY)
    total_value_generated = 0
    num_total_requests = 0
    while True:
        # Get new requests
        try:
            current_requests = next(request_generator)
            print("Current time: {}".format(envt.current_time))
            print("Number of new requests: {}".format(len(current_requests)))
        except StopIteration:
            break

        # Get feasible actions
        feasible_actions_all_agents = oracle.get_feasible_actions(agents, current_requests)

        # Score feasible actions
        experience = Experience(deepcopy(agents), feasible_actions_all_agents, envt.current_time, len(current_requests))
        scored_actions_all_agents = value_function.get_value([experience])

        # Choose actions for each agent
        scored_final_actions = central_agent.choose_actions(scored_actions_all_agents, is_training=is_training, epoch_num=envt.num_days_trained)

        # Assign final actions to agents
        for agent_idx, (action, _) in enumerate(scored_final_actions):
            agents[agent_idx].path = deepcopy(action.new_path)

        # Calculate reward for selected actions
        rewards = []
        for action, _ in scored_final_actions:
            reward = envt.get_reward(action)
            rewards.append(reward)
            total_value_generated += reward
        print("Reward for epoch: {}".format(sum(rewards)))

        # Update
        if (is_training):
            # Update replay buffer
            value_function.remember(experience)

            # Update value function every TRAINING_FREQUENCY timesteps
            if ((int(envt.current_time) // int(envt.EPOCH_LENGTH)) % TRAINING_FREQUENCY == TRAINING_FREQUENCY - 1):
                value_function.update(central_agent)

                # Diagnostics
                for action, score in scored_actions_all_agents[0]:
                    print("{}: {}, {}, {}".format(score, action.requests, action.new_path, action.new_path.total_delay))
                print()
                for idx, (action, score) in enumerate(scored_final_actions[:10]):
                    print("{}: {}, {}, {}".format(score, action.requests, action.new_path, action.new_path.total_delay))

        # Sanity check
        for agent in agents:
            assert envt.has_valid_path(agent)

        # Writing statistics to logs
        value_function.add_to_logs('rewards_day_{}'.format(envt.num_days_trained), sum(rewards), envt.current_time)
        avg_capacity = sum([agent.path.current_capacity for agent in agents]) / envt.NUM_AGENTS
        value_function.add_to_logs('avg_capacity_day_{}'.format(envt.num_days_trained), avg_capacity, envt.current_time)

        # Simulate the passing of time
        envt.simulate_motion(agents, current_requests)
        num_total_requests += len(current_requests)

    # Printing statistics for current epoch
    print('Number of requests accepted: {}'.format(total_value_generated))
    print('Number of requests seen: {}'.format(num_total_requests))

    return total_value_generated
Code example #10
                    total_requests_served = run_epoch(envt, oracle, central_agent, value_function, day, is_training=False)
                    print("\n(TEST) DAY: {}, Requests: {}\n\n".format(day, total_requests_served))
                    test_score += total_requests_served
                value_function.add_to_logs('validation_score', test_score, envt.num_days_trained)

                # TODO: Save results better
                if (isinstance(value_function, NeuralNetworkBased)):
                    if (test_score > max_test_score or (envt.num_days_trained % SAVE_FREQ) == (SAVE_FREQ - 1)):
                        value_function.model.save('../models/{}_{}agent_{}capacity_{}delay_{}interval_{}_{}.h5'.format(type(value_function).__name__, args.numagents, args.capacity, args.pickupdelay, args.decisioninterval, envt.num_days_trained, test_score))
                        max_test_score = test_score if test_score > max_test_score else max_test_score

            envt.num_days_trained += 1

    # CHECK TEST SCORE
    # value_function_baseline = RewardPlusDelay(DELAY_COEFFICIENT=1e-7, log_dir=LOG_DIR)

    for day in TEST_DAYS:
        # Initialising agents
        initial_states = envt.get_initial_states(envt.NUM_AGENTS, is_training=False)
        agents = [LearningAgent(agent_idx, initial_state) for agent_idx, initial_state in enumerate(initial_states)]

        total_requests_served = run_epoch(envt, oracle, central_agent, value_function, day, is_training=False, agents_predefined=agents)
        print("\n(TEST) DAY: {}, Requests: {}\n\n".format(day, total_requests_served))
        value_function.add_to_logs('test_requests_served', total_requests_served, envt.num_days_trained)

        # total_requests_served = run_epoch(envt, oracle, central_agent, value_function_baseline, day, is_training=False, agents_predefined=agents)
        # print("\n(TEST) DAY: {}, Requests: {}\n\n".format(day, total_requests_served))
        # value_function_baseline.add_to_logs('test_requests_served', total_requests_served, envt.num_days_trained)

        envt.num_days_trained += 1
Code example #11
from LearningAgent import LearningAgent
from importSVM import import_model

#Get the data and the cards, then generate the proper files to send to the Preference Learning Toolbox
la = LearningAgent()
la.process_and_save_data('all_data.csv','processed_deck_features.json','test.cdata','test.pdatas')
la.select_card('Ba3awX1yiTkSIlV7ESWD6Tw', 'WalAkS9-bTt-x2d7Y6RfDVA', 'WkTQ-d3JfSdeAMp2vQMwK6g')

#Get the RankSVM model from the toolbox by choosing the best parameters
#TODO: Add those parameters later

#Create a new Learning Agent based on the model generated using the Preference Learning Toolbox
##bot.learn_from_model('RankSVM.model')

#import_model('RankSVM.model', 'test.cdata')
Code example #12
    def __init__(self, Qs, env):
        # One learning agent per Q-table; the shared multi-agent env exposes one action space per agent.
        self.agents = [LearningAgent(Q, env.action_space[0].n) for Q in Qs]
        self.env = env
        self.current_episode = 0
        self.episodes_amount = 100