Example #1
    def testValueBased(self):
        """ Test value-based learner.
        """
        mkt = SmartMarket(self.case)
        exp = MarketExperiment([], [], mkt)
        for g in self.case.generators:
            env = DiscreteMarketEnvironment([g], mkt)
            dim_state, num_actions = (10, 10)
            exp.tasks.append(ProfitTask(env, dim_state, num_actions))
            module = ActionValueTable(dim_state, num_actions)
            module.initialize(1.0)
            # module = ActionValueNetwork(dimState=1, numActions=4)
            learner = SARSA()  # alternatives: Q(), QLambda()
            # learner.explorer = BoltzmannExplorer()  # default is epsilon-greedy
            exp.agents.append(LearningAgent(module, learner))
        for _ in range(1000):
            exp.doInteractions(24) # interact with the env in batch mode
            for agent in exp.agents:
                agent.learn()
                agent.reset()
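
A side note on the commented-out explorer line: PyBrain's value-based learners select actions epsilon-greedily by default, and the comment hints at swapping in Boltzmann (softmax) exploration instead. A minimal sketch, assuming PyBrain's standard module paths (the tau value is purely illustrative):

from pybrain.rl.explorers.discrete.boltzmann import BoltzmannExplorer
from pybrain.rl.learners.valuebased.sarsa import SARSA

learner = SARSA()
# Boltzmann exploration samples actions with probability proportional to
# exp(Q/tau); a larger tau means more exploration. tau=2.0 is illustrative.
learner.explorer = BoltzmannExplorer(tau=2.0)
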
Example #2
File: rl.py Project: oosterden/pylon
mkt = SmartMarket(case)

agents = []
tasks = []
for g in bus1.generators:
    # Create an environment for each agent with an asset and a market.
    env = ParticipantEnvironment(g, mkt, n_offbids=2)

    # Create a task for the agent to achieve.
    task = ProfitTask(env)

    # Build an artificial neural network for the agent.
    net = buildNetwork(task.outdim, task.indim, bias=False, outputbias=False)
    # net._setParameters(array([9]))

    # Create a learning agent with a learning algorithm.
    agent = LearningAgent(module=net, learner=ENAC())

    # Initialize parameters (variance).
    # agent.setSigma([-1.5])

    # Set learning options.
    agent.learner.alpha = 2.0
    # agent.learner.rprop = True
    agent.actaspg = False
    # agent.disableLearning()

    agents.append(agent)
    tasks.append(task)

# The Experiment will coordinate the interaction of the given agents and
# their associated tasks.
experiment = MarketExperiment(tasks, agents, mkt)
experiment.setRenderer(ExperimentRenderer())

# Instruct the experiment to coordinate a set number of interactions.
experiment.doInteractions(3)
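
Neither example shows its imports: the learning machinery comes from PyBrain and the market classes from Pylon's Pyreto code. A sketch of the likely import block follows; the PyBrain paths are the library's real module paths, while the Pylon paths are assumptions that may differ between forks:

from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners.directsearch.enac import ENAC

# Assumed to live in Pylon's pyreto subpackage -- verify against the fork in use.
from pylon.pyreto import (SmartMarket, MarketExperiment, ParticipantEnvironment,
                          ProfitTask, ExperimentRenderer)
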
Example #3
File: auction.py Project: Waqquas/pylon
# Create the market and associate learning agents with each generator.
market = SmartMarket(case)

# Specify the discrete set of possible markups on marginal cost.
markups = (0.1, 0.2, 0.33, 0.5, 0.6, 0.75, 1.0)

# Specify the number of offers/bids each participant can submit.
numOffbids = 4

# Specify the desired number of discrete states.
dimState = 10

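# One discrete action per combination of markup level and offer/bid slot (assumed interpretation).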
dimAction = len(markups) * numOffbids

# Construct an experiment to test the market.
experiment = MarketExperiment([], [], market)

# Add the agents and their tasks.
for g in case.generators:
    env = DiscreteMarketEnvironment([g], market, dimState, markups, numOffbids)
    task = ProfitTask(env)
    module = ActionValueTable(dimState, dimAction)
    module.initialize(1.0)
    # learner = SARSA(gamma=0.9)
    learner = Q()
    # learner = QLambda()
    # learner.explorer = BoltzmannExplorer()  # default is epsilon-greedy
    agent = LearningAgent(module, learner)

    agent.name = g.name
    experiment.tasks.append(task)
    experiment.agents.append(agent)  # register the agent alongside its task, as in Example #1
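
The script stops after wiring up the experiment; a training loop in the style of Example #1 would typically follow. A minimal sketch (episode count and batch size copied from Example #1, otherwise arbitrary):

for _ in range(1000):
    experiment.doInteractions(24)  # one batch of market interactions
    for agent in experiment.agents:
        agent.learn()   # update the action-value table from observed rewards
        agent.reset()   # clear the agent's history before the next batch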