Exemplo n.º 1
0
 def __init__(self, evaluator, evaluable, **args):
     """ The evaluator must be an episodic task, and the evaluable must be a module.

     Builds an internal sub-agent around a copy of `evaluable`, then hands a
     wrapped evaluator to Learner.__init__ so that each evaluation also
     performs a batch of learning episodes.

     :param evaluator: an EpisodicTask used to score agents/modules.
     :param evaluable: a Module whose parameters are optimized.
     :param args: extra keyword arguments forwarded to Learner.__init__.
     """
     assert isinstance(evaluator, EpisodicTask)
     assert isinstance(evaluable, Module)
     # Internal agent built from class-level factories/settings
     # (self.subagent, self.sublearner, self.subargs, self.learningRate,
     #  self.momentum — presumably class attributes; not set in this method).
     self.agent = self.subagent(evaluable.copy(), self.sublearner(**self.subargs))
     self.agent.learner.setAlpha(self.learningRate)
     self.agent.learner.gd.momentum = self.momentum
     # Make copy() return the same instance so the sub-agent is never cloned.
     self.agent.copy = lambda: self.agent
     self.module = evaluable.copy()
     def wrappedEvaluator(module):
         """ Evaluate the internal agent (changing its internal parameters),
         then transfer the learned parameters to the outside module and
         return the score of a greedy run on that module. """
         self.agent.reset()
         res = 0.
         # Accumulate stochastic returns over a batch of learning episodes.
         for dummy in range(self.learningBatches):
             res += evaluator(self.agent)
         res /= self.learningBatches
         self.agent.learn()
         # Copy the agent's learned parameters into the evaluated module.
         module._setParameters(self.agent.module.params[:module.paramdim])
         # the performance is measured on a greedy run:
         res2 = evaluator(module)
         if self.verbose:
             print 'stoch', res, 'greedy', res2,
         return res2

     Learner.__init__(self, wrappedEvaluator, evaluable, **args)
Exemplo n.º 2
0
    def __init__(self, meta, layers=None, rate=.05, target=None, momentum=None, trans=None, wrange=100):
        """Build a neural-net learner whose topology is inputs + hidden layers + outputs.

        :param meta: dataset metadata; `meta.names()` lists attributes and
            `meta[target]` yields `(_, possible)` target values (schema assumed
            from usage — confirm against the Learner base class).
        :param layers: hidden-layer sizes (defaults to no hidden layers).
        :param rate: passed to Net as `rate` (presumably the learning rate).
        :param target: target attribute, resolved by Learner into self.target.
        :param momentum, trans, wrange: forwarded verbatim to Net.
        """
        Learner.__init__(self, meta, target)

        # BUG FIX: `layers=[]` was a shared mutable default argument; use the
        # None sentinel so each call gets its own (empty) list.
        if layers is None:
            layers = []

        # One input unit per non-target attribute.
        inputs = len(self.meta.names()) - 1
        _, possible = self.meta[self.target]
        self.outputs = possible
        # Output layer has one unit per possible target value.
        self.net = Net([inputs] + layers + [len(possible)], rate=rate, momentum=momentum, wrange=wrange, trans=trans)
Exemplo n.º 3
0
    def __init__(self, evaluator, evaluable, **args):
        """ The evaluator must be an episodic task, and the evaluable must be a module.

        Builds an internal sub-agent around a copy of `evaluable`, then hands
        a wrapped evaluator to Learner.__init__ so that each evaluation also
        performs a batch of learning episodes.

        :param evaluator: an EpisodicTask used to score agents/modules.
        :param evaluable: a Module whose parameters are optimized.
        :param args: extra keyword arguments forwarded to Learner.__init__.
        """
        assert isinstance(evaluator, EpisodicTask)
        assert isinstance(evaluable, Module)
        # Internal agent built from class-level factories/settings
        # (self.subagent, self.sublearner, self.subargs, self.learningRate,
        #  self.momentum — presumably class attributes; not set here).
        self.agent = self.subagent(evaluable.copy(),
                                   self.sublearner(**self.subargs))
        self.agent.learner.setAlpha(self.learningRate)
        self.agent.learner.gd.momentum = self.momentum
        # Make copy() return the same instance so the sub-agent is never cloned.
        self.agent.copy = lambda: self.agent
        self.module = evaluable.copy()

        def wrappedEvaluator(module):
            """ Evaluate the internal agent (changing its internal parameters),
            then transfer the learned parameters to the outside module and
            return the score of a greedy run on that module. """
            self.agent.reset()
            res = 0.
            # Accumulate stochastic returns over a batch of learning episodes.
            for dummy in range(self.learningBatches):
                res += evaluator(self.agent)
            res /= self.learningBatches
            self.agent.learn()
            # Copy the agent's learned parameters into the evaluated module.
            module._setParameters(self.agent.module.params[:module.paramdim])
            # the performance is measured on a greedy run:
            res2 = evaluator(module)
            if self.verbose:
                print 'stoch', res, 'greedy', res2,
            return res2

        Learner.__init__(self, wrappedEvaluator, evaluable, **args)
Exemplo n.º 4
0
	def __init__(self, data, init_model, param):
		"""Initialize the learner, then build integer index maps for the data.

		:param data: training data; `data.dat` feeds the feature map.
		:param init_model: initial model, forwarded to the base class.
		:param param: configuration, forwarded to the base class.
		"""
		Learner.__init__(self, data, init_model, param)

		# Map each raw feature id to a contiguous integer index.
		self.feature_map = get_feature_map(data.dat)
		# Map each label to a contiguous integer index
		# (self.labels is presumably populated by Learner.__init__).
		self.label_map = get_label_map(self.labels)
Exemplo n.º 5
0
 def __init__(self, name, selection_strategy):
     """Create an incremental learner.

     The base learner is registered under the name "incremental_<name>".

     :param str name: Suffix for the learner name.
     :param SelectionStrategy selection_strategy: Strategy used for selection.
     """
     full_name = "incremental_{}".format(name)
     Learner.__init__(self, full_name)
     self.observer = observe.DispatchObserver()
     self.selection_strategy = selection_strategy
Exemplo n.º 6
0
    def __init__(self, meta, rate, target=None):
        """Create one binary Perceptron per unordered pair of target values.

        :param meta: dataset metadata; `meta[self.target]` yields
            `(_, possible)` target values (schema assumed from usage).
        :param rate: forwarded to each Perceptron (presumably learning rate).
        :param target: target attribute, resolved by Learner into self.target.
        """
        Learner.__init__(self, meta, target)

        width = len(meta.names())
        _, possible = meta[self.target]

        # Keyed by (truthy, falsy) class pairs, one perceptron each.
        self.perceptrons = {
            pair: Perceptron(width, rate)
            for pair in itertools.combinations(possible, 2)
        }
Exemplo n.º 7
0
 def __init__(self,
              id,
              quorum_size,
              is_leader=False,
              promised_id=None,
              accepted_id=None,
              accepted_value=None):
     """Initialize a node that plays all three consensus roles at once
     (Proposer, Acceptor, Learner — this looks like a Paxos-style node;
     confirm against the base classes).

     Each base __init__ is called explicitly rather than via super()
     because the three bases take different constructor arguments.

     :param id: node identifier, shared by all three roles.
     :param quorum_size: quorum size used by Proposer and Learner.
     :param is_leader: initial leadership flag for the Proposer role.
     :param promised_id: previously promised proposal id (Acceptor state).
     :param accepted_id: previously accepted proposal id (Acceptor state).
     :param accepted_value: previously accepted value (Acceptor state).
     """
     Proposer.__init__(self, id, quorum_size, is_leader)
     Acceptor.__init__(self, id, promised_id, accepted_id, accepted_value)
     Learner.__init__(self, id, quorum_size)
Exemplo n.º 8
0
 def __init__(self, start, goal, Xrange, Vrange, num_actions=3,
              max_memory=500, hidden_size=200, learning_rate=.001,
              discount_factor=.99, epsilon=.1):
     """Initialize the base learner, then build two models.

     All parameters are forwarded unchanged to Learner.__init__; the two
     models are built by self.build_model() (defined on the subclass —
     presumably identical twin networks; confirm with build_model).
     """
     Learner.__init__(self, start, goal, Xrange, Vrange, num_actions,
                      max_memory, hidden_size, learning_rate,
                      discount_factor, epsilon)
     self.model1 = self.build_model()
     self.model2 = self.build_model()
Exemplo n.º 9
0
 def __init__(self, kb, n=None, min_sup=1, sim=1, depth=4, target=None,
              use_negations=False, optimal_subclass=True):
     """Delegate configuration to the Learner base class.

     NOTE(review): `optimal_subclass` is accepted but neither forwarded to
     Learner nor stored here — confirm it is consumed by a subclass.
     """
     Learner.__init__(self, kb, n=n, min_sup=min_sup, sim=sim, depth=depth,
                      target=target, use_negations=use_negations)
Exemplo n.º 10
0
 def __init__(self,
              kb,
              n=None,
              min_sup=1,
              sim=1,
              depth=4,
              target=None,
              use_negations=False,
              optimal_subclass=True):
     """Delegate configuration to the Learner base class.

     NOTE(review): `optimal_subclass` is accepted but neither forwarded to
     Learner nor stored here — confirm it is consumed by a subclass.
     """
     Learner.__init__(self,
                      kb,
                      n=n,
                      min_sup=min_sup,
                      sim=sim,
                      depth=depth,
                      target=target,
                      use_negations=use_negations)
Exemplo n.º 11
0
 def __init__(self, rule_learner_name, max_hyperplanes_per_rule,
              max_terms_per_rule):
     """Create a greedy rule learner named "greedy_<rule_learner_name>".

     :param rule_learner_name: suffix for the learner name.
     :param max_hyperplanes_per_rule: cap on hyperplanes per rule.
     :param max_terms_per_rule: cap on terms per rule.
     """
     Learner.__init__(self, "greedy_{}".format(rule_learner_name))
     self.max_terms_per_rule = max_terms_per_rule
     self.max_hyperplanes_per_rule = max_hyperplanes_per_rule
Exemplo n.º 12
0
File: pa.py — Project: nachocano/asml
 def __init__(self, module_properties, dao):
   """Wrap a PassiveAggressiveClassifier configured from module_properties.

   Reads the 'C' and 'loss' keys; shuffling is disabled for online use.
   """
   classifier = PassiveAggressiveClassifier(C=module_properties['C'],
                                            loss=module_properties['loss'],
                                            shuffle=False)
   Learner.__init__(self, module_properties, dao, classifier)
Exemplo n.º 13
0
 def __init__(self, module_properties, dao):
   """Wrap an SGDClassifier configured from module_properties.

   Reads the 'loss', 'penalty', 'step_policy', 'eta0' and 'average' keys;
   shuffling is disabled for online use.
   """
   model = SGDClassifier(loss=module_properties['loss'],
                         penalty=module_properties['penalty'],
                         learning_rate=module_properties['step_policy'],
                         eta0=module_properties['eta0'],
                         average=module_properties['average'],
                         shuffle=False)
   Learner.__init__(self, module_properties, dao, model)
Exemplo n.º 14
0
 def __init__(self, k):
     """Initialize the base learner and record the hyperparameter ``k``
     (presumably a neighbour/cluster count — confirm with the class usage)."""
     Learner.__init__(self)
     # k is the only configuration this learner stores.
     self.k = k