Code Example #1
File: skann.py Project: maksim171/StockDemo
def begin1():

    cbf = readFromCsv("cbf2")
    numdataset = np.array(cbf, dtype=np.float64)
    # training data, validation data, today's data
    tgdataset, vadataset, tydata = dataSplit(numdataset)
    # normalization parameters
    gydata, dmean, dstd = gyData(tgdataset)

    # validation data and today's data
    gyvadata = calFeature(vadataset, dmean, dstd)
    gytydata = calFeature(tydata, dmean, dstd)

    # neural network
    trainingset = buildTrainingSet(gydata)

    for i in range(1000):
        net = buildNetwork(15,
                           8,
                           1,
                           bias=True,
                           hiddenclass=TanhLayer,
                           outclass=TanhLayer)
        trainer = BackpropTrainer(net, trainingset)
        trainer.trainEpochs(epochs=100)
        rate = va.calRightRate(gyvadata, net)
        if rate > 0.6:
            NetworkWriter.writeToFile(
                net, '../netv3/zxtx_8l_100t_6_' + str(rate) + ".xml")
            print(va.calRightRate(gyvadata, net))
            print(va.calRightRate(gytydata, net))
        print(str(i) + " times " + str(rate))


# begin1();
Code Example #2
def main():

    # build the training data
    datasetTreino = montaDados()

    # build the test data
    datasetTeste = montaDados()

    # define the neural network topology:
    # - the input size is the dataset's input dimension (indim = 3)
    # - 12 neurons in the first hidden layer
    # - 6 neurons in the second hidden layer
    # - the output size is the dataset's output dimension (outdim = 1)
    # - bias units are added so the network can adapt better
    network = buildNetwork(datasetTreino.indim, 12, 6, datasetTreino.outdim, bias=True)

    # create the backpropagation trainer
    # it uses the network structure defined in the network object
    # and the training dataset
    neuralNetwork = BackpropTrainer(network, datasetTreino, learningrate=0.01, momentum=0.9)

    # train the network
    neuralNetwork.trainEpochs(1500)

    # evaluate the network on the test data
    neuralNetwork.testOnData(datasetTeste, verbose=True)
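
The helper montaDados() is not shown in this listing. A minimal sketch of what it might look like, assuming it returns a SupervisedDataSet with 3 inputs and 1 output (filled here with 3-bit parity samples purely for illustration):

from itertools import product
from pybrain.datasets import SupervisedDataSet

def montaDados():
    # hypothetical stand-in: 3 inputs, 1 output, filled with 3-bit parity data
    ds = SupervisedDataSet(3, 1)
    for bits in product([0, 1], repeat=3):
        ds.addSample(bits, (sum(bits) % 2,))
    return ds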
Code Example #3
    def train(self, players=2, games=300, epochs=50, print_fitness=False):
        """
        nn = Coup_NN()
        nn.train(2, print_fitness=True)
        """
        from pybrain.tools.shortcuts import buildNetwork
        from pybrain import SigmoidLayer
        from pybrain.supervised.trainers.backprop import BackpropTrainer
        from pybrain.datasets import SupervisedDataSet
        from simulations import simulations
        from collections import Counter
        import pickle  # needed for the dump at the end of this method
        
        INPUT_NEURONS_PER_PLAYER = 5
        OUTPUT_NEURONS = 5
        HIDDEN_NEURONS = 10
    
        ds = SupervisedDataSet(players * INPUT_NEURONS_PER_PLAYER, OUTPUT_NEURONS)
        self.NETS[players] = buildNetwork(players * INPUT_NEURONS_PER_PLAYER, HIDDEN_NEURONS, OUTPUT_NEURONS, bias=True, outputbias=True, hiddenclass=SigmoidLayer)
        trainer = BackpropTrainer(self.NETS[players], ds, learningrate=0.1)
        WINS = []
        POSITIONS = []
    
        for _ in range(games):
            game_result = simulations.duel(Play_Coup(2))
            WINS.append(game_result.winner.alpha)
            POSITIONS.append(game_result.influence_binary)
            ds.addSample(game_result.influence_binary, game_result.winner.influence_binary)        
    
        trainer.trainEpochs(epochs)

        if print_fitness:
            norm_results = dict(Counter(WINS).most_common())
            nn_results = dict(Counter(self.game_winner(self.NETS[players].activate(p)) for p in POSITIONS).most_common())
    
            print(''.ljust(25), 'normal', 'nn')
            for pair in set(nn_results) | set(norm_results):  # works on both Python 2 and 3
                print(pair.ljust(25), str(norm_results.get(pair,0)).ljust(6), str(nn_results.get(pair,0)).ljust(6))
            
        with open('coup_nn-{0}'.format(players), 'wb') as neunet:  # binary mode for pickle
            pickle.dump(self.NETS[players], neunet)
Code Example #4
File: secondtraining.py Project: maksim171/StockDemo
def begin2():

    cbf = readFromCsv("cbf2")
    numdataset = np.array(cbf, dtype=np.float64)
    # training data, validation data, today's data
    tgdataset, vadataset, tydata = dataSplit(numdataset)
    # normalization parameters
    gydata, dmean, dstd = gyData(tgdataset)

    # validation data and today's data
    gyvadata = calFeature(vadataset, dmean, dstd)
    gytydata = calFeature(tydata, dmean, dstd)

    tset = buildTrainingSet(gyvadata)

    net = NetworkReader.readFrom("../netv3/zxtx_8l_100t_6_0.785714285714.xml")
    trainer = BackpropTrainer(net, tset)
    trainer.trainEpochs(epochs=100)

    li = []
    for ele in gytydata[0]:
        li.append(ele)

    print(dec2int(net.activate(li[:-1])))
Code Example #5
File: parityrnn.py Project: wsgan001/AI
def evalRnnOnSeqDataset(net, DS, verbose=False, silent=False):
    # Function header and initializations reconstructed from context;
    # the original listing is truncated above this point.
    r = 0.
    samples = 0.
    for seq in DS:
        net.reset()
        for i, t in seq:
            res = net.activate(i)
            if verbose:
                print(t, res)
            r += sum((t-res)**2)
            samples += 1
        if verbose:
            print('-'*20)
    r /= samples
    if not silent:
        print('MSE:', r)
    return r

if __name__ == "__main__":
    N = buildParityNet()
    DS = ParityDataSet()
    evalRnnOnSeqDataset(N, DS, verbose = True)
    print('(preset weights)')
    N.randomize()
    evalRnnOnSeqDataset(N, DS)
    print('(random weights)')


    # Backprop improves the network performance, and sometimes even finds the global optimum.
    N.reset()
    bp = BackpropTrainer(N, DS, verbose = True)
    bp.trainEpochs(5000)
    evalRnnOnSeqDataset(N, DS)
    print('(backprop-trained weights)')
Code Example #6
File: skann.py Project: maksim171/StockDemo
    return ls;



# read the raw strings from the CSV file
dataset = readFromCsv("cbf");
# convert to float
numdataset = np.array(dataset,dtype=np.float64);
# split the raw data into two groups
trainingset,vdataset = dataSplit(numdataset);
# print(len(trainingset),len(vdataset));
# normalize each group separately
gytdataset = gyData(trainingset);
gyvdataset = gyData(vdataset);




# the code below trains the neural network

# final training set, built from the normalized data
bts = buildTrainingSet(gytdataset);
# ll = [3382.9879,3384.0262,3358.7953,3373.3446,179423841,2.31148615058,4.4,4.4,4.35,4.36,0.4556,4518585,19794038.0,4363744000.0,4363744000.0];
# print(calTodayFeature(ll,trainingset));
net = buildNetwork(15, 4, 2, bias=True,hiddenclass=SigmoidLayer,outclass=SigmoidLayer)
trainer = BackpropTrainer(net, bts)
trainer.trainEpochs(epochs=100);
NetworkWriter.writeToFile(net, '../net/jxkj_4l_100t.xml')
#

print(ve.calRightRate(gyvdataset,net));
Code Example #7
File: testComparison.py Project: lixiangnlp/cybrain
#PYBRAIN
from time import time  # needed for the timing below
from pybrain.tools.shortcuts import buildNetwork
from pybrain import LinearLayer, SigmoidLayer, FeedForwardNetwork, FullConnection, BiasUnit, SoftmaxLayer
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.datasets import SupervisedDataSet

ds = SupervisedDataSet(2, 1)

ds.addSample((0, 0), (0, ))
ds.addSample((0, 1), (1, ))
ds.addSample((1, 0), (1, ))
ds.addSample((1, 1), (0, ))

net = buildNetwork(2,
                   2,
                   1,
                   bias=True,
                   outputbias=True,
                   hiddenclass=SigmoidLayer)
trainer = BackpropTrainer(net, ds, learningrate=0.1)

t1 = time()
trainer.trainEpochs(2000)
print "Time PyBrain {}".format(time() - t1)

#PRINT RESULTS
X = [(0, 0), (0, 1), (1, 0), (1, 1)]  # assumed: the XOR inputs, defined earlier in the full script
for x in X:
    print "{} ==> {}".format(x, net.activate(x))
Code Example #8
# flatten the image data
X = datasets.reshape((datasets.shape[0], datasets.shape[1] * datasets.shape[2]))
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.9)

# add the samples to PyBrain's dataset format
training = SupervisedDataSet(X.shape[1], y.shape[1])
for i in range(X_train.shape[0]):
    training.addSample(X_train[i], y_train[i])
testing = SupervisedDataSet(X.shape[1], y.shape[1])
for i in range(X_test.shape[0]):
    testing.addSample(X_test[i], y_test[i])

# build a three-layer network
net = buildNetwork(X.shape[1], 150, y.shape[1], bias=True)
# train with the backpropagation algorithm
trainer = BackpropTrainer(net, training, weightdecay=0.01)
# number of training epochs
trainer.trainEpochs(epochs=50)
# save the model
# model_filename = open('CAPTCHA_predictor.model','wb')
# pickle.dump(trainer,model_filename,0)
# model_filename.close()

predictions = trainer.testOnClassData(dataset=testing)

from sklearn.metrics import f1_score,classification_report
print(classification_report(y_test.argmax(axis=1), predictions))
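
As an alternative to pickling the trainer (commented out above), the trained network itself could be written to XML and reloaded later, as other examples on this page do. A minimal sketch, with the file name chosen here purely for illustration:

from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

NetworkWriter.writeToFile(net, 'captcha_net.xml')         # save the trained network
net_restored = NetworkReader.readFrom('captcha_net.xml')  # load it back later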


Code Example #9
fnn = buildNetwork(traindata.indim, 5, traindata.outdim, outclass = SoftmaxLayer)

trainer = BackpropTrainer(fnn, dataset=traindata, momentum=0.1, verbose=True, weightdecay=0.01)

ticks = arange(-3., 6., 0.2)
X, Y = meshgrid(ticks, ticks)

griddata = ClassificationDataSet(2, 1, nb_classes=3)
for i in range(X.size):
    griddata.addSample([X.ravel()[i],Y.ravel()[i]], [0])

griddata._convertToOneOfMany()

for i in range(20):
    trainer.trainEpochs(1) # usually 5
    
trainresult = percentError(trainer.testOnClassData(), traindata["class"])
testresult = percentError(trainer.testOnClassData(dataset=testdata), testdata["class"])

print("epoch %4d" % trainer.totalepochs, "trainerror %5.2f%%" % trainresult, "testerror %5.2f%%" % testresult)
    
out = fnn.activateOnDataset(griddata)
out = out.argmax(axis=1)
out = out.reshape(X.shape)

figure(1)
# ioff(), clf() and hold() below are plotting helpers, typically imported from pylab; the import is not shown in this snippet
ioff()  
clf()
hold(True)
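
The listing breaks off here. One possible way to visualize the class regions computed above (a sketch using pylab directly, not the original file's continuation):

from pylab import contourf, ion, draw, show
contourf(X, Y, out)  # one predicted class index per grid point, reshaped to the meshgrid above
ion()
draw()
show()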
Code Example #10
class AgentNTD(AgentNeural):
    def __init__(self, state_class, load_knowledge=False):
        super(AgentNTD, self).__init__(state_class,
                                       NTD_NUM_OUTPUTS,
                                       init_weights=NTD_NETWORK_INIT_WEIGHTS)

        # Predicting separate state-action values for white and black only makes
        # sense when training against self.
        if NTD_NUM_OUTPUTS == 2:
            assert TRAIN_BUDDY == TRAIN_BUDDY_SELF

        self.trainer = BackpropTrainer(self.network,
                                       learningrate=NTD_LEARNING_RATE,
                                       momentum=0.0,
                                       verbose=False)

        self.epsilon = NTD_EPSILON
        self.lamda = NTD_LAMBDA
        self.alpha = NTD_ALPHA
        self.gamma = NTD_GAMMA

        # self.state_str = None
        # self.state_in = None
        self.last_state_str = None
        # self.last_state_in = None
        self.last_action = None
        # Since we apply updates with a one-step delay, we need to remember
        # whether the action in the previous time step was exploratory.
        self.was_last_action_random = False

        self.processed_final_reward = False
        self.episode_traj = ''

        self.is_learning = True
        self.e = {}
        self.updates = {}
        # astar_value[s'] = max_b Q(s', b) for the undetermined roll.
        self.astar_value = {}
        # Used for alpha annealing.  Note that the roll value that is recorded
        # reflects the roll chosen by the agent, not the original random roll.
        # So, including the roll makes sense for calculating the updates, which
        # are based on the action chosen by the agent.  But it doesn't make
        # sense for epsilon annealing, which is calculated before the agent
        # is asked to take an action.
        self.visit_count = {}  # Example key: (w-5-1, action)
        # Used for epsilon annealing.
        self.visit_count_no_roll = {}  # Example key: w-5
        self.visited_in_episode = {}
        self.network_inputs = {}
        self.network_outputs = {}

        # Record the value of interesting states over time.
        self.num_training_games = 0
        self.value_tracker_file = None
        # network_predictions are gathered at the end of each iteration to
        # produce reports.
        self.network_predictions = {}

        # TODO: Merge this functionality with COLLECT_STATS logic.
        self.traj_count = {}

        if load_knowledge:
            raise ValueError('AgentNTD does not support load_knowledge.')
            # self.is_learning = False

    def begin_episode(self):
        self.e = {}
        self.astar_value = {}
        self.updates = {}
        if self.is_learning:
            self.network_outputs = {}
        self.visited_in_episode = {}
        # self.state_str = None
        # self.state_in = None
        self.last_state_str = None
        # self.last_state_in = None
        self.last_action = None
        self.processed_final_reward = False
        self.episode_traj = ''

    def end_episode(self, reward):
        if self.is_learning and not self.processed_final_reward:
            if TRAIN_BUDDY == TRAIN_BUDDY_SELF:
                # Ignore the reward parameter and construct own reward signal
                # corresponding to the probability of white winning.
                rewards = self.compute_values_for_final_state(self.state)
                # winner = other_player(self.state.player_to_move)
                # if winner == PLAYER_WHITE:
                #     rewards = np.array([REWARD_WIN, REWARD_LOSE])
                # else:
                #     rewards = np.array([REWARD_LOSE, REWARD_WIN])
                #
                # if self.outputdim == 1:
                #     rewards = rewards[:1]
            else:
                rewards = np.array([reward])

            self.ntd_step(action=None, is_action_random=False, rewards=rewards)

            if PRINT_GAME_RESULTS:
                print 'Episode traj: %s' % self.episode_traj
            self.traj_count[self.episode_traj] = self.traj_count.get(
                self.episode_traj, 0) + 1

            self.apply_updates()
            self.processed_final_reward = True

    def update_values(self, delta):
        # Number of elements in delta depends on NTD_NUM_OUTPUTS.
        if all(v == 0 for v in delta):  # if all elements of delta are zero
            return
        alpha = self.alpha
        for (si, ai) in self.e.iterkeys():
            if NTD_USE_ALPHA_ANNEALING:
                alpha = 1.0 / self.visit_count.get((si, ai), 1)
                alpha = max(alpha, NTD_ALPHA)
            if self.e[(si, ai)] != 0.0:
                change = [alpha * x * self.e[(si, ai)] for x in delta]
                # network_in = self.network_inputs[si]
                current_update = self.updates.get((si, ai),
                                                  [0.0] * self.outputdim)
                self.updates[(si, ai)] = [
                    a + b for a, b in zip(current_update, change)
                ]

    def apply_updates(self):
        dataset = SupervisedDataSet(self.inputdim, self.outputdim)
        for (si, ai) in self.updates.iterkeys():
            si_ai = '%s-%s' % (si, ai)
            network_in = self.network_inputs[si_ai]
            current_value = self.get_network_value(None, None, si_ai)
            new_value = [
                a + b for a, b in zip(current_value, self.updates[(si, ai)])
            ]
            dataset.addSample(network_in, new_value)
            if PRINT_GAME_RESULTS:
                print 'updating (%s, %s) from %s to %s' % (
                    si, ai, map(PrettyFloat,
                                current_value), map(PrettyFloat, new_value))
        # import pdb; pdb.set_trace()
        if dataset:  # len(dataset) > 0:
            self.trainer.setData(dataset)
            self.trainer.trainEpochs(NTD_TRAIN_EPOCHS)
        # print '----'

    def compute_values_for_final_state(self, state):
        if state.has_player_won(PLAYER_WHITE):
            values = np.array([REWARD_WIN, REWARD_LOSE])
        else:
            values = np.array([REWARD_LOSE, REWARD_WIN])

        if self.outputdim == 1:
            values = values[:1]

        return values

    def get_Q_value(self, state, action):
        """Returns state-action value.

        Args:
            state: State for which value is requested.
            action: Action for which value is requested.


        Returns:
            List containing NTD_NUM_OUTPUTS elements.
            When NTD_NUM_OUTPUTS == 1, the one-dimensional return value can be
            interpreted as [p_w], showing the probability of white winning.
            When NTD_NUM_OUTPUTS == 2, the two-dimensional return value can be
            interpreted as [p_w, p_b], showing the probabilities of white and
            black winning.
        """
        if state.is_final():
            # The algorithm never trains the network on final states, so it
            # cannot know their values.  Need to retrieve the value of final
            # states directly.
            values = self.compute_values_for_final_state(state)
        else:
            network_out = self.get_network_value(state, action)
            values = network_out
        # # If player to move is white, it means black is considering a move
        # # outcome, so black is evaluating the position.
        # if state.player_to_move == PLAYER_WHITE:
        #     multiplier = -1.0
        # else:
        #     multiplier = 1.0
        # return multiplier * state_value
        return values
        # if state.player_to_move == PLAYER_WHITE:
        #    return network_out[1]
        # else:
        #    return network_out[0]

    # def cache_network_values(self, state):
    #     state_str = str(state)[:-2]
    #     if state_str not in self.network_inputs:
    #         self.network_inputs[state_str] = state.encode_network_input()
    #     network_in = self.network_inputs[state_str]
    #     if state_str not in self.network_outputs:
    #         self.network_outputs[state_str] = self.network.activate(network_in)

    # This function needs to receive the actual state object, because it needs
    # to calculate the corresponding network inputs for it.
    def get_network_value(self, state, action, state_action_str=None):
        if state_action_str:
            assert state is None
            assert action is None
            assert state_action_str in self.network_outputs
            return self.network_outputs[state_action_str]
        else:
            state_action_str = '%s-%s' % (state, action)
            if state_action_str in self.network_outputs:
                return self.network_outputs[state_action_str]
            if state_action_str not in self.network_inputs:
                self.network_inputs[
                    state_action_str] = state.encode_network_input(action)
            network_in = self.network_inputs[state_action_str]
            self.network_outputs[state_action_str] = self.network.activate(
                network_in)
            return self.network_outputs[state_action_str]

    def ntd_step(self, action, is_action_random, rewards=None):
        """Updates the underlying model after every transition.

        This method is called in self.select_action() and self.end_episode().

        Args:
            action: Action taken by the agent.
            is_action_random: Whether action was an exploratory action.
            rewards: List of reward components received from the environment.

        Returns:
            None
        """
        if rewards is None:
            rewards = [0.0] * self.outputdim
        assert len(rewards) == self.outputdim

        s = self.last_state_str
        a = self.last_action
        sp = self.state
        ap = action

        # state_str_no_roll = str(self.state)[:-2]
        if action is None:
            self.episode_traj += ' -> %s.' % str(self.state)
        else:
            self.episode_traj += ' -> %s, %s' % (str(self.state), action)

        if s is not None:
            # update e
            if ALGO == ALGO_Q_LEARNING and self.was_last_action_random:
                # Q(lambda): Set all traces to zero.
                self.e = {}
            else:
                for key in self.e.iterkeys():
                    self.e[key] *= (self.gamma * self.lamda)

            # replacing traces
            self.e[(s, a)] = 1.0
            # set the trace for the other actions to 0
            for other_action in self.state.action_object.get_all_actions():
                if other_action != a:
                    if (s, other_action) in self.e:
                        self.e[(s, other_action)] = 0

            s_a = '%s-%s' % (s, a)
            if self.state.is_final():
                # delta = reward - self.Q.get((s, a), self.default_q)
                # print 'Shouldn\'t happen'
                # if self.is_learning:
                #     import pdb; pdb.set_trace()
                delta = rewards - self.get_network_value(None, None, s_a)
            else:
                # delta = (reward + self.gamma * self.Q.get((sp, ap), self.default_q) -
                #                   self.Q.get((s, a), self.default_q))
                # delta = (reward + self.gamma * self.get_network_value(sp_in) -
                #                   self.get_network_value(s_in))
                # In our domains, only the very last state transition receives
                # a reward.
                assert all(v == 0 for v in rewards)
                if ALGO == ALGO_SARSA:
                    # Just consider the action we took in sp.
                    next_state_v = self.get_network_value(sp, ap)
                elif ALGO == ALGO_Q_LEARNING:
                    # Consider the best we could do from sp.
                    next_state_v = self.astar_value[str(sp)
                                                    [:-2]]  # state_str_no_roll

                delta = (rewards + self.gamma * next_state_v -
                         self.get_network_value(None, None, s_a))

            self.update_values(delta)
        else:
            # Just cache the value of current state-action, so we can access
            # it on the next call to this method, when it's requested as s_a.
            self.get_network_value(sp, ap)

        # save visited state and chosen action
        self.last_state_str = str(self.state)
        # self.last_state_in = self.state_in
        self.last_action = action
        self.was_last_action_random = is_action_random
        if action is not None:  # end_episode calls this with action=None.
            key = (self.last_state_str, self.last_action)
            if key not in self.visited_in_episode:
                self.visit_count[key] = self.visit_count.get(key, 0) + 1
            self.visited_in_episode[key] = True

    def select_action(self):
        # self.last_played_as = self.state.player_to_move
        # self.cache_network_values(self.state)
        # self.state_str = str(self.state)[:-2]

        # if self.state_str not in self.network_inputs:
        #     self.network_inputs[self.state_str] = self.encode_network_input(self.state)
        # self.state_in = self.network_inputs[self.state_str]

        if self.is_learning:
            if NTD_USE_EPSILON_ANNEALING:
                # Since under some conditions the current roll can be entirely
                # ignored (--chooseroll=1.0), it makes sense to exclude the
                # current roll from visit counts.
                state_str_no_roll = str(self.state)[:-2]
                self.visit_count_no_roll[state_str_no_roll] = (
                    self.visit_count_no_roll.get(state_str_no_roll, 0) + 1)
                # Following logic with example: anneal_time = 100, visit_count = 5.
                time_to_end = max(
                    0, NTD_EPSILON_ANNEAL_TIME -
                    self.visit_count_no_roll.get(state_str_no_roll, 0))
                ratio = float(time_to_end) / NTD_EPSILON_ANNEAL_TIME  # 0.95
                epsilon = NTD_EPSILON_END + (NTD_EPSILON_START -
                                             NTD_EPSILON_END) * ratio
                # print "State: %s, visits: %d, time_to_end: %d, ratio: %.2f, epsilon: %.2f" % (
                #     state_str, self.visit_count.get(state_str, 0), time_to_end, ratio, epsilon)
            else:
                epsilon = self.epsilon
        else:
            epsilon = 0

        choose_random_action = random.random() < epsilon

        # Select the best action.
        action, _ = self.select_action_with_search(
            state=self.state,
            choose_random_action=choose_random_action,
            plies=NTD_SEARCH_PLIES)

        # Update values.
        if self.is_learning:
            self.ntd_step(action, is_action_random=choose_random_action)

        return action

    # def save_knowledge(self):
    #
    #     filename = './td-network.txt' % Domain.name
    #     f = open(filename, 'w')
    #     pickle.dump(self.network, f)
    #     f.close()
    #
    # def load_knowledge(self):
    #     filename = './td-network-%s.txt' % Domain.name
    #     f = open(filename, 'r')
    #     self.network = pickle.load(f)
    #     f.close()

    def pause_learning(self):
        self.is_learning = False

    def resume_learning(self):
        self.is_learning = True

    def print_e(self):
        e_keys = self.e.keys()
        e_keys.sort()
        print "e:"
        for key in e_keys:
            print "e%s -> %.10f" % (key, self.e[key])

    def print_visit_count(self):
        print "Visit Counts:"
        # keys = self.visit_count.keys()
        # keys.sort()
        # for key in Q_keys:
        #     print "Q%s -> %.2f" % (key, self.Q[key])
        for key, value in sorted(self.visit_count.iteritems(),
                                 key=lambda (k, v): (v, k)):
            print "%s: %s" % (key, value)

    def probe_network(self):
        exp_params = ExpParams.get_exp_params_from_command_line_args()
        graph = exp_params.state_class.GAME_GRAPH

        print "Network predictions:"
        self.network_predictions = {}  # Network predictions.
        true_values = {
        }  # True values obtained from the graph using value iteration.
        for state_roll_action_str in sorted(self.network_inputs.iterkeys()):
            # state_value = self.network_outputs[state_str]
            state_roll_action_value = self.network.activate(
                self.network_inputs[state_roll_action_str])
            self.network_predictions[
                state_roll_action_str] = state_roll_action_value
            node_id = graph.get_node_id(
                state_roll_action_str[:-4])  # Removes roll and action.
            true_value = graph.get_attr(node_id, VAL_ATTR)
            true_values[state_roll_action_str] = true_value
            # print "%s -> %s (%.2f)" % (state_str, state_value, abs_value)
        for (si, ai), _ in sorted(self.visit_count.iteritems(),
                                  key=lambda (k, v): (v, k)):
            state_roll_action_str = '%s-%s' % (si, ai)
            true_value = true_values[state_roll_action_str]
            # Reward for white win is [1, 0],
            # Reward for black win is [0, 1],
            # state_value[0] - state_value[1] ranges from -1 to +1, although
            # it can exceed those bounds when the network outputs are
            # outside the range [0, 1].
            # The following formula is meant to scale the difference to range [0, 1].
            print "(%s, %s): opt. val. for white: %+.2f prediction: %s visited: %d" % (
                si, ai, true_value,
                map(PrettyFloat,
                    self.network_predictions[state_roll_action_str]),
                self.visit_count.get((si, ai), 0))
        print(
            'Note: optimal values for white are based on the board '
            'positions only and ignore the current roll.')

    def track_interesting_states(self):
        interesting_states = self.state.interesting_states()
        if interesting_states:
            if not self.value_tracker_file:
                value_tracker_filename = (
                    self.state.exp_params.get_value_tracker_filename(
                        FILE_PREFIX_NTD))
                self.value_tracker_file = open(value_tracker_filename, 'w')
            self.num_training_games += NTD_NUM_TRAINING_GAMES
            self.value_tracker_file.write('%d' % self.num_training_games)
            for s in interesting_states:
                s_val = self.network_predictions[s][
                    0] if s in self.network_predictions else 0.5
                self.value_tracker_file.write(' %f' % s_val)
            self.value_tracker_file.write('\n')
            self.value_tracker_file.flush()

    def print_traj_counts(self):
        print "Trajectories in training:"
        import operator
        sorted_traj_count = sorted(self.traj_count.items(),
                                   key=operator.itemgetter(1),
                                   reverse=True)
        for traj, cnt in sorted_traj_count:
            print "%s: %d" % (traj, cnt)
        # Reset after each query.
        self.traj_count = {}

    def print_learner_state(self):
        self.print_visit_count()
        self.print_e()
        self.probe_network()
        self.print_traj_counts()
Code Example #11
File: run_pretrain.py Project: hawkerpl/odyseus
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers.backprop import BackpropTrainer
from odyseus_model import OdyseusRecursiveTwoThrusters
import numpy as np
import optparse
from tools.common_run import add_common_options

if __name__ == "__main__":
    usage = "python run_pretrain.py resulting_file.neu \n this script pretrain net of model described in|" \
            " OdyseusRecursiveTwoThrusters, for 5 input nodes. Resulting network is saved to desired file"
    parser = optparse.OptionParser(usage=usage)
    opts, args = parser.parse_args()
    ds = SupervisedDataSet(5, 2)
    ds.addSample((50,50,50,0,0),(0,1))
    ds.addSample((0,50,50,50,0),(1,1))
    ds.addSample((0,0,50,0,0), (1,1))
    ds.addSample((0,0,50,50,50),(1,0))
    net = OdyseusRecursiveTwoThrusters.random_net()
    trainer = BackpropTrainer(net, ds)
    trainer.trainEpochs(3600)
    np.savetxt(args[0], trainer.module.params, delimiter=',')
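
The saved parameter vector can later be loaded back into a freshly built network of the same topology. A minimal sketch, assuming OdyseusRecursiveTwoThrusters.random_net() rebuilds an identical architecture:

import numpy as np
from odyseus_model import OdyseusRecursiveTwoThrusters

net = OdyseusRecursiveTwoThrusters.random_net()
params = np.loadtxt('resulting_file.neu', delimiter=',')  # the file written by run_pretrain.py (args[0])
net._setParameters(params)  # overwrite the random weights with the pretrained ones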
Code Example #12
irisData = datasets.load_iris()
dataFeatures = irisData.data
dataTargets = irisData.target

#plt.matshow(irisData.images[11], cmap=cm.Greys_r)
#plt.show()
#print dataTargets[11]
#print dataFeatures.shape

dataSet = ClassificationDataSet(4, 1 , nb_classes=3)

for i in range(len(dataFeatures)):
	dataSet.addSample(np.ravel(dataFeatures[i]), dataTargets[i])
	
trainingData, testData = dataSet.splitWithProportion(0.7)  # splitWithProportion is a method of the dataset

trainingData._convertToOneOfMany()
testData._convertToOneOfMany()

neuralNetwork = buildNetwork(trainingData.indim, 7, trainingData.outdim, outclass=SoftmaxLayer) 
trainer = BackpropTrainer(neuralNetwork, dataset=trainingData, momentum=0.01, learningrate=0.05, verbose=True)

trainer.trainEpochs(10000)
print('Error (test dataset): ' , percentError(trainer.testOnClassData(dataset=testData), testData['class']))

print('\n\n')
counter = 0
for sample in dataFeatures:
	print(counter, " NN output: ", neuralNetwork.activate(sample))
	counter = counter + 1
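
To turn the raw softmax activations printed above into class labels, one could take the argmax per sample (a short illustrative addition, not part of the original file):

import numpy as np
predicted = [int(np.argmax(neuralNetwork.activate(sample))) for sample in dataFeatures]
accuracy = np.mean(np.array(predicted) == dataTargets)
print('Training-set accuracy: %.3f' % accuracy)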
Code Example #13
File: Practica4_NN.py Project: nilde/Prctica3APC
def trainm_NN(x1, x2, parametresNN, actualNet):

    start_time = time.time()

    # prepare data
    x1 = map(list, x1)
    x2 = map(list, x2)
    X = x1 + x2
    print shape(X)
    y1 = ones((shape(x1)[0], 1))  # the positives are class '1'
    y2 = -1 * ones((shape(x2)[0], 1))  # the negatives are class '-1'
    Y = list(y1) + list(y2)
    Y = ravel(Y)

    #----- NETWORK NUMBER 1

    n1 = FeedForwardNetwork()
    inLayer = TanhLayer(199)
    hiddenLayer1 = TanhLayer(40)
    hiddenLayer2 = LinearLayer(3)
    hiddenLayer3 = TanhLayer(5)
    outLayer = TanhLayer(1)

    n1.addInputModule(inLayer)
    n1.addModule(hiddenLayer1)
    n1.addModule(hiddenLayer2)
    n1.addModule(hiddenLayer3)
    n1.addOutputModule(outLayer)

    in_to_hidden1 = FullConnection(inLayer, hiddenLayer1)
    hidden1_to_hidden2 = FullConnection(hiddenLayer1, hiddenLayer2)
    hidden2_to_hidden3 = FullConnection(hiddenLayer2, hiddenLayer3)
    hidden3_to_out = FullConnection(hiddenLayer3, outLayer)

    n1.addConnection(in_to_hidden1)
    n1.addConnection(hidden1_to_hidden2)
    n1.addConnection(hidden2_to_hidden3)
    n1.addConnection(hidden3_to_out)

    n1.sortModules()  # required call: initializes the internal modules

    #----- NETWORK NUMBER 2

    n2 = FeedForwardNetwork()

    inLayer = TanhLayer(199)
    hiddenLayer1 = TanhLayer(12)
    hiddenLayer2 = LinearLayer(3)
    outLayer = LinearLayer(1)

    n2.addInputModule(inLayer)
    n2.addModule(hiddenLayer1)
    n2.addModule(hiddenLayer2)
    n2.addOutputModule(outLayer)

    in_to_hidden1 = FullConnection(inLayer, hiddenLayer1)
    hidden1_to_hidden2 = FullConnection(hiddenLayer1, hiddenLayer2)
    hidden2_to_out = FullConnection(hiddenLayer2, outLayer)

    n2.addConnection(in_to_hidden1)
    n2.addConnection(hidden1_to_hidden2)
    n2.addConnection(hidden2_to_out)

    n2.sortModules()  # required call: initializes the internal modules

    #----- NETWORK NUMBER 3

    n3 = FeedForwardNetwork()

    inLayer = TanhLayer(199)
    hiddenLayer1 = TanhLayer(27)
    hiddenLayer2 = TanhLayer(5)
    outLayer = LinearLayer(1)

    n3.addInputModule(inLayer)
    n3.addModule(hiddenLayer1)
    n3.addModule(hiddenLayer2)
    n3.addOutputModule(outLayer)

    in_to_hidden1 = FullConnection(inLayer, hiddenLayer1)
    hidden1_to_hidden2 = FullConnection(hiddenLayer1, hiddenLayer2)
    hidden2_to_out = FullConnection(hiddenLayer2, outLayer)

    n3.addConnection(in_to_hidden1)
    n3.addConnection(hidden1_to_hidden2)
    n3.addConnection(hidden2_to_out)

    n3.sortModules()  # required call: initializes the internal modules

    #----- NETWORK NUMBER 4 (ReLU, to be implemented)

    # Select the net that is going to be trained
    if (actualNet == 1):
        n = n1
    elif (actualNet == 2):
        n = n2
    elif (actualNet == 3):
        n = n3
    else:
        #TODO
        n = n1
    # Initialize the weights
    if (actualNet == 1):
        # net 1 topology is 199-40-3-5-1 (see above)
        r = math.sqrt(1 / ((199 + 40 + 3 + 5 + 1) * (1.0)))
        sizeOfNet = 199 * 40 + 40 * 3 + 3 * 5 + 5
    elif (actualNet == 2):
        r = math.sqrt(1 / ((199 + 12 + 3 + 1) * (1.0)))
        sizeOfNet = 199 * 12 + 12 * 3 + 3
    elif (actualNet == 3):
        r = math.sqrt(1 / ((199 + 27 + 5 + 1) * (1.0)))
        sizeOfNet = 199 * 27 + 27 * 5 + 5
    else:
        #TODO
        r = math.sqrt(1 / ((199 + 40 + 3 + 5 + 1) * (1.0)))
        sizeOfNet = 199 * 40 + 40 * 3 + 3 * 5 + 5

    weights_init = random.uniform(low=-r, high=r, size=(sizeOfNet, ))
    n._setParameters(weights_init)
    DS = ClassificationDataSet(199, nb_classes=1)
    for i in range(shape(X)[0]):
        DS.addSample(list(X[i]), Y[i])

    # DS._convertToOneOfMany()  # no -> we only want a single output
    #DS.setField('class', DS.getField('target'))

    trainer = BackpropTrainer(n,
                              dataset=DS,
                              momentum=parametresNN['Momentum'],
                              learningrate=parametresNN['learningrate'],
                              verbose=parametresNN['verbose'],
                              weightdecay=parametresNN['weightdecay'],
                              batchlearning=parametresNN['batchlearning'])
    trainningErrors, validationErrors = trainer.trainUntilConvergence(
        maxEpochs=parametresNN['maxEpochs'])
    f = trainer.trainEpochs(parametresNN['maxEpochs'])

    print "Red Activa: ", actualNet, "   Tiempo transcurrido: ", time.time(
    ) - start_time, "   Error final training:", trainningErrors[
        -1], "   Error final validation:", validationErrors[-1]
    return n
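
A hypothetical call illustrating the parametresNN keys this function expects, assuming x1 and x2 are the two lists of 199-dimensional samples (the values below are placeholders, not the ones used in the practical):

parametresNN = {
    'Momentum': 0.9,
    'learningrate': 0.01,
    'verbose': True,
    'weightdecay': 0.0,
    'batchlearning': False,
    'maxEpochs': 50,
}
trained_net = trainm_NN(x1, x2, parametresNN, actualNet=2)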
Code Example #14
File: testComparison.py Project: luffyhwl/cybrain
print

#PYBRAIN
from time import time  # needed for the timing below
from pybrain.tools.shortcuts import buildNetwork
from pybrain import LinearLayer, SigmoidLayer, FeedForwardNetwork, FullConnection, BiasUnit, SoftmaxLayer
from pybrain.supervised.trainers.backprop import BackpropTrainer
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.datasets import SupervisedDataSet


ds = SupervisedDataSet(2, 1)

ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))


net = buildNetwork(2, 2, 1, bias=True, outputbias=True, hiddenclass=SigmoidLayer)
trainer = BackpropTrainer(net, ds, learningrate=0.1)

t1 = time()
trainer.trainEpochs(2000)
print "Time PyBrain {}".format(time()-t1)

#PRINT RESULTS
X = [(0, 0), (0, 1), (1, 0), (1, 1)]  # assumed: the XOR inputs, defined earlier in the full script
for x in X:
    print "{} ==> {}".format(x, net.activate(x))

Code Example #15
    training = SupervisedDataSet(X.shape[1], y.shape[1])
    for i in range(X_train.shape[0]):
        training.addSample(X_train[i], y_train[i])
    testing = SupervisedDataSet(X.shape[1], y.shape[1])
    for i in range(X_test.shape[0]):
        testing.addSample(X_test[i], y_test[i])
    # Create the neural network with the given dimensions: the first argument is the number of
    # input-layer neurons, the second the number of hidden-layer neurons, and the third the
    # number of output-layer neurons.
    # bias=True adds a constantly active bias neuron to each layer.
    net = buildNetwork(X.shape[1], 100, y.shape[1], bias=True)

    trainer = BackpropTrainer(net,
                              training,
                              learningrate=0.01,
                              weightdecay=0.01)
    # number of training epochs
    trainer.trainEpochs(epochs=20)
    # predictions on the test set
    predictions = trainer.testOnClassData(dataset=testing)
    # f1_score's average parameter defaults to 'binary'; not specifying average here would raise a ValueError
    print("F-score:{0:.2f}".format(
        f1_score(y_test.argmax(axis=1), predictions, average="weighted")))
    print("F-score:{0:.2f}".format(
        f1_score(y_test.argmax(axis=1), predictions, average="micro")))
    print("F-score:{0:.2f}".format(
        f1_score(y_test.argmax(axis=1), predictions, average="macro")))
    print("------------------------")

    def predict_captcha(captcha_image, neural_network):
        subimages = segment_image(captcha_image)
        predicted_word = ""
        for subimage in subimages: