示例#1
0
    def caliculate_backpropergation(self, de_dy):
        """Back-propagate an externally supplied error signal through the net.

        Parameters
        ----------
        de_dy : array-like
            Error signal (dE/dy) at the output layer.

        Returns
        -------
        list
            Per-element weight-update values (delta_pool); the weights
            themselves are NOT modified here.
        """
        # One pool slot per neural element; slot 0 is padding so indices
        # line up with self.neural_element.  Weight slots start as a zero
        # matrix wrapped in a list, everything else stays an empty list.
        delta_pool = [[]]
        for idx in range(1, self.number_of_element):
            if self.name_of_element[idx] == 'weight':
                shape = self.neural_element[idx].value.shape
                delta_pool.append([np.matrix(np.zeros(shape))])
            else:
                delta_pool.append([])

        # Walk from the output layer back toward the input, stepping over
        # (weight, layer) pairs.  The output layer is back-propagated with
        # the network's own problem type; every hidden layer is treated as
        # a 'fitting' problem, exactly as in the split-out original.
        errs = de_dy
        last = self.number_of_element - 1
        for i in range(last, 1, -2):
            problem = self.solved if i == last else 'fitting'
            delta_pool[i - 1], delta_part = nf.back_propergation(
                errs,
                np.array(self.neural_element[i].value),
                self.neural_element[i - 2].value.T,
                self.neural_element[i].func,
                self.learning_rate,
                solved=problem)
            # Propagate the error through the weight matrix below.
            errs = np.array(np.dot(delta_part, self.neural_element[i - 1].value.T))

        return delta_pool
示例#2
0
    def learn(self, input_data, teach_data):
        """Run one training step: forward pass, back-propagation, update.

        Parameters
        ----------
        input_data : array-like fed to the input layer.
        teach_data : array-like target for the output layer.

        Side effects: every 'weight' element of self.neural_element is
        updated in place (gradient is subtracted).  Returns None.

        Raises
        ------
        ValueError
            If no output-layer error rule exists for the current
            self.solved / output-activation combination.
        """
        # set input and teach signals, and caliculate output and err signals.
        self.input_signals(input_data)
        self.teach_signals(teach_data)
        self.output_signals()
        self.error_signals()

        # make update value pool; slot 0 is padding so the indices line up
        # with self.neural_element.
        delta_pool = [[]]
        for i in range(1, self.number_of_element):
            if self.name_of_element[i] == 'weight':
                delta_pool.append([np.matrix(np.zeros(self.neural_element[i].value.shape))])
            else:
                delta_pool.append([])

        # Output-layer error signal, chosen by problem type and activation.
        last = self.number_of_element - 1
        output_value = self.neural_element[last].value
        errs = None
        if self.solved == 'fitting':
            errs = -np.array(output_value - teach_data)
        elif self.solved == 'classification':
            # softmax: cross-entropy gradient simplifies to (y - t)
            if self.neural_element[last].func == nf.softmax:
                errs = np.array(output_value - teach_data)
            # sigmoid
            elif self.neural_element[last].func == nf.sigmoid:
                errs = -np.array(output_value - teach_data)
            # tanh: no error rule was ever implemented for this case.
        if errs is None:
            # BUG FIX: the original fell through here (tanh output, or an
            # unknown 'solved' value) with 'errs' unassigned and crashed
            # below with a confusing NameError.  Fail fast instead.
            raise ValueError(
                "no output error rule for solved=%r with this activation"
                % (self.solved,))

        # caliculate on output layer (uses the network's own problem type).
        delta_pool[last - 1], delta_part = nf.back_propergation(
            errs,
            np.array(self.neural_element[last].value),
            self.neural_element[last - 2].value.T,
            self.neural_element[last].func,
            self.learning_rate,
            solved=self.solved)
        errs = np.array(np.dot(delta_part, self.neural_element[last - 1].value.T))

        # Hidden layers: always back-propagated as a 'fitting' problem.
        for i in range(self.number_of_element - 3, 1, -2):
            delta_pool[i - 1], delta_part = nf.back_propergation(
                errs,
                np.array(self.neural_element[i].value),
                self.neural_element[i - 2].value.T,
                self.neural_element[i].func,
                self.learning_rate,
                solved='fitting')
            errs = np.array(np.dot(delta_part, self.neural_element[i - 1].value.T))

        # update all weights (descend: subtract the computed deltas)
        for i in range(1, self.number_of_element):
            if self.name_of_element[i] == 'weight':
                self.neural_element[i].value = self.neural_element[i].value - delta_pool[i]
示例#3
0
 def learn(self, input_data, teach_data):
     """Run one learning step: forward pass, back-propagation, update.

     Sets the input/teach signals, computes output and error signals,
     back-propagates self.error through every layer, then applies the
     resulting deltas to each 'weight' element (deltas are ADDED in
     this variant).  Returns None.
     """
     # Forward pass and error computation.
     self.input_signals(input_data)
     self.teach_signals(teach_data)
     self.output_signals()
     self.error_signals()

     # Per-element update pool; slot 0 is padding so indices line up
     # with self.neural_element.
     delta_pool = [[]]
     for idx in range(1, self.number_of_element):
         if self.name_of_element[idx] == 'weight':
             shape = self.neural_element[idx].value.shape
             delta_pool.append([np.matrix(np.zeros(shape))])
         else:
             delta_pool.append([])

     # Back-propagate from the output layer toward the input, stepping
     # over (weight, layer) pairs.
     errs = np.array(self.error)
     for layer in range(self.number_of_element - 1, 1, -2):
         delta_pool[layer - 1], delta_part = nf.back_propergation(
             errs,
             np.array(self.neural_element[layer].value),
             self.neural_element[layer - 2].value.T,
             self.neural_element[layer].func,
             self.learning_rate)
         errs = np.array(np.dot(delta_part, self.neural_element[layer - 1].value.T))

     # Apply the deltas to every weight element.
     for idx in range(1, self.number_of_element):
         if self.name_of_element[idx] == 'weight':
             self.neural_element[idx].value = self.neural_element[idx].value + delta_pool[idx]
示例#4
0
    def learn(self, input_data, teach_data):
        """One training step with the bias entry handled separately.

        Runs the forward pass, computes the output-layer error signal for
        the configured problem type, then back-propagates each layer in
        two pieces: the ordinary units through the layer's own activation,
        and the trailing bias entry through nf.linear.  The two deltas are
        stitched into one matrix per weight and stored in self.delta_pool,
        which is finally subtracted from every 'weight' element in place.
        Returns None.
        """
        # set input and teach signals, and caliculate output and err signals.
        self.input_signals(input_data); self.teach_signals(teach_data); self.output_signals(); self.error_signals()
        # Update-value pool; slot 0 is padding so indices line up with
        # self.neural_element.
        delta_pool = [[]]
        for i in range(1, self.number_of_element):
            if self.name_of_element[i] == 'weight': delta_pool.append([np.matrix(np.zeros(self.neural_element[i].value.shape))])
            else                                  : delta_pool.append([])
        # back propergation
        # solved problem: pick the output-layer error signal.
        # NOTE(review): for 'classification' with a tanh output — and for
        # any unrecognized 'solved' value — errs stays 0., silently
        # producing a zero update; confirm whether that is intended.
        errs = 0.
        if self.solved == 'fitting' or self.solved == 'fit':
            errs = -np.array(self.neural_element[self.number_of_element-1].value - teach_data)
#            print "err:"; print errs
#            print "out:"; print self.neural_element[self.number_of_element-1].value
 
        elif self.solved == 'classification' or self.solved == 'class':
            # softmax
            if self.neural_element[self.number_of_element-1].func == nf.softmax:
                errs = np.array(self.neural_element[self.number_of_element-1].value - teach_data)
            # sigmoid
            elif self.neural_element[self.number_of_element-1].func == nf.sigmoid:
                errs = np.array(self.neural_element[self.number_of_element-1].value - teach_data)
            # tanh
            elif self.neural_element[self.number_of_element-1].func == nf.tanh:
                pass

        # backup delta_pool
        # NOTE(review): past_delta_pool is assigned but never read below —
        # presumably groundwork for a momentum term; confirm before removing.
        past_delta_pool = self.delta_pool
        # caliculate on output layer
        # pos/prepos/preprepos split each layer into its ordinary units and
        # its trailing entry — this assumes the bias lives in the last
        # column/row of each value matrix (TODO confirm against the
        # element-construction code).
        pos       = self.neural_element[self.number_of_element-1].value.shape[1]
        prepos    = self.neural_element[self.number_of_element-2].value.shape
        preprepos = self.neural_element[self.number_of_element-3].value.shape[1] - 1
        # Non-bias part of the output layer, using the network's own
        # problem type and activation.
        [tmp_delta_pool_normal, delta_part_normal] =\
            nf.back_propergation(\
            errs,\
                np.array(self.neural_element[self.number_of_element-1].value[0:1, 0:pos]),\
                self.neural_element[self.number_of_element-3].value[0:1, 0:preprepos].T,\
                self.neural_element[self.number_of_element-1].func,\
                self.learning_rate,\
                solved = self.solved);
        errs_normal = np.array(np.dot(delta_part_normal, self.neural_element[self.number_of_element-2].value[0:preprepos].T))

        # Bias entry of the output layer, back-propagated through a linear
        # activation as a 'fitting' problem.
        [tmp_delta_pool_bios, delta_part_bios] =\
            nf.back_propergation(\
            errs,\
                np.array(self.neural_element[self.number_of_element-1].value[0:1, pos-1]),\
                self.neural_element[self.number_of_element-3].value[0:1, preprepos].T,\
                nf.linear,\
                self.learning_rate,\
                solved = 'fitting');
        # NOTE(review): errs_bios is computed here (and in the loop below)
        # but never used — errs is always taken from the non-bias branch.
        errs_bios = np.array(np.dot(delta_part_bios, self.neural_element[self.number_of_element-2].value[preprepos].T))
        errs      = errs_normal

        # Stitch the normal and bias deltas back into one update matrix for
        # this weight element.
        tmp = np.matrix(np.zeros((prepos)))
        tmp[0          :prepos[0]-1] = tmp_delta_pool_normal
        tmp[prepos[0]-1:prepos[0]  ] = tmp_delta_pool_bios
#        tmp[prepos[0]-1:prepos[0]  ] = 0.
        self.delta_pool[self.number_of_element-2] = tmp
#        print ""
#        print tmp_delta_pool_bios

        # Hidden layers: same normal/bias split, always back-propagated as
        # a 'fitting' problem.
        for i in range(self.number_of_element-3, 1, -2):
            pos       = self.neural_element[i].value.shape[1] - 1
            prepos    = self.neural_element[i-1].value.shape
            preprepos = self.neural_element[i-2].value.shape[1] - 1
            
            [tmp_delta_pool_normal, delta_part_normal] =\
                nf.back_propergation(\
                errs,\
                    np.array(self.neural_element[i].value[0:1, 0:pos]),\
                    self.neural_element[i-2].value[0:1, 0:preprepos].T,\
                    self.neural_element[i].func,\
                    self.learning_rate,\
                    solved = 'fitting');
            errs_normal = np.array(np.dot(delta_part_normal, self.neural_element[i-1].value[0:preprepos].T))
            
            [tmp_delta_pool_bios, delta_part_bios] =\
                nf.back_propergation(\
                errs,\
                    np.array(self.neural_element[i].value[0:1, pos]),\
                    self.neural_element[i-1].value[0:1, preprepos].T,\
                    nf.linear,\
                    self.learning_rate,\
                    solved = 'fitting');
            errs_bios = np.array(np.dot(delta_part_bios, self.neural_element[i-1].value[preprepos].T))
            errs      = errs_normal
            
            tmp = np.matrix(np.zeros((prepos)))
            tmp[0          :prepos[0]-1] = tmp_delta_pool_normal
            tmp[prepos[0]-1:prepos[0]  ] = tmp_delta_pool_bios
#            tmp[prepos[0]-1:prepos[0]  ] = 0.
            self.delta_pool[i-1] = tmp
           
#        self.show_element('weight')
        # update all weights (gradient is subtracted in place)
        for i in range(1, self.number_of_element): 
            if self.name_of_element[i] == 'weight': 
                self.neural_element[i].value -= self.delta_pool[i]