def add(self, key, value):
    # Store the (key, value) pair, skipping exact-duplicate keys.
    if not self.db:
        self.db.append((key, value))
    else:
        (nkey, nval), ndist = self.nearest(key)
        # Only append if the nearest stored key differs from this one.
        if norm(nkey - key) > 0.0:
            self.db.append((key, value))
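A self-contained sketch of the same dedup-on-insert idea, using a plain list and a brute-force nearest lookup (the names here are illustrative, not from the original source):

import numpy as np
from numpy.linalg import norm

db = []  # list of (key, value) pairs

def nearest(key):
    # brute-force nearest neighbour over the stored keys
    return min(db, key=lambda kv: norm(kv[0] - key))

def add(key, value):
    # store the pair only if no identical key is already present
    if not db or norm(nearest(key)[0] - key) > 0.0:
        db.append((key, value))

add(np.array([0.0, 1.0]), "a")
add(np.array([0.0, 1.0]), "b")   # identical key: ignored
add(np.array([1.0, 0.0]), "c")   # new key: stored
assert len(db) == 2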
    def learn_step(self, input, output):

        # Coerce 1-D vectors into column matrices.
        if rank(input) == 1:
            input = reshape(input, (self.num_inputs, 1))
        if rank(output) == 1:
            output = reshape(output, (self.num_outputs, 1))

        result = self(input)
        err = output - result

        # Mean squared error over the output components.
        self.MSE = sum(err.flat**2) / self.num_outputs
        self.debug("MSE =", self.MSE)

        # Normalized LMS: scale the learning rate by the input power,
        # then apply the outer-product update w += alpha * err * input^T.
        alpha = self.alpha / sum(input**2)
        self.w += alpha * transpose(dot(input, transpose(err)))
        self.debug("update ratio =", norm(self(input) - result) / norm(err))
Example #4
    def present_input(self, X):

        # Activation of each unit is the inverse of its (exponentiated)
        # distance to the input.
        for y in range(self.ydim):
            for x in range(self.xdim):
                self.activation[y, x] = norm(X - self.weights[y, x])**self.response_exponent

        self.activation = 1 / self.activation

        if inf in self.activation:
            # A zero-distance (exact) match gives infinite activation:
            # collapse the whole map to a one-hot on the winning unit.
            # (Zero the winner's inf first so the self-subtraction below
            # never computes inf - inf.)
            win = self.winner()
            self.activation.flat[win] = 0
            self.activation -= self.activation
            self.activation.flat[win] = 1.0
        else:
            # Otherwise normalize the activations to sum to one.
            self.activation /= sum(self.activation.flat)
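The normalization above turns inverse distances into activities that sum to one; an exact match produces inf and is collapsed to a one-hot on the winner. The same logic on a flat array, as a minimal sketch (names are illustrative):

import numpy as np

def soft_activation(dists, response_exponent=2):
    # inverse-distance activation; a zero distance produces inf
    with np.errstate(divide="ignore"):
        act = 1.0 / dists**response_exponent
    if np.isinf(act).any():
        # exact match: one-hot on the (first) zero-distance unit
        onehot = np.zeros_like(act)
        onehot[np.argmax(act)] = 1.0
        return onehot
    return act / act.sum()

print(soft_activation(np.array([2.0, 1.0, 4.0])))  # sums to 1
print(soft_activation(np.array([2.0, 0.0, 4.0])))  # one-hot on unit 1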
    def testUniform(self):

        # train
        for X in self.training_data:
            Y = self.ground_truth(X)
            e = rand.normal(0, self.training_error, self.num_outputs)
            self.fn.learn_step(X, Y + e)

        # test
        failures = 0
        total_err = 0
        for X in self.test_data:
            Y_learned = self.fn(X)
            Y_true = self.ground_truth(X)
            err = norm(Y_learned - Y_true)
            total_err += err
            self.fn.verbose("err =", err)
            failures += (err > self.acceptable_error)
        self.fn.message("avg err =", total_err / len(self.test_data))
        failure_rate = failures / float(len(self.test_data))
        self.fn.message("%.1f%% Failure" % (failure_rate * 100))
        assert failure_rate < self.acceptable_failure_rate
    def k_nearest(self, key, k, depth=0):

        if self._key is None:
            results = []
        else:
            d = depth % self.vector_len

            if key[d] <= self._key[d]:
                close_branch = self._le
                far_branch = self._gt
            else:
                close_branch = self._gt
                far_branch = self._le

            # Get the k nearest from the close side of the split
            if close_branch is not None:
                results = close_branch.k_nearest(key, k, depth + 1)
            else:
                results = []

            # If we already have k results and the farthest of them is
            # closer than the splitting plane, neither this node nor the
            # far branch can improve on them; otherwise check both.
            if len(results) < k or max(dist for v, dist in results) >= abs(self._key[d] - key[d]):
                if far_branch is not None:
                    far_results = far_branch.k_nearest(key, k, depth + 1)
                else:
                    far_results = []
                results.append(((self._key, self._value), norm(self._key - key)))
                results.extend(far_results)
                results.sort(key=lambda p: p[1])
                results = results[:k]
        if depth == 0:
            # At the root, split the (point, distance) pairs into two lists.
            return [v for v, d in results], [d for v, d in results]
        else:
            return results
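The branch-pruning test relies on the fact that the per-coordinate gap |key[d] - split[d]| is a lower bound on the Euclidean distance from key to any point on the far side of the splitting plane, so once k results sit closer than that gap the far branch cannot help. A quick numeric check of that bound (values are illustrative):

import numpy as np

rng = np.random.default_rng(1)
key = np.array([0.0, 0.0])
split = np.array([2.0, 5.0])
d = 0                                 # splitting dimension
for _ in range(1000):
    far = rng.normal(size=2) * 10
    far[d] = split[d] + abs(far[d])   # force the point onto the far side
    assert np.linalg.norm(far - key) >= abs(split[d] - key[d])
print("pruning bound holds")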
Example #8
    def present_input(self, X):

        SOM.present_input(self, X)
        # Relative quantization error: distance from the input to the
        # winning model vector, normalized by the input's magnitude.
        dist = norm(self.get_model_vector(self.winner()) - X)
        self.error_ratio = dist / norm(X)
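error_ratio above is the relative quantization error of the winning model vector; a two-line numpy equivalent (values are illustrative):

import numpy as np

X = np.array([3.0, 4.0])          # input, norm 5
w_win = np.array([3.0, 3.0])      # winning model vector
error_ratio = np.linalg.norm(w_win - X) / np.linalg.norm(X)
print(error_ratio)                # 0.2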
Example #9
    def train(self, X, error=None):

        self.debug("Training on input:", X)
        self.present_input(X)
        self.count += 1

        # (roman numeral comments from fritzke's algorithm in
        # B. Fritzke, Unsupervised ontogenetic networks, in Handbook
        # of Neural Computation, IOP Publishing and Oxford University
        # Press, 1996)  [ replacing \zeta with X ]

        # (iii) Determine units s_1 and s_2 (s_1,s_2 \in A) such that
        #       |w_{s_1} - X| <= |w_c - X| (\forall c \in A)
        #   and
        #       |w_{s_2} - X| <= |w_c - X| (\forall c \in A\\s_1)

        s_1, s_2 = self.winners(2)

        # (iv) If it does not already exist, insert a connection between s_1 and s_2;
        #   in any case, set the age of the connection to zero

        self.add_connection(s_1, s_2)

        # (v) Add the squared distance between the input signal and the
        # nearest unit in input space to a local error variable

        if error is None:
            error = self.dists[s_1]**2
            if self.normalize_error:
                error = sqrt(error) / norm(X)

        self.error[s_1] += error

        # (vi) Move s_1 and its direct topological neighbors towards
        # X by fractions e_b and e_n, respectively, of the total
        # distance.

        self.weights[s_1] += self.e_b * (X - self.weights[s_1])
        for n in self.connections[s_1]:
            self.weights[n] += self.e_n * (X - self.weights[n])

        # (vii) Increment the age of all edges emanating from s_1
        for n in self.connections[s_1]:
            self.connections[n][s_1] += 1
            self.connections[s_1][n] += 1

        # (viii) Remove edges with an age larger than max_age....
        for a, connection_dict in enumerate(self.connections):
            # iterate over a snapshot, since del_connection mutates the dicts
            for b, age in list(connection_dict.items()):
                if age > self.max_age:
                    self.del_connection(a, b)

        # (viii) ... If this results in units having no emanating
        # edges, remove them as well.
        to_be_deleted = [a for a, d in enumerate(self.connections) if not d]
        #   sort the list in descending order, so deleting lower numbered
        #   units doesn't screw up the connections
        to_be_deleted.sort(reverse=True)
        if to_be_deleted:
            self.verbose("Deleting units", to_be_deleted)
        for a in to_be_deleted:
            self.del_unit(a)

        # (ix) if the number of input signals so far is an integer
        # multiple of a parameter \lambda, insert a new unit as
        # follows.
        if self.time_to_grow():
            # o Determine the unit q with the maximum accumulated error.
            # o Interpolate a new unit r from q and its neighbor f with the largest
            #   error variable

            q, f = self.growth_pair()
            r = len(self.weights)

            new_weights = 0.5 * (self.weights[q] + self.weights[f])
            new_weights.shape = (1, self.dim)
            self.weights = join((self.weights, new_weights), axis=0)

            new_distance = norm(X - new_weights)
            self.dists = join((self.dists, new_distance), axis=0)

            self.connections.append({})

            # o Insert edges connecting the new unit r with units q and f and
            #   remove the original edge between q and f.
            self.verbose("Adding unit", r, "between", q, "and", f,
                         "--- count =", self.count)
            self.add_connection(q, r)
            self.add_connection(r, f)
            self.del_connection(q, f)

            # o Decrease the error variables of q and f
            self.error[q] -= self.alpha * self.error[q]
            self.error[f] -= self.alpha * self.error[f]

            # o Interpolate the error variable of r from q and f
            new_error = array(0.5 * (self.error[q] + self.error[f]))
            new_error.shape = (1, 1)
            self.error = join((self.error, new_error))

            if self.grow_callback:
                self.grow_callback(q, f)

        # (x) Decrease the error variables of all units
        self.error -= self.beta * self.error
        return
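The growth step (ix) inserts the new unit r halfway between the max-error unit q and its worst neighbor f, then discounts and interpolates their error variables. A minimal numpy sketch of just that arithmetic (array shapes and values are illustrative):

import numpy as np

weights = np.array([[0.0, 0.0], [4.0, 0.0]])
error = np.array([[8.0], [6.0]])
alpha = 0.5
q, f = 0, 1                       # max-error unit and its worst neighbour

new_w = 0.5 * (weights[q] + weights[f])
weights = np.vstack([weights, new_w])             # new unit r at the midpoint
error[q] -= alpha * error[q]                      # discount q's error
error[f] -= alpha * error[f]                      # discount f's error
new_err = 0.5 * (error[q] + error[f])             # interpolate r's error
error = np.vstack([error, new_err.reshape(1, 1)])

print(weights[-1])                # [2. 0.]
print(error[-1])                  # [3.5]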