Example #1
    def _testNmseAtSelectedBases(self, K):
        """Each entry of 'K' is an index into self.prod_bases11.
        Use_test_data = calc nmse using test_X/test_y or just usual (trn) X/y ?
        """
        assert max(K) <= self._M()

        #calc coefs
        tran_sub_B = numpy.transpose( numpy.take(self.B, K, 0) )
        tran_y = numpy.transpose(self.y)
        try:
            (coefs, resids, rank, sing_vals) = \
                    linear_least_squares( tran_sub_B, tran_y )
            
            #make sure coefs is a plain numpy float array
            coefs = numpy.array(coefs, dtype=float)
        except Exception:
            #the fit failed (e.g. singular matrix); report an infinitely bad score
            return float('inf')

        #fill test_B if needed
        if self.test_B is None:
            self.test_B = numpy.zeros((self._M(), len(self.test_y)),
                                        dtype=float)
            for m, base11 in enumerate(self.prod_bases11):
                self.test_B[m,:] = base11.simulate(self.test_X11)
        test_tran_sub_B = numpy.transpose( numpy.take(self.test_B, K, 0) )

        #sim and sse calc
        test_tran_yhat = numpy.dot(coefs, test_tran_sub_B)
        test_yhat = numpy.transpose(test_tran_yhat)
        sse = self._sse(test_yhat, self.test_y)
        nmse = math.sqrt(sse)
        return nmse
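For reference, the fit-then-score step in this example maps onto numpy.linalg.lstsq (linear_least_squares appears to be the equivalent routine from the old Numeric/LinearAlgebra package). A minimal standalone sketch, with all names purely illustrative:

import numpy

def error_at_selected_bases(B, y, test_B, test_y, K):
    #B is (n_bases, n_train), test_B is (n_bases, n_test); K selects rows
    A = numpy.take(B, K, axis=0).T            #(n_train, len(K)) design matrix
    try:
        coefs, resids, rank, sv = numpy.linalg.lstsq(A, y, rcond=None)
    except numpy.linalg.LinAlgError:
        return float('inf')                   #failed fit -> infinitely bad score
    test_A = numpy.take(test_B, K, axis=0).T  #(n_test, len(K))
    yhat = numpy.dot(test_A, coefs)           #predictions on the held-out data
    sse = float(numpy.sum((yhat - test_y) ** 2))
    return float(numpy.sqrt(sse))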
Example #2
    def _linLearnAndCalcYhat(self, tran_B, tran_y):
        (coefs, resids, rank, sing_vals) = \
                linear_least_squares( tran_B, tran_y )
        #make sure coefs is a plain numpy float array
        coefs = numpy.array(coefs, dtype=float)

        tran_yhat = numpy.dot(coefs, tran_B)
        yhat = numpy.transpose(tran_yhat)
        return yhat
Example #3
    def learn_batch(self, data):

        X = array([x for x, y in data])
        Y = array([y for x, y in data])

        if self.use_bias:
            X = join((X, ones((len(X), 1))), axis=1)
        W, residuals, rank, s = linear_least_squares(X, Y)

        self.w = W
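This learn_batch (and the near-identical one in Example #4 below) stacks the training pairs into matrices, optionally appends a column of ones so the last row of W acts as a bias term, and solves the whole batch in one least-squares call (array/ones/join presumably alias the usual numpy/Numeric routines). A self-contained sketch of the same idea, with illustrative names:

import numpy

def fit_linear_batch(data, use_bias=True):
    X = numpy.array([x for x, y in data], dtype=float)   #(n_samples, n_inputs)
    Y = numpy.array([y for x, y in data], dtype=float)   #(n_samples, n_outputs)
    if use_bias:
        #extra column of ones -> last row of W is the bias term
        X = numpy.concatenate((X, numpy.ones((len(X), 1))), axis=1)
    W, residuals, rank, s = numpy.linalg.lstsq(X, Y, rcond=None)
    return W

#usage: predict with numpy.dot of the (bias-augmented) input against W
#w = fit_linear_batch([([0.0, 1.0], [1.0]), ([1.0, 0.0], [0.0]), ([1.0, 1.0], [1.0])])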
Example #4
    def learn_batch(self,data):

        X = array([x for x,y in data])
        Y = array([y for x,y in data])

        if self.use_bias:
            X = join((X,ones((len(X),1))),axis=1)
        W,residuals,rank,s = linear_least_squares(X,Y)

        self.w = W
    def _combine(self,q,Xs,Ys,weights):
        q = array(q)
        X = array(Xs)

        rows,cols = X.shape
        
        if rows < cols:
            self.verbose("Falling back to weighted averaging.")
            return weighted_average(Ys,weights)
        
        Y = array(Ys)
        W = Numeric.identity(len(weights))*weights
        Z = mult(W,X)
        v = mult(W,Y)

        if self.ridge_range:
            ridge = Numeric.identity(cols) * rand.uniform(0,self.ridge_range,(cols,1))
            Z = join((Z,ridge))
            v = join((v,Numeric.zeros((cols,1))))
            

        B,residuals,rank,s = linear_least_squares(Z,v)

        if len(residuals) == 0:
            self.verbose("Falling back to weighted averaging.")
            return weighted_average(Ys,weights)
        
        estimate = mult(q,B)

        # we estimate the variance as the sum of the
        # residuals over the sum of the squared weights
        variance = residuals/sum(weights**2)

        stderr = Numeric.sqrt(variance)/Numeric.sqrt(sum(weights))

        return estimate,stderr
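_combine is a locally weighted least-squares fit: each training row and target is scaled by its weight, optional ridge rows are appended to regularize the system, and the query point is projected through the resulting coefficients (mult/join appear to be matrix-multiply/concatenate aliases, and the original draws random per-column ridge magnitudes). A rough standalone sketch using a single fixed ridge value, with illustrative names:

import numpy

def locally_weighted_estimate(q, Xs, Ys, weights, ridge=0.0):
    X = numpy.asarray(Xs, dtype=float)         #(n_samples, n_features)
    Y = numpy.asarray(Ys, dtype=float)         #(n_samples, n_outputs)
    w = numpy.asarray(weights, dtype=float)
    Z = X * w[:, None]                         #scale each row by its weight
    v = Y * w[:, None]
    if ridge:
        cols = X.shape[1]
        Z = numpy.concatenate((Z, ridge * numpy.eye(cols)))          #ridge rows
        v = numpy.concatenate((v, numpy.zeros((cols, Y.shape[1]))))
    B, residuals, rank, s = numpy.linalg.lstsq(Z, v, rcond=None)
    estimate = numpy.dot(numpy.asarray(q, dtype=float), B)
    #variance from the residuals over the sum of squared weights, as above
    variance = residuals / numpy.sum(w ** 2) if len(residuals) else None
    return estimate, variance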