def cost(self, data, weightcost):
    """Negative log-likelihood of the observed label sequences (averaged
    over sequences), plus an L2 penalty on the parameters."""
    cost = 0.0
    if not isinstance(data, list):
        data = [data]
    numcases = len(data)
    for i in range(numcases):
        inp = matrix(data[i][0])
        out = data[i][1]
        T = inp.shape[0]
        # Forward pass in log-space; logsumexp(alpha[:, -1]) is log Z.
        alpha = zeros((self.numclasses, T), dtype=float)
        alpha[:, 0][:, newaxis] = \
            self.singlenodescore(inp[0, :], range(self.numclasses))
        for t in range(1, T):
            for y in range(self.numclasses):
                alpha[y, t] = self.singlenodescore(inp[t, :], y) + \
                    logsumexp(alpha[:, t - 1].flatten() +
                              self.dualnodescore(inp, (arange(self.numclasses), y)))
        cost -= logsumexp(alpha[:, -1])
        # Unnormalized score of the observed label sequence.
        for t in range(T):
            cost += self.singlenodescore(inp[t, :], out[t])
        for t in range(T - 1):
            cost += self.dualnodescore(inp, (out[t], out[t + 1]))
    cost = cost.flatten()[0]
    cost /= double(numcases)
    cost -= 0.5 * weightcost * sum(self.params**2)
    return -cost
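All of these snippets rely on a logsumexp helper (and on NumPy names such as zeros, dot and newaxis imported into the module namespace) that the listing does not show. A minimal sketch of such a helper is below; the name and the axis argument match the calls above, but the project's own implementation may differ, and scipy.special.logsumexp is a standard drop-in alternative.

from numpy import exp, log, expand_dims

def logsumexp(x, dim=-1):
    """Compute log(sum(exp(x), dim)) by shifting with the maximum along
    dim, so the exponentials cannot overflow."""
    # Hypothetical sketch: the project's real helper is not shown here.
    xmax = x.max(dim)
    return xmax + log(exp(x - expand_dims(xmax, dim)).sum(dim))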
Example #3
def cost(self, features, labels, weightcost):
    #labels = onehot(labels)
    scores = dot(self.weights, features) + self.biases[:, newaxis]
    # Softmax negative log-likelihood, averaged over cases, plus an L2
    # penalty (including the biases only if self.regularizebiases is set).
    if self.regularizebiases:
        negloglik = (-sum(sum(labels * scores)) +
                     sum(logsumexp(scores, 0))) / double(features.shape[1]) + \
                     weightcost * sum(sum(self.params**2))
    else:
        negloglik = (-sum(sum(labels * scores)) +
                     sum(logsumexp(scores, 0))) / double(features.shape[1]) + \
                     weightcost * sum(sum(self.weights**2))
    return negloglik
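The -sum(sum(labels*scores)) + sum(logsumexp(scores, 0)) expression is the usual softmax cross-entropy written in log-space. A small self-contained check of that identity on made-up data (plain NumPy, with labels one-hot and columnwise as in the snippet):

import numpy as np

rng = np.random.default_rng(0)
scores = rng.normal(size=(5, 3))        # 5 classes, 3 cases (columnwise)
labels = np.eye(5)[:, [2, 0, 4]]        # one-hot targets, one column per case

logZ = np.log(np.exp(scores).sum(0))    # logsumexp over classes (scores are small)
nll_direct = -(labels * (scores - logZ)).sum() / 3.0
nll_snippet = (-(labels * scores).sum() + logZ.sum()) / 3.0
assert np.allclose(nll_direct, nll_snippet)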
Example #5
def cost(self, features, labels, weightcost):
    # Same softmax negative log-likelihood as above, but the scores come
    # from a score-function module rather than a single linear layer.
    labels = labels.astype(float)
    numcases = features.shape[1]
    scores = self.scorefunc.fprop(features)
    if self.regularizebiases:
        cost = (-sum(sum(labels * scores)) +
                sum(logsumexp(scores, 0))) / double(numcases) + \
                weightcost * sum(sum(self.params**2))
    else:
        cost = (-sum(sum(labels * scores)) +
                sum(logsumexp(scores, 0))) / double(numcases) + \
                weightcost * sum(sum(self.weightsbeforebiases**2))
    return cost
def outputmarginals(self, input):
    """Returns a tuple with the single-node and dual-node marginals for
    the given input sequence."""
    T = input.shape[0]
    alpha = zeros((self.numclasses, T), dtype=float)
    beta = zeros((self.numclasses, T), dtype=float)
    p_s = []
    p_d = []
    for t in range(T):
        p_s.append(zeros(self.numclasses, dtype=float))
        p_d.append(zeros((self.numclasses, self.numclasses), dtype=float))
    # Forward-backward passes in log-space:
    alpha[:, 0][:, newaxis] = \
        self.singlenodescore(input[0, :], range(self.numclasses))
    for t in range(1, T):
        for y in range(self.numclasses):
            alpha[y, t] = self.singlenodescore(input[t, :], y) + \
                logsumexp(alpha[:, t - 1].flatten() +
                          self.dualnodescore(input, (arange(self.numclasses), y)))
    beta[:, -1][:, newaxis] = \
        self.singlenodescore(input[-1, :], range(self.numclasses))
    for t in range(T - 2, -1, -1):
        for y in range(self.numclasses):
            beta[y, t] = self.singlenodescore(input[t, :], y) + \
                logsumexp(beta[:, t + 1].flatten() +
                          self.dualnodescore(input, (y, arange(self.numclasses))))
    logZ = logsumexp(alpha[:, -1])
    # Dual-node (pairwise) marginals:
    for t in range(T - 1):
        for y in range(self.numclasses):
            for y_ in range(self.numclasses):
                p_d[t][y, y_] = exp(alpha[y, t] +
                                    self.dualnodescore(input, (y, y_)) +
                                    beta[y_, t + 1] - logZ)
    # Single-node marginals by further marginalizing:
    for t in range(T - 1):
        for y in range(self.numclasses):
            p_s[t][y] = sum(p_d[t][y, :])
    p_s[-1] = sum(p_d[T - 2][:, :], 0)
    p_d = p_d[:-1]
    return p_s, p_d
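In standard forward-backward notation, the quantity filled into p_d[t][y, y_] above is the pairwise marginal of neighbouring labels, with alpha and beta the log-space forward and backward messages, Phi the edge score dualnodescore, and log Z = logsumexp(alpha[:, -1]):

p(y_t = y,\, y_{t+1} = y' \mid x) = \exp\!\big(\alpha_t(y) + \Phi(x, y, y') + \beta_{t+1}(y') - \log Z\big)

The single-node marginals then follow by summing this over one of the two labels, which is exactly what the final marginalization loop does.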
Example #9
def cost(self, data, weightcost):
    if not isinstance(data, list):
        data = [data]
    numcases = len(data)
    cost = 0.0
    for i in range(numcases):
        inp = data[i][0]
        out = data[i][1]
        modeloutput = self.scorefuncs[0](inp)
        cost += sum(modeloutput[out] - logsumexp(modeloutput, 0)) / \
            double(numcases * inp.shape[1])
    cost += 0.5 * weightcost * sum(self.params**2)
    return cost
def nadarayawatson(traininputs, trainoutputs, testinputs, h):
    """Nadaraya-Watson kernel regression using isotropic Gaussian kernels.

       traininputs: Float array of training inputs (columnwise).
       trainoutputs: Float array of training outputs (columnwise).
       testinputs: Float array of test inputs (columnwise).
       h: 2 * variance of the input kernel.

       Output: Nadaraya-Watson regression applied to the testinputs.
    """
    if len(traininputs.shape) < 2:
        traininputs = traininputs[newaxis, :]
    if len(trainoutputs.shape) < 2:
        trainoutputs = trainoutputs[newaxis, :]
    if len(testinputs.shape) < 2:
        testinputs = testinputs[newaxis, :]
    # Log kernel weights: negative squared Euclidean distances scaled by 1/h.
    K = -(1.0 / h) * \
        (sum(testinputs**2, 0)[:, newaxis] + sum(traininputs**2, 0)[newaxis, :]
         - 2 * dot(testinputs.T, traininputs))
    # Normalize the weights per test case in log-space, then take the
    # weighted average of the training outputs.
    K = exp(K - logsumexp(K, 1)[:, newaxis])
    return sum(trainoutputs[newaxis, :, :].transpose(0, 2, 1) * K[:, :, newaxis], 1).T
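A small usage sketch for the columnwise convention described in the docstring, on made-up 1-D data (it assumes the "from numpy import *" style namespace and the logsumexp helper that nadarayawatson itself relies on):

import numpy as np

trainx = np.linspace(0.0, 2 * np.pi, 50)             # 1-D inputs, promoted to (1, 50)
trainy = np.sin(trainx) + 0.1 * np.random.randn(50)  # noisy 1-D outputs
testx = np.linspace(0.0, 2 * np.pi, 200)

pred = nadarayawatson(trainx, trainy, testx, h=0.5)  # predictions, shape (1, 200)

Smaller h makes the kernel narrower, so the prediction follows the training points more closely at the cost of more noise.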
Example #13
def probabilities(self, features):
    # Softmax over the class scores, normalized stably via logsumexp.
    scores = dot(self.weights, features) + self.biases[:, newaxis]
    return exp(scores - logsumexp(scores, 0))
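Because the scores are shifted by logsumexp over axis 0 before exponentiating, every column of the result is a proper distribution over classes. A quick check of that, using scipy.special.logsumexp in place of the project's helper:

import numpy as np
from scipy.special import logsumexp

scores = np.random.randn(10, 4)               # 10 classes, 4 cases
probs = np.exp(scores - logsumexp(scores, 0))
assert np.allclose(probs.sum(0), 1.0)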
Example #14
def probabilities(self, features):
    # Same as above, with the scores produced by the score-function module.
    scores = self.scorefunc.fprop(features)
    return exp(scores - logsumexp(scores, 0))