Example #1
    def accGradParams(self, grad, scale=1.0, momentum=0.0):
        # Weight gradient for the RNN layer from the cached forward pass.
        self.dw = backwardParamsRnn(self.inData, self.fulldata, self.W,
                                    self.reserve, self.descRnn)

        # Accumulate: W.grad = scale * dw + momentum * W.grad
        Blas.addVectorToVector(self.dw,
                               self.getVar("W").grad,
                               out=self.getVar("W").grad,
                               alpha=scale,
                               beta=momentum)
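Judging from the calls in these examples, Blas.addVectorToVector appears to follow axpby semantics, out = alpha * x + beta * y, so the call above accumulates W.grad = scale * dw + momentum * W.grad. A minimal NumPy sketch of that accumulation step (the gpuarray buffers and backwardParamsRnn are stood in for by plain arrays; add_vector_to_vector is a hypothetical stand-in, not the library function):

    import numpy as np

    def add_vector_to_vector(x, y, alpha=1.0, beta=1.0, out=None):
        # Assumed axpby semantics: out = alpha * x + beta * y.
        result = alpha * x + beta * y
        if out is not None:
            out[...] = result
            return out
        return result

    # Hypothetical accumulation step: dw is a fresh weight gradient,
    # w_grad the running accumulator, scale/momentum as in accGradParams.
    dw = np.random.randn(8).astype(np.float32)
    w_grad = np.zeros(8, dtype=np.float32)
    add_vector_to_vector(dw, w_grad, alpha=1.0, beta=0.9, out=w_grad)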
Example #2
    def accGradParams(self, grad, scale=1.0, momentum=0.0):
        # Accumulate the affine parameter gradients:
        # grad = scale * freshGrad + momentum * storedGrad
        if self.affine:
            Blas.addVectorToVector(self.scalegrad.ravel(),
                                   self.vars["scale"].grad.ravel(),
                                   out=self.vars["scale"].grad.ravel(),
                                   alpha=scale,
                                   beta=momentum)

            Blas.addVectorToVector(self.biasgrad.ravel(),
                                   self.vars["bias"].grad.ravel(),
                                   out=self.vars["bias"].grad.ravel(),
                                   alpha=scale,
                                   beta=momentum)
Example #3
    def calcVal(self, pred, target):
        # diff = target - pred (alpha=1.0, beta=-1.0)
        diff = Blas.addVectorToVector(target.ravel(),
                                      pred.ravel(),
                                      alpha=1.0,
                                      beta=-1.0)

        # Mean squared error with the conventional 1/2 factor.
        error = Blas.dot(diff, diff) / (2.0 * np.prod(target.shape))

        return error
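With the axpby reading above, diff = target - pred, so calcVal returns the mean squared error with the conventional 1/2 factor. A NumPy cross-check under that assumption:

    import numpy as np

    pred = np.random.randn(4, 3).astype(np.float32)
    target = np.random.randn(4, 3).astype(np.float32)

    diff = target.ravel() - pred.ravel()  # alpha=1.0, beta=-1.0
    error = diff @ diff / (2.0 * np.prod(target.shape))

    # Same value via the direct formula.
    assert np.isclose(error, 0.5 * np.mean((target - pred) ** 2))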
Example #4
    def calcVal(self, pred, target):
        # diff = pred - target
        diff = Blas.addVectorToVector(pred.ravel(),
                                      target.ravel(),
                                      alpha=1.0,
                                      beta=-1.0)

        # Mean absolute error: L1 norm averaged over all elements.
        error = Blas.vectorL1Norm(diff) / np.prod(target.shape)

        return error
Example #5
    def calcError(self, pred, target):
        # diff = pred - target
        diff = Blas.addVectorToVector(pred.ravel(),
                                      target.ravel(),
                                      alpha=1.0,
                                      beta=-1.0)

        # L1 error normalized by the per-sample element count
        # (batch dimension excluded), accumulated across batches.
        self.devErr.fill(Blas.vectorL1Norm(diff) / np.prod(pred.shape[1:]))
        self.accumErr += self.devErr
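Examples 4 and 5 compute the same L1 quantity; they differ only in normalization: calcVal divides by the total element count, while calcError divides by the per-sample count np.prod(pred.shape[1:]) and keeps a running total across batches. A NumPy sketch of that accumulation, assuming devErr and accumErr act as scalar accumulators:

    import numpy as np

    def calc_error_step(pred, target, accum):
        # L1 error normalized by the per-sample element count
        # (batch dimension excluded), then accumulated across batches.
        dev_err = np.abs(pred - target).sum() / np.prod(pred.shape[1:])
        return accum + dev_err

    accum = 0.0
    for _ in range(3):
        pred = np.random.randn(4, 3).astype(np.float32)
        target = np.random.randn(4, 3).astype(np.float32)
        accum = calc_error_step(pred, target, accum)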
Example #6
    def calcGrad(self, pred, target):
        # Gradient of the MSE cost: grad = (target - pred) / N
        c = 1.0 / np.prod(target.shape)
        grad = Blas.addVectorToVector(target.ravel(),
                                      pred.ravel(),
                                      alpha=c,
                                      beta=-c)
        grad = grad.reshape(pred.shape)

        return grad
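This matches calcVal from example 3: differentiating error = ||target - pred||^2 / (2N) with respect to pred gives (pred - target) / N, and calcGrad returns the negated quantity (target - pred) / N, which suggests the framework propagates the descent direction rather than the raw derivative. A NumPy check of the arithmetic:

    import numpy as np

    pred = np.random.randn(4, 3).astype(np.float32)
    target = np.random.randn(4, 3).astype(np.float32)

    c = 1.0 / np.prod(target.shape)
    grad = (c * target.ravel() - c * pred.ravel()).reshape(pred.shape)

    # Equals minus the derivative of ||target - pred||^2 / (2N) w.r.t. pred.
    assert np.allclose(grad, (target - pred) / target.size)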
Example #7
    def updateGrad(self, grad):
        # Backward pass of the map-style LRN: gradients with respect
        # to the input and to the window means.
        self.grad, meansGrad = mapLRNBackward(self.inData,
                                              self.data,
                                              grad,
                                              self.means,
                                              None,
                                              N=self.N,
                                              alpha=self.alpha,
                                              beta=self.beta,
                                              K=self.K)

        if self.includePad:
            # Route the means gradient back through the averaging pool,
            # then subtract it: grad = grad - meansGrad.
            meansGrad = poolNdBackward(self.inData,
                                       self.means,
                                       meansGrad,
                                       self.workspace,
                                       size=self.size,
                                       stride=1,
                                       pad=self.pad,
                                       mode=self.mode)

            Blas.addVectorToVector(self.grad.ravel(),
                                   meansGrad.ravel(),
                                   out=self.grad.ravel(),
                                   beta=-1.0)
Example #8
    def updateGrad(self, grad):
        if self.mode == PenaltyMode.l1:
            # Elementwise kernel applies the L1 penalty gradient,
            # scaled by weight / batchSize.
            self.grad = gpuarray.empty(grad.shape,
                                       dtype=grad.dtype,
                                       allocator=memPool)
            l1penaltyKer(self.grad, grad, self.data,
                         self.weight / grad.shape[0])

        elif self.mode == PenaltyMode.l2:
            # L2 penalty: grad = grad - (weight / batchSize) * data
            self.grad = Blas.addVectorToVector(grad.ravel(),
                                               self.data.ravel(),
                                               alpha=1.0,
                                               beta=-self.weight / grad.shape[0])
            self.grad = self.grad.reshape(grad.shape)

        else:
            raise NotImplementedError(self.mode)
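For the L2 branch, the update reads grad = grad - (weight / batchSize) * data, i.e. weight decay folded into the incoming gradient, consistent with the sign convention in example 6. A NumPy sketch of both branches; the L1 case is an assumption, since l1penaltyKer's body is not shown here (a plausible reading is grad - c * sign(data)):

    import numpy as np

    def penalty_grad(grad, data, weight, mode="l2"):
        # Per-batch factor mirrors weight / grad.shape[0] above.
        c = weight / grad.shape[0]
        if mode == "l1":
            # Assumption: l1penaltyKer computes grad - c * sign(data).
            return grad - c * np.sign(data)
        elif mode == "l2":
            # Matches the addVectorToVector call: grad - c * data.
            return grad - c * data
        raise NotImplementedError(mode)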
Example #9
    def updateGrad(self, grad):
        # Propagate through the mean pooling, then subtract:
        # grad = grad - meansGrad.
        meansGrad = poolNdBackward(self.inData, self.means, grad, self.workspace,
                                   size=self.size, stride=1, pad=self.pad,
                                   mode=self.mode)

        Blas.addVectorToVector(grad.ravel(), meansGrad.ravel(),
                               out=meansGrad.ravel(), beta=-1.0)
        self.grad = meansGrad
Example #10
    def updateData(self, data):
        # Windowed means via stride-1 pooling, then subtract them
        # from the input: data = data - means.
        self.means, self.workspace = poolNd(data, size=self.size, stride=1,
                                            pad=self.pad, mode=self.mode,
                                            test=not self.train)
        self.data = Blas.addVectorToVector(data.ravel(), self.means.ravel(),
                                           beta=-1.0).reshape(*data.shape)
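Examples 9 and 10 together form a local mean-subtraction layer: the forward pass (example 10) pools a stride-1 windowed mean and subtracts it from the input, and the backward pass (example 9) routes the incoming gradient through the pooling backward and subtracts the result. A 1-D NumPy sketch of the forward step, with a zero-padded moving average standing in for poolNd:

    import numpy as np

    def subtract_local_mean(data, size=3):
        # Stride-1 windowed average with zero padding (the pad is
        # included in the mean), then data - means, as in updateData.
        pad = size // 2
        padded = np.pad(data, pad, mode="constant")
        kernel = np.ones(size, dtype=data.dtype) / size
        means = np.convolve(padded, kernel, mode="valid")
        return data - means, means

    data = np.arange(8, dtype=np.float32)
    out, means = subtract_local_mean(data)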