Example #1
    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        if self._div is None:
            self._div = input.new()
        if self._output is None:
            self._output = self.output.new()
        if self._expand4 is None:
            self._expand4 = input.new()
        if self._gradOutput is None:
            self._gradOutput = input.new()

        if not self.fastBackward:
            self.updateOutput(input)

        inputSize, outputSize = self.weight.size(0), self.weight.size(1)
        """
        dy_j   -2 * c_j * c_j * (w_j - x)   c_j * c_j * (x - w_j)
        ---- = -------------------------- = ---------------------
         dx     2 || c_j * (w_j - x) ||              y_j
        """

        # to prevent div by zero (NaN) bugs
        self._output.resize_as_(self.output).copy_(self.output).add_(1e-7)
        self._view(self._gradOutput, gradOutput, gradOutput.size())
        torch.div(gradOutput, self._output, out=self._div)
        if input.dim() == 1:
            self._div.resize_(1, outputSize)
            self._expand4 = self._div.expand_as(self.weight)

            if input.type() == 'torch.cuda.FloatTensor':
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat2.mul_(self._repeat)
            else:
                torch.mul(self._repeat, self._expand4, out=self._repeat2)

            self._repeat2.mul_(self.diagCov)
            torch.sum(self._repeat2, 1, out=self.gradInput)
            self.gradInput.resize_as_(input)
        elif input.dim() == 2:
            batchSize = input.size(0)

            self._div.resize_(batchSize, 1, outputSize)
            self._expand4 = self._div.expand(batchSize, inputSize, outputSize)

            if input.type() == 'torch.cuda.FloatTensor':
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat2.mul_(self._repeat)
                self._repeat2.mul_(self._repeat3)
            else:
                torch.mul(self._repeat, self._expand4, out=self._repeat2)
                self._repeat2.mul_(self._expand3)

            torch.sum(self._repeat2, 2, out=self.gradInput)
            self.gradInput.resize_as_(input)
        else:
            raise RuntimeError("1D or 2D input expected")

        return self.gradInput
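
As a quick sanity check on the derivative quoted in the docstring, here is a standalone sketch (not part of the module above; all tensor names are made up) that compares the hand-derived dy_j/dx for y_j = ||c_j * (w_j - x)|| against torch.autograd:

import torch

inputSize, outputSize = 5, 3
x = torch.randn(inputSize, requires_grad=True)
w = torch.randn(inputSize, outputSize)        # plays the role of self.weight
c = torch.rand(inputSize, outputSize) + 0.1   # plays the role of self.diagCov

# forward: y_j = || c_j * (w_j - x) ||
y = (c * (w - x.unsqueeze(1))).norm(dim=0)
y.backward(torch.ones(outputSize))

# hand-derived: dy_j/dx = c_j^2 * (x - w_j) / y_j, accumulated over j
manual = ((c ** 2) * (x.detach().unsqueeze(1) - w) / y.detach()).sum(dim=1)
print(torch.allclose(x.grad, manual))         # expected: True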
Example #2
def _smooth_l1_loss(x, t, in_weight, sigma):
    # Smooth L1 (Huber) loss: quadratic for |diff| < 1 / sigma^2, linear beyond.
    sigma2 = sigma**2
    in_weight = in_weight.type(torch.FloatTensor)
    x = x.type(torch.FloatTensor)
    t = t.type(torch.FloatTensor)
    diff = in_weight * (x - t)
    abs_diff = diff.abs()
    # flag is 1 where the quadratic branch applies, 0 where the linear one does
    flag = (abs_diff.data < (1. / sigma2)).float()
    y = (flag * (sigma2 / 2.) * (diff**2) + (1 - flag) *
         (abs_diff - 0.5 / sigma2))
    return y.sum()
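
A minimal usage sketch with made-up numbers (assumes torch is imported): in_weight masks out coordinates that should not contribute, and sigma controls where the loss switches from quadratic to linear.

x = torch.tensor([[0.5, 1.2, -0.3]])
t = torch.tensor([[0.4, 0.0, -0.3]])
in_weight = torch.tensor([[1., 1., 0.]])   # ignore the last coordinate
loss = _smooth_l1_loss(x, t, in_weight, sigma=1.)
# |diff| = [0.1, 1.2, 0.0]: 0.1 falls in the quadratic branch, 1.2 in the
# linear one, so loss.item() is 0.5 * 0.1**2 + (1.2 - 0.5) = 0.705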
Example #3
    def accGradParameters(self, input, gradOutput, scale=1):
        inputSize, outputSize = self.weight.size(0), self.weight.size(1)

        """
        dy_j   2 * c_j * c_j * (w_j - x)    c_j * c_j * (w_j - x)
        ---- = -------------------------- = ---------------------
        dw_j    2 || c_j * (w_j - x) ||             y_j

        dy_j    2 * c_j * (w_j - x)^2    c_j * (w_j - x)^2
        ---- = ----------------------- = -----------------
        dc_j   2 || c_j * (w_j - x) ||         y_j
        #"""
        # assumes a preceding call to updateGradInput
        if input.dim() == 1:
            self.gradWeight.add_(-scale, self._repeat2)

            self._repeat.div_(self.diagCov)
            self._repeat.mul_(self._repeat)
            self._repeat.mul_(self.diagCov)

            if input.type() == 'torch.cuda.FloatTensor':
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat2.mul_(self._repeat)
            else:
                torch.mul(self._repeat, self._expand4, out=self._repeat2)

            self.gradDiagCov.add_(self._repeat2)
        elif input.dim() == 2:
            if self._sum is None:
                self._sum = input.new()
            torch.sum(self._repeat2, 0, True, out=self._sum)
            self._sum.resize_(inputSize, outputSize)
            self.gradWeight.add_(-scale, self._sum)

            if input.type() == 'torch.cuda.FloatTensor':
                # requires lots of memory, but minimizes cudaMallocs and loops
                self._repeat.div_(self._repeat3)
                self._repeat.mul_(self._repeat)
                self._repeat.mul_(self._repeat3)
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat.mul_(self._repeat2)
            else:
                self._repeat.div_(self._expand3)
                self._repeat.mul_(self._repeat)
                self._repeat.mul_(self._expand3)
                self._repeat.mul_(self._expand4)

            torch.sum(self._repeat, 0, True, out=self._sum)
            self._sum.resize_(inputSize, outputSize)
            self.gradDiagCov.add_(scale, self._sum)
        else:
            raise RuntimeError("1D or 2D input expected")
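
The same style of standalone autograd check (again with made-up tensors, independent of the module) confirms both docstring formulas:

import torch

inputSize, outputSize = 4, 2
x = torch.randn(inputSize)
w = torch.randn(inputSize, outputSize, requires_grad=True)
c = (torch.rand(inputSize, outputSize) + 0.1).requires_grad_()

y = (c * (w - x.unsqueeze(1))).norm(dim=0)   # y_j = || c_j * (w_j - x) ||
y.backward(torch.ones(outputSize))

diff = w.detach() - x.unsqueeze(1)
yd = y.detach()
print(torch.allclose(w.grad, c.detach() ** 2 * diff / yd))  # c_j^2 * (w_j - x) / y_j
print(torch.allclose(c.grad, c.detach() * diff ** 2 / yd))  # c_j * (w_j - x)^2 / y_j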
Example #4
    def accGradParameters(self, input, gradOutput, scale=1):
        inputSize, outputSize = self.weight.size(0), self.weight.size(1)
        """
        dy_j   2 * c_j * c_j * (w_j - x)    c_j * c_j * (w_j - x)
        ---- = -------------------------- = ---------------------
        dw_j    2 || c_j * (w_j - x) ||             y_j

        dy_j    2 * c_j * (w_j - x)^2    c_j * (w_j - x)^2
        ---- = ----------------------- = -----------------
        dc_j   2 || c_j * (w_j - x) ||         y_j
        #"""
        # assumes a preceding call to updateGradInput
        if input.dim() == 1:
            self.gradWeight.add_(-scale, self._repeat2)

            self._repeat.div_(self.diagCov)
            self._repeat.mul_(self._repeat)
            self._repeat.mul_(self.diagCov)

            if input.type() == 'torch.cuda.FloatTensor':
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat2.mul_(self._repeat)
            else:
                torch.mul(self._repeat, self._expand4, out=self._repeat2)

            self.gradDiagCov.add_(self._repeat2)
        elif input.dim() == 2:
            if self._sum is None:
                self._sum = input.new()
            torch.sum(self._repeat2, 0, out=self._sum)
            self._sum.resize_(inputSize, outputSize)
            self.gradWeight.add_(-scale, self._sum)

            if input.type() == 'torch.cuda.FloatTensor':
                # requires lots of memory, but minimizes cudaMallocs and loops
                self._repeat.div_(self._repeat3)
                self._repeat.mul_(self._repeat)
                self._repeat.mul_(self._repeat3)
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat.mul_(self._repeat2)
            else:
                self._repeat.div_(self._expand3)
                self._repeat.mul_(self._repeat)
                self._repeat.mul_(self._expand3)
                self._repeat.mul_(self._expand4)

            torch.sum(self._repeat, 0, out=self._sum)
            self._sum.resize_(inputSize, outputSize)
            self.gradDiagCov.add_(scale, self._sum)
        else:
            raise RuntimeError("1D or 2D input expected")
Example #5
File: Concat.py  Project: Jsmilemsj/pytorch
    def __tostring__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   +. -> '
        res = torch.typename(self)
        res += ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + extlast)
            else:
                res += line + tab + next + '(' + str(i) + '): ' + str(self.modules[i]).replace(line, line + tab + ext)

        res += line + tab + last + 'output'
        res += line + '}'
        return res
Example #6
    def __tostring__(self):
        tab = '  '
        line = '\n'
        next = '  |`-> '
        ext = '  |    '
        extlast = '       '
        last = '   +. -> '
        res = torch.typename(self)
        res += ' {' + line + tab + 'input'
        for i in range(len(self.modules)):
            if i == len(self.modules) - 1:
                res += line + tab + next + '(' + str(i) + '): ' + str(
                    self.modules[i]).replace(line, line + tab + extlast)
            else:
                res += line + tab + next + '(' + str(i) + '): ' + str(
                    self.modules[i]).replace(line, line + tab + ext)

        res += line + tab + last + 'output'
        res += line + '}'
        return res
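
For reference, given the markers defined above, the rendered tree for a container holding two modules looks roughly like this (module names are illustrative):

nn.Concat {
  input
    |`-> (0): nn.Linear
    |`-> (1): nn.Tanh
     +. -> output
}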
Example #7
def binary(config, gan, net):
    net = torch.gt(net, 0)
    net = net.float()  # cast the boolean mask back to float
    return net
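
A minimal sketch of the effect (config and gan are unused by the body, so None can be passed for illustration): values above zero map to 1.0 and everything else to 0.0.

import torch

net = torch.tensor([-0.5, 0.0, 2.0])
print(binary(None, None, net))   # tensor([0., 0., 1.])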
Example #8
File: init.py  Project: Anandkamath122/GUNN
   local function sharingKey(m)
      local key = torch.type(m)
      if m.__shareGradInputKey then key = key .. ':' .. m.__shareGradInputKey end
      return key
   end
Example #9
File: init.py  Project: Anandkamath122/GUNN
   model = require('models/' .. opt.netType)(opt)
   if checkpoint then
      modelPath = paths.concat(opt.resume, checkpoint.modelFile)
      assert(paths.filep(modelPath), 'Saved model not found: ' .. modelPath)
      print('=> Resuming model from ' .. modelPath)
      model0 = torch.load(modelPath):type(opt.tensorType)
      M.copyModel(model, model0)
   elseif opt.retrain ~= 'none' then
      assert(paths.filep(opt.retrain), 'File not found: ' .. opt.retrain)
      print('Loading model from file: ' .. opt.retrain)
      model0 = torch.load(opt.retrain):type(opt.tensorType)
      M.copyModel(model, model0)
   end

   -- unwrap any DataParallelTable wrapper
   if torch.type(model) == 'nn.DataParallelTable' then
      model = model:get(1)
   end

   -- optionally reduce memory usage with the optnet package
   if opt.optnet or opt.optMemory == 1 then
      local optnet = require 'optnet'
      local imsize = opt.dataset == 'imagenet' and 224 or 32
      local sampleInput = torch.zeros(4, 3, imsize, imsize):type(opt.tensorType)
      optnet.optimizeMemory(model, sampleInput, {inplace = false, mode = 'training'})
   end

   if opt.shareGradInput or opt.optMemory >= 2 then
      M.shareGradInput(model, opt)
      M.sharePrevOutput(model, opt)
   end
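For comparison only, a rough PyTorch analogue of the resume branch above; the helper name, checkpoint layout, and use of state_dicts are assumptions, not part of this project:

import os
import torch

def resume_model(model, resume_dir, model_file):
    # hypothetical helper mirroring the Lua logic: locate, verify, load, copy
    model_path = os.path.join(resume_dir, model_file)
    assert os.path.isfile(model_path), 'Saved model not found: ' + model_path
    print('=> Resuming model from ' + model_path)
    model.load_state_dict(torch.load(model_path))
    return model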
Example #10
    def updateGradInput(self, input, gradOutput):
        if self.gradInput is None:
            return

        if self._div is None:
            self._div = input.new()
        if self._output is None:
            self._output = self.output.new()
        if self._expand4 is None:
            self._expand4 = input.new()
        if self._gradOutput is None:
            self._gradOutput = input.new()

        if not self.fastBackward:
            self.updateOutput(input)

        inputSize, outputSize = self.weight.size(0), self.weight.size(1)

        """
        dy_j   -2 * c_j * c_j * (w_j - x)   c_j * c_j * (x - w_j)
        ---- = -------------------------- = ---------------------
         dx     2 || c_j * (w_j - x) ||              y_j
        """

        # to prevent div by zero (NaN) bugs
        self._output.resize_as_(self.output).copy_(self.output).add_(1e-7)
        self._view(self._gradOutput, gradOutput, gradOutput.size())
        torch.div(gradOutput, self._output, out=self._div)
        if input.dim() == 1:
            self._div.resize_(1, outputSize)
            self._expand4 = self._div.expand_as(self.weight)

            if input.type() == 'torch.cuda.FloatTensor':
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat2.mul_(self._repeat)
            else:
                torch.mul(self._repeat, self._expand4, out=self._repeat2)

            self._repeat2.mul_(self.diagCov)
            torch.sum(self._repeat2, 1, True, out=self.gradInput)
            self.gradInput.resize_as_(input)
        elif input.dim() == 2:
            batchSize = input.size(0)

            self._div.resize_(batchSize, 1, outputSize)
            self._expand4 = self._div.expand(batchSize, inputSize, outputSize)

            if input.type() == 'torch.cuda.FloatTensor':
                self._repeat2.resize_as_(self._expand4).copy_(self._expand4)
                self._repeat2.mul_(self._repeat)
                self._repeat2.mul_(self._repeat3)
            else:
                torch.mul(self._repeat, self._expand4, out=self._repeat2)
                self._repeat2.mul_(self._expand3)

            torch.sum(self._repeat2, 2, True, out=self.gradInput)
            self.gradInput.resize_as_(input)
        else:
            raise RuntimeError("1D or 2D input expected")

        return self.gradInput