Example #1
    def accumulate_grads(self, grads):
        """Accumulates gradients from other source.

        This method just adds given gradient arrays to gradients that this
        optimizer holds. It is typically used in data-parallel optimization,
        where gradients for different shards are computed in parallel and
        aggregated by this method. This method correctly treats multiple GPU
        devices.

        Args:
            grads (Iterable): Iterable of gradient arrays to be accumulated.

        .. deprecated:: v1.5
           Use the :meth:`chainer.Link.addgrads` method of the target link
           instead.

        """
        for param, g_src in zip(self.target.params(), grads):
            g_dst = param.grad
            if isinstance(g_dst, numpy.ndarray):
                g_dst += cuda.to_cpu(g_src)
                continue

            with cuda.get_device(g_dst):
                if (isinstance(g_src, cuda.ndarray)
                        and g_dst.device != g_src.device):
                    g_dst += cuda.copy(g_src, out_device=g_dst.device)
                else:
                    g_dst += cuda.to_gpu(g_src)
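
A minimal CPU-only sketch of the same accumulation pattern, with plain NumPy arrays standing in for the optimizer's gradient arrays and the gradients coming from another shard (all names below are hypothetical, not part of the Chainer API):

    import numpy

    # Stand-ins for the target link's gradient arrays and the gradients
    # computed on another shard.
    local_grads = [numpy.zeros((2, 3), dtype=numpy.float32),
                   numpy.zeros((4,), dtype=numpy.float32)]
    shard_grads = [numpy.full((2, 3), 0.5, dtype=numpy.float32),
                   numpy.arange(4, dtype=numpy.float32)]

    # Same pattern as accumulate_grads: add each incoming gradient array
    # in place to the corresponding local gradient array.
    for g_dst, g_src in zip(local_grads, shard_grads):
        g_dst += g_src
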
Example #2
    def accumulate_grads(self, grads):
        """Accumulates gradients from other source.

        This method just adds given gradient arrays to gradients that this
        optimizer holds. It is typically used in data-parallel optimization,
        where gradients for different shards are computed in parallel and
        aggregated by this method. This method correctly treats multiple GPU
        devices.

        Args:
            grads (Iterable): Iterable of gradient arrays to be accumulated.

        .. deprecated:: v1.5
           Use the :meth:`chainer.Link.addgrads` method of the target link
           instead.

        """
        for param, g_src in zip(self.target.params(), grads):
            g_dst = param.grad
            if isinstance(g_dst, numpy.ndarray):
                g_dst += cuda.to_cpu(g_src)
                continue

            with cuda.get_device(g_dst):
                if (isinstance(g_src, cuda.ndarray) and
                        g_dst.device != g_src.device):
                    g_dst += cuda.copy(g_src, out_device=g_dst.device)
                else:
                    g_dst += cuda.to_gpu(g_src)
Example #3
 def forward_gpu(self, x):
     self.retain_inputs(())
     self._in_device = cuda.get_device_from_array(x[0])
     if self.out_device == -1:
         return cuda.to_cpu(x[0]),
     else:
         return cuda.copy(x[0], out_device=self.out_device),
Example #4
    def forward(self, xs):

        # self.retain_inputs(())

        completed = xs[0]
        original = self.x_org
        out_ary = cuda.copy(original)

        self.slices = []
        self._in_shape = original.shape
        self._in_dtype = original.dtype

        for i in range(0, self.batchsize):
            mask_ind_h, mask_ind_w = self.mask_info['index'][i]
            mask_h, mask_w = self.mask_info['size'][i]

            # Save the masked region as slice objects for later use
            self.slices.append(
                (slice(0, 3), slice(mask_ind_h, mask_ind_h + mask_h),
                 slice(mask_ind_w, mask_ind_w + mask_w)))

            # Replace only the masked region of the original image with the
            # completion network's output (equivalently: keep everything
            # outside the mask from the original image)
            out_ary[i][self.slices[i]] = completed[i][self.slices[i]]

        return out_ary,
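
A NumPy-only sketch of the compositing step above: only the masked rectangle of the original image is overwritten with the completion network's output (shapes and coordinates are made up for illustration):

    import numpy

    original = numpy.zeros((3, 8, 8), dtype=numpy.float32)   # (channels, H, W)
    completed = numpy.ones_like(original)                    # completion network output

    mask_ind_h, mask_ind_w = 2, 3   # top-left corner of the masked region
    mask_h, mask_w = 4, 2           # height and width of the masked region

    sl = (slice(0, 3),
          slice(mask_ind_h, mask_ind_h + mask_h),
          slice(mask_ind_w, mask_ind_w + mask_w))

    out = original.copy()
    out[sl] = completed[sl]   # everything outside the mask stays original
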
Example #5
File: copy.py  Project: MakotoSeto/chainer
 def forward_gpu(self, x):
     self.retain_inputs(())
     self._in_device = cuda.get_device_from_array(x[0])
     if self.out_device == -1:
         return cuda.to_cpu(x[0]),
     else:
         return cuda.copy(x[0], out_device=self.out_device),
Example #6
    def copy_parameters_from(self, params):
        """Copies parameters from another source without reallocation.

        Args:
            params (Iterable): Iterable of parameter arrays.

        """
        for dst, src in zip(self.parameters, params):
            if isinstance(dst, numpy.ndarray):
                if isinstance(src, numpy.ndarray):
                    numpy.copyto(dst, src)
                else:
                    dst[:] = src.get()
            elif isinstance(src, numpy.ndarray):
                dst.set(src)
            else:
                cuda.copy(src, out=dst)
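
A CPU-only sketch of the "copy without reallocation" idea from the ndarray-to-ndarray branch above, using numpy.copyto so the destination keeps its original buffer (the parameter lists are illustrative):

    import numpy

    dst_params = [numpy.zeros((3, 3), dtype=numpy.float32)]
    src_params = [numpy.arange(9, dtype=numpy.float32).reshape(3, 3)]

    for dst, src in zip(dst_params, src_params):
        numpy.copyto(dst, src)   # overwrite in place; no new array is allocated
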
Example #7
    def copy_parameters_from(self, params):
        """Copies parameters from another source without reallocation.

        Args:
            params (Iterable): Iterable of parameter arrays.

        """
        for dst, src in zip(self.parameters, params):
            if isinstance(dst, numpy.ndarray):
                if isinstance(src, numpy.ndarray):
                    numpy.copyto(dst, src)
                else:
                    dst[:] = src.get()
            elif isinstance(src, numpy.ndarray):
                dst.set(src)
            else:
                cuda.copy(src, out=dst)
Example #8
    def to_gpu(self, device=None):
        """Migrates the function to GPU and returns self.

        The default implementation moves all fields of type
        :class:`~numpy.ndarray` onto GPU.

        Args:
            device (int or :class:`pycuda.driver.Device` or ``None``): Device
                ID of GPU that the function will be migrated on. If this is
                ``None``, the current device is used.

        Returns:
            self.

        """
        with cuda.using_device(device):
            for k, v in six.iteritems(self.__dict__):
                if isinstance(v, numpy.ndarray):
                    setattr(self, k, cuda.to_gpu(v))
                elif (isinstance(v, cuda.GPUArray) and
                      v.gpudata.device != device):
                    setattr(self, k, cuda.copy(v, out_device=device))
        return self
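
A CPU-only sketch of the field-walking pattern used above: iterate over an object's __dict__ and replace every ndarray attribute with a converted copy. A dtype cast stands in for cuda.to_gpu here, and the class is hypothetical:

    import numpy

    class Holder(object):
        def __init__(self):
            self.w = numpy.zeros((2, 2), dtype=numpy.float64)
            self.name = 'holder'   # non-array attributes are left untouched

    obj = Holder()
    for k, v in list(obj.__dict__.items()):
        if isinstance(v, numpy.ndarray):
            setattr(obj, k, v.astype(numpy.float32))  # stand-in for cuda.to_gpu(v)
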
Example #9
    def to_gpu(self, device=None):
        """Migrates the function to GPU and returns self.

        The default implementation moves all fields of type
        :class:`~numpy.ndarray` onto GPU.

        Args:
            device (int or :class:`pycuda.driver.Device` or ``None``): Device
                ID of GPU that the function will be migrated on. If this is
                ``None``, the current device is used.

        Returns:
            self.

        """
        with cuda.using_device(device):
            for k, v in six.iteritems(self.__dict__):
                if isinstance(v, numpy.ndarray):
                    setattr(self, k, cuda.to_gpu(v))
                elif (isinstance(v, cuda.GPUArray)
                      and v.gpudata.device != device):
                    setattr(self, k, cuda.copy(v, out_device=device))
        return self
Example #10
    def accumulate_grads(self, grads):
        """Accumulates gradients from other source.

        This method just adds given gradient arrays to gradients that this
        optimizer holds. It is typically used in data-parallel optimization,
        where gradients for different shards are computed in parallel and
        aggregated by this method. This method correctly treats multiple GPU
        devices.

        Args:
            grads (Iterable): Iterable of gradient arrays to be accumulated.

        """
        for (_, g_dst, _), g_src in zip(self.tuples, grads):
            if isinstance(g_dst, numpy.ndarray):
                g_dst += cuda.to_cpu(g_src)
                continue

            with cuda.get_device(g_dst):
                if (isinstance(g_src, cuda.ndarray)
                        and g_dst.gpudata.device != g_src.gpudata.device):
                    g_dst += cuda.copy(g_src, out_device=g_dst.gpudata.device)
                else:
                    g_dst += cuda.to_gpu(g_src)
Example #11
File: optimizer.py  Project: kuwa32/chainer
    def accumulate_grads(self, grads):
        """Accumulates gradients from other source.

        This method just adds given gradient arrays to gradients that this
        optimizer holds. It is typically used in data-parallel optimization,
        where gradients for different shards are computed in parallel and
        aggregated by this method. This method correctly treats multiple GPU
        devices.

        Args:
            grads (Iterable): Iterable of gradient arrays to be accumulated.

        """
        for (_, g_dst, _), g_src in zip(self.tuples, grads):
            if isinstance(g_dst, numpy.ndarray):
                g_dst += cuda.to_cpu(g_src)
                continue

            with cuda.using_device(g_dst):
                if (isinstance(g_src, cuda.GPUArray) and
                        g_dst.gpudata.device != g_src.gpudata.device):
                    g_dst += cuda.copy(g_src, out_device=g_dst.gpudata.device)
                else:
                    g_dst += cuda.to_gpu(g_src)
Example #12
File: copy.py  Project: Accent-/chainer
 def forward_gpu(self, x):
     return cuda.copy(x[0], out_device=self.out_device),
Example #13
 def backward_gpu(self, x, gy):
     if self.out_device == -1:
         return cuda.to_gpu(gy[0], device=self._in_device),
     else:
         return cuda.copy(gy[0], out_device=self._in_device),
Example #14
File: copy.py  Project: nihohi0428/chainer
 def backward_gpu(self, x, gy):
     return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),
Example #15
File: copy.py  Project: 2php/chainer
 def forward_gpu(self, x):
     if self.out_device == -1:
         return cuda.to_cpu(x[0]),
     else:
         return cuda.copy(x[0], out_device=self.out_device),
Example #16
File: copy.py  Project: nihohi0428/chainer
 def forward_gpu(self, x):
     return cuda.copy(x[0], out_device=self.out_device),
Example #17
 def backward_gpu(self, x, gy):
     if self.out_device == -1:
         return cuda.to_gpu(gy[0], device=cuda.get_device(x[0])),
     else:
         return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),
Example #18
 def forward_gpu(self, x):
     if self.out_device == -1:
         return cuda.to_cpu(x[0]),
     else:
         return cuda.copy(x[0], out_device=self.out_device),
Example #19
File: copy.py  Project: Accent-/chainer
 def backward_gpu(self, x, gy):
     return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),
Example #20
File: copy.py  Project: MakotoSeto/chainer
 def backward_gpu(self, x, gy):
     if self.out_device == -1:
         return cuda.to_gpu(gy[0], device=self._in_device),
     else:
         return cuda.copy(gy[0], out_device=self._in_device),
Example #21
File: copy.py  Project: 2php/chainer
 def backward_gpu(self, x, gy):
     if self.out_device == -1:
         return cuda.to_gpu(gy[0], device=cuda.get_device(x[0])),
     else:
         return cuda.copy(gy[0], out_device=cuda.get_device(x[0])),