    def make_node(self, images, top_down):
        """
        Build the Apply node for the forward pass: `images` and
        `top_down` are converted to CudaNdarray variables and must be
        4-tensors in c01b layout (channels, rows, cols, batch). The
        two outputs inherit the channel and batch broadcastable flags
        of `images`.
        """
        images = as_cuda_ndarray_variable(images)
        top_down = as_cuda_ndarray_variable(top_down)

        assert images.ndim == 4
        assert top_down.ndim == 4

        channels_broadcastable = images.type.broadcastable[0]
        batch_broadcastable = images.type.broadcastable[3]

        rows_broadcastable = False
        cols_broadcastable = False

        houtput_broadcastable = (channels_broadcastable, rows_broadcastable,
                                 cols_broadcastable, batch_broadcastable)
        houtput_type = CudaNdarrayType(broadcastable=houtput_broadcastable)
        houtput = houtput_type()

        poutput_broadcastable = (channels_broadcastable, rows_broadcastable,
                                 cols_broadcastable, batch_broadcastable)
        poutput_type = CudaNdarrayType(broadcastable=poutput_broadcastable)
        poutput = poutput_type()

        return Apply(self, [images, top_down], [houtput, poutput])
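The broadcastable pattern built above encodes cuda-convnet's c01b layout: axis 0 is channels, axes 1 and 2 are rows and columns, axis 3 is batch. A minimal standalone sketch of the same derivation (the helper name c01b_output_type is hypothetical):

from theano.sandbox.cuda import CudaNdarrayType

def c01b_output_type(input_type):
    # Rows and cols of a fresh output are never broadcastable;
    # channels and batch inherit the input's broadcastable flags.
    c, _, _, b = input_type.broadcastable
    return CudaNdarrayType(broadcastable=(c, False, False, b))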
Example 2
    def grad(self, inputs, g_outputs):
        """
        .. todo::

            WRITEME
        """
        hid_acts, filters, output_shape = inputs
        g_images, = g_outputs
        g_images = as_cuda_ndarray_variable(g_images)
        assert not isinstance(g_images, list)

        global FilterActs
        global WeightActs
        if FilterActs is None:
            from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
            from pylearn2.sandbox.cuda_convnet.weight_acts import WeightActs

        g_filters = WeightActs(stride=self.stride,
                               partial_sum=self.partial_sum,
                               pad=self.pad)(g_images, hid_acts,
                                             filters.shape[1:3])[0]
        assert not isinstance(g_filters, list)
        g_hid_acts = FilterActs(stride=self.stride,
                                pad=self.pad,
                                partial_sum=self.partial_sum)(g_images,
                                                              filters)

        return [g_hid_acts, g_filters, DisconnectedType()()]
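Because DisconnectedType is returned for output_shape above, the op should also advertise that input as disconnected. A minimal sketch of the matching connection_pattern method (standard Theano API; not part of the original snippet):

    def connection_pattern(self, node):
        # One output: hid_acts and filters are connected to it;
        # output_shape only supplies shape information.
        return [[True], [True], [False]]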
Example 3
    def make_node(self, images, maxout, gz):
        """
        .. todo::

            WRITEME
        """
        images = as_cuda_ndarray_variable(images)
        maxout = as_cuda_ndarray_variable(maxout)
        gz = as_cuda_ndarray_variable(gz)

        assert images.ndim == 4
        assert maxout.ndim == 4
        assert gz.ndim == 4
        try:
            # Note: `get_scalar_constant_value` returns an ndarray, not
            # an int
            nb_channel = int(get_scalar_constant_value(images.shape[0]))
            assert nb_channel % 16 == 0
        except NotScalarConstantError:
            pass
        return Apply(self, [images, maxout, gz], [images.type()])
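The try/except above is a best-effort compile-time check: when the channel count is a constant in the graph it must be a multiple of 16 (a cuda-convnet kernel requirement); when it is symbolic, nothing can be verified until runtime. A standalone illustration of the pattern:

from theano import tensor
from theano.tensor.basic import (get_scalar_constant_value,
                                 NotScalarConstantError)

x = tensor.tensor4()
try:
    channels = int(get_scalar_constant_value(x.shape[0]))
    assert channels % 16 == 0
except NotScalarConstantError:
    # The shape is symbolic; defer the check to runtime.
    pass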
Example 4
    def make_node(self, images):
        """
        .. todo::

            WRITEME
        """
        images = as_cuda_ndarray_variable(images)

        assert images.ndim == 4

        channels_broadcastable = images.type.broadcastable[0]
        batch_broadcastable = images.type.broadcastable[3]

        rows_broadcastable = False
        cols_broadcastable = False

        targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                                 cols_broadcastable, batch_broadcastable)
        targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
        targets = targets_type()
        seed = self.seed_state
        seed = as_cuda_ndarray_variable(seed)
        return Apply(self, [images, seed], [targets])
    def make_node(self, p, h, gp, gh, gp_iszero, gh_iszero):
        """
        .. todo::

            WRITEME
        """
        p = as_cuda_ndarray_variable(p)
        h = as_cuda_ndarray_variable(h)
        gp = as_cuda_ndarray_variable(gp)
        gh = as_cuda_ndarray_variable(gh)

        assert p.ndim == 4
        assert h.ndim == 4
        assert gp.ndim == 4
        assert gh.ndim == 4
        try:
            nb_channel = int(get_scalar_constant_value(h.shape[0]))
            assert nb_channel % 16 == 0
        except NotScalarConstantError:
            pass

        return Apply(self, [p, h, gp, gh, gp_iszero, gh_iszero],
                     [p.type(), h.type()])
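Note: the two *_iszero inputs are symbolic scalars rather than Python booleans, so one compiled function can serve both the connected and the disconnected gradient case; the grad method below shows how they are produced.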
    def grad(self, inp, grads):
        """
        .. todo::

            WRITEME
        """
        x, top_down = inp
        p, h = self(x, top_down)
        gp, gh = grads
        gp_iszero = 0.
        gh_iszero = 0.
        if isinstance(gp.type, theano.gradient.DisconnectedType):
            gp = tensor.zeros_like(p)
            gp_iszero = 1.
        if isinstance(gh.type, theano.gradient.DisconnectedType):
            gh = tensor.zeros_like(h)
            gh_iszero = 1.
        gp = gpu_contiguous(gp)
        gh = gpu_contiguous(gh)
        gp_iszero = as_cuda_ndarray_variable(gp_iszero)
        gh_iszero = as_cuda_ndarray_variable(gh_iszero)
        return ProbMaxPoolGrad(self.ds, self.stride,
                               self.start)(p, h, gp, gh, gp_iszero, gh_iszero)
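A sketch of how the disconnected branch above is reached: differentiating a cost that depends only on p leaves h's gradient disconnected, which the grad method converts to zeros plus gh_iszero = 1. ProbMaxPool is assumed to be the op class owning this grad, and ds/stride/start mirror the attributes used above:

import theano
from theano import tensor
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable

x = as_cuda_ndarray_variable(tensor.tensor4('x'))
top_down = as_cuda_ndarray_variable(tensor.tensor4('top_down'))
p, h = ProbMaxPool(ds=2, stride=2, start=0)(x, top_down)
cost = p.sum()  # h is unused, so gh arrives as DisconnectedType
gx, gtop = theano.grad(cost, [x, top_down])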
Example 7
    def make_node(self, images, evals):
        """
        .. todo::

            WRITEME
        """
        images = as_cuda_ndarray_variable(images)
        evals = as_cuda_ndarray_variable(evals)

        assert images.ndim == 4
        assert evals.ndim == 4

        channels_broadcastable = images.type.broadcastable[0]
        batch_broadcastable = images.type.broadcastable[3]

        rows_broadcastable = False
        cols_broadcastable = False

        targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                                 cols_broadcastable, batch_broadcastable)
        targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
        targets = targets_type()

        return Apply(self, [images, evals], [targets])
Example 8
    def grad(self, inputs, dout):
        """
        .. todo::

            WRITEME
        """
        images, = inputs
        acts, denoms = self(images)
        dout, _ = dout  # Ignore the gradient on "denoms"
        dout = as_cuda_ndarray_variable(dout)

        # dout must be contiguous, but it is not always, depending on
        # what is done with the output of this node.
        dout = gpu_contiguous(dout)
        grad_op = CrossMapNormUndo(self._size_f, self._add_scale,
                                   self._pow_scale, self._blocked,
                                   inplace=False)
        return [grad_op(images, acts, denoms, dout)[0]]
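For context, the forward op pairs with this gradient by returning both the normalized activations and the denominators, so the backward pass can reuse them instead of recomputing the normalization. A usage sketch (CrossMapNorm is assumed to be the forward op owning this grad; the constructor values are illustrative, mirroring the self._size_f, self._add_scale, self._pow_scale and self._blocked attributes above):

import theano
from theano import tensor
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable

images = as_cuda_ndarray_variable(tensor.tensor4('images'))
norm = CrossMapNorm(size_f=5, add_scale=1e-4, pow_scale=0.75, blocked=True)
acts, denoms = norm(images)
cost = acts.sum()
g_images = theano.grad(cost, images)  # routed through CrossMapNormUndo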