Example #1
def local_conv3d_cpu(node):
    if not isinstance(node.op, AbstractConv3d):
        return None

    img, kern = node.inputs
    if ((not isinstance(img.type, TensorType) or
         not isinstance(kern.type, TensorType))):
        return None
    if node.op.border_mode not in ['valid', (0, 0, 0)]:
        return None
    if node.op.filter_dilation != (1, 1, 1):
        return None

    bias = theano.tensor.zeros_like(kern[:, 0, 0, 0, 0])

    # need to flip the kernel if necessary (conv3D does not flip)
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]

    # conv3D expects shape (batch, row, column, time, channel)
    img = img.dimshuffle(0, 2, 3, 4, 1)
    kern = kern.dimshuffle(0, 2, 3, 4, 1)

    rval = conv3D(img, kern, bias, node.op.subsample)
    copy_stack_trace(node.outputs[0], rval)
    rval = rval.dimshuffle(0, 4, 1, 2, 3)

    return [rval]
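
For reference, here is a minimal stand-alone sketch of calling the legacy `conv3D` directly, assuming an older Theano installation that still ships `theano.tensor.nnet.Conv3D`. The tensor names and shapes below are made up for illustration; the layout conversions follow the comments in the example above (conv3D wants channel-last inputs and filters, and takes the strides as `d`):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet.Conv3D import conv3D

floatX = theano.config.floatX

# Symbolic inputs in the usual (batch, channel, d0, d1, d2) layout.
img = T.TensorType(floatX, (False,) * 5)('img')
kern = T.TensorType(floatX, (False,) * 5)('kern')
b = T.vector('b')  # one bias per output channel

out = conv3D(img.dimshuffle(0, 2, 3, 4, 1),   # to (batch, row, column, time, channel)
             kern.dimshuffle(0, 2, 3, 4, 1),  # to (out channel, row, column, time, in channel)
             b,
             d=(1, 1, 1))                     # strides along (row, column, time)
out = out.dimshuffle(0, 4, 1, 2, 3)           # back to (batch, channel, d0, d1, d2)

f = theano.function([img, kern, b], out)
res = f(np.random.rand(2, 3, 8, 8, 8).astype(floatX),
        np.random.rand(4, 3, 3, 3, 3).astype(floatX),
        np.zeros(4, dtype=floatX))
print(res.shape)  # (2, 4, 6, 6, 6) for a valid convolution with stride 1
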
Example #2
 def apply(self, dataset, can_fit=False):
     X = dataset.get_topological_view()
     d = len(X.shape) - 2
     assert d in [2, 3]
     assert X.dtype == 'float32' or X.dtype == 'float64'
     if d == 2:
         X = X.reshape([X.shape[0], X.shape[1], X.shape[2], 1, X.shape[3]])
     kernel_size = 1
     kernel_shape = [X.shape[-1]]
     for factor in self.sampling_factor:
         kernel_size *= factor
         kernel_shape.append(factor)
     if d == 2:
         kernel_shape.append(1)
     kernel_shape.append(X.shape[-1])
     kernel_value = 1. / float(kernel_size)
     kernel = np.zeros(kernel_shape, dtype=X.dtype)
     for i in xrange(X.shape[-1]):
         kernel[i, :, :, :, i] = kernel_value
     from theano.tensor.nnet.Conv3D import conv3D
     X_var = T.TensorType(broadcastable=[s == 1 for s in X.shape],
                          dtype=X.dtype)()
     downsampled = conv3D(X_var, kernel, np.zeros(X.shape[-1], X.dtype),
                          kernel_shape[1:-1])
     f = function([X_var], downsampled)
     X = f(X)
     if d == 2:
         X = X.reshape([X.shape[0], X.shape[1], X.shape[2], X.shape[4]])
     dataset.set_topological_view(X)
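
The preprocessor above downsamples by mean pooling: the kernel it builds has one output channel per input channel, each averaging a sampling_factor-sized block of that channel only, and conv3D is then applied with stride equal to the kernel shape. A small NumPy sketch of the same kernel construction, assuming two channels and a (2, 2) factor (the extra axis of length 1 matches the d == 2 branch):

import numpy as np

channels = 2
sampling_factor = (2, 2)

# kernel shape: (out channel, row, column, time, in channel)
kernel_shape = [channels] + list(sampling_factor) + [1, channels]
kernel = np.zeros(kernel_shape, dtype='float32')
for i in range(channels):
    # output channel i averages a block of input channel i only
    kernel[i, :, :, :, i] = 1.0 / np.prod(sampling_factor)

print(kernel.shape)           # (2, 2, 2, 1, 2)
print(kernel[0, :, :, 0, 0])  # [[0.25 0.25], [0.25 0.25]]
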
Example #3
def local_conv3d_cpu(node):
    if not isinstance(node.op, AbstractConv3d):
        return None

    img, kern = node.inputs
    if ((not isinstance(img.type, TensorType)
         or not isinstance(kern.type, TensorType))):
        return None
    if node.op.border_mode not in ['valid', (0, 0, 0)]:
        return None
    if node.op.filter_dilation != (1, 1, 1):
        return None

    bias = theano.tensor.zeros_like(kern[:, 0, 0, 0, 0])

    # need to flip the kernel if necessary (conv3D does not flip)
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]

    # conv3D expects shape (batch, row, column, time, channel)
    img = img.dimshuffle(0, 2, 3, 4, 1)
    kern = kern.dimshuffle(0, 2, 3, 4, 1)

    rval = conv3D(img, kern, bias, node.op.subsample)
    copy_stack_trace(node.outputs[0], rval)
    rval = rval.dimshuffle(0, 4, 1, 2, 3)

    return [rval]
Example #4
 def apply(self, dataset, can_fit=False):
     X = dataset.get_topological_view()
     d = len(X.shape) - 2
     assert d in [2, 3]
     assert X.dtype == 'float32' or X.dtype == 'float64'
     if d == 2:
         X = X.reshape([X.shape[0], X.shape[1], X.shape[2], 1, X.shape[3]])
     kernel_size = 1
     kernel_shape = [X.shape[-1]]
     for factor in self.sampling_factor:
         kernel_size *= factor
         kernel_shape.append(factor)
     if d == 2:
         kernel_shape.append(1)
     kernel_shape.append(X.shape[-1])
     kernel_value = 1. / float(kernel_size)
     kernel = np.zeros(kernel_shape, dtype=X.dtype)
     for i in xrange(X.shape[-1]):
         kernel[i, :, :, :, i] = kernel_value
     from theano.tensor.nnet.Conv3D import conv3D
     X_var = T.TensorType(broadcastable=[s == 1 for s in X.shape],
                          dtype=X.dtype)()
     downsampled = conv3D(X_var, kernel, np.zeros(X.shape[-1], X.dtype),
                          kernel_shape[1:-1])
     f = function([X_var], downsampled)
     X = f(X)
     if d == 2:
         X = X.reshape([X.shape[0], X.shape[1], X.shape[2], X.shape[4]])
     dataset.set_topological_view(X)
Example #5
    def apply(self, graph):
        in_vw = graph.read_key(key="input")
        num_filters = graph.read_key(key="num_filters")
        filter_size = graph.read_key(key="filter_size")
        stride = graph.read_key_with_default(key="stride", default=(1, 1, 1))
        pad = graph.read_key_with_default(key="pad", default="valid")
        include_bias = graph.read_key_with_default(key="include_bias",
                                                   default=False)
        assert len(filter_size) == 3
        assert pad == "valid"

        # create weight
        num_channels = in_vw.shape[1]
        filter_shape = (num_filters, num_channels) + tuple(filter_size)
        W = th_utils.read_key_with_state_default(
            graph=graph,
            key="weight",
            tags={"weight": True,
                  "linear_weight": True,
                  "in_axes": (1,),
                  "out_axes": (0,),
                  "shape": filter_shape,
                  "dtype": fX},
            state_tags={"parameter": True,
                        "state": True}
        ).var
        # create bias
        if include_bias:
            b = th_utils.read_key_with_state_default(
                graph=graph,
                key="bias",
                tags={"bias": True,
                      "shape": (num_filters,),
                      "dtype": fX},
                state_tags={"parameter": True,
                            "state": True}
            ).var
        else:
            b = T.zeros(num_filters)

        from theano.tensor.nnet.Conv3D import conv3D
        # conv3D takes V in order: (batch, row, column, time, in channel)
        # and W in order: (out channel, row, column, time ,in channel)
        # but we keep the dimensions that W is stored in consistent with other
        # convolutions, so we have to dimshuffle here
        out_var = conv3D(V=in_vw.variable.dimshuffle(0, 2, 3, 4, 1),
                         W=W.dimshuffle(0, 2, 3, 4, 1),
                         b=b,
                         d=stride)

        out_shape = conv_output_shape(input_shape=in_vw.shape,
                                      num_filters=num_filters,
                                      axes=(2, 3, 4),
                                      conv_shape=filter_size,
                                      strides=stride,
                                      pads=(0, 0, 0))

        out_vw = VariableWrapper(out_var, out_shape)
        graph.write_key(key="output", value=out_vw)
Example #6
    def compute_output(self, network, in_vw):
        # gather hyperparameters
        num_filters = network.find_hyperparameter(["num_filters"])
        filter_size = network.find_hyperparameter(["filter_size"])
        stride = network.find_hyperparameter(["conv_stride", "stride"],
                                             (1, 1, 1))
        pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
        inits = list(toolz.concat(network.find_hyperparameters(
            ["inits"],
            [])))
        include_bias = network.find_hyperparameter(["include_bias"], False)
        assert len(filter_size) == 3
        assert pad == "valid"

        # create weight
        num_channels = in_vw.shape[1]
        filter_shape = (num_filters, num_channels) + tuple(filter_size)
        W = network.create_vw(
            name="weight",
            is_shared=True,
            shape=filter_shape,
            tags={"parameter", "weight"},
            inits=inits,
        ).variable
        # create bias
        if include_bias:
            b = network.create_vw(
                name="bias",
                is_shared=True,
                shape=(num_filters,),
                tags={"parameter", "bias"},
                inits=inits,
            ).variable
        else:
            b = T.zeros(num_filters)

        from theano.tensor.nnet.Conv3D import conv3D
        # conv3D takes V in order: (batch, row, column, time, in channel)
        # and W in order: (out channel, row, column, time ,in channel)
        # but we keep the dimensions that W is stored in consistent with other
        # convolutions, so we have to dimshuffle here
        out_var = conv3D(V=in_vw.variable.dimshuffle(0, 2, 3, 4, 1),
                         W=W.dimshuffle(0, 2, 3, 4, 1),
                         b=b,
                         d=stride)

        out_shape = conv_output_shape(input_shape=in_vw.shape,
                                      num_filters=num_filters,
                                      axes=(2, 3, 4),
                                      conv_shape=filter_size,
                                      strides=stride,
                                      pads=(0, 0, 0))

        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        )
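
`conv_output_shape` here is a framework helper; for pad="valid" (pads=(0, 0, 0)) it should reduce to the standard formula out = (in - filter) // stride + 1 on the three convolved axes. A hypothetical stand-alone equivalent, for reference only:

def valid_conv3d_output_shape(input_shape, num_filters, filter_size, strides):
    # input_shape is (batch, channel, d0, d1, d2); output channels become
    # num_filters and each spatial axis shrinks by the valid-conv rule.
    batch = input_shape[0]
    spatial = tuple((i - f) // s + 1
                    for i, f, s in zip(input_shape[2:], filter_size, strides))
    return (batch, num_filters) + spatial

print(valid_conv3d_output_shape((2, 3, 8, 8, 8), 4, (3, 3, 3), (1, 1, 1)))
# (2, 4, 6, 6, 6)
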
Example #7
    def output(self, input, is_train):
        input = super(ConvLayer3d, self).output(input, is_train)

        u = conv3D(input.dimshuffle(0, 2, 3, 4, 1), self.filters,
                   self.b, d=self.s).dimshuffle(0, 4, 1, 2, 3)

        # TODO: test whether this is really the right place to add the bias
        return self._activate(u, is_train)
Example #8
    def _forward(self):
        inpt = self.inpt

        self.weights = self.declare(
            (self.n_output, self.filter_depth, self.n_inpt, self.filter_height,
             self.filter_width))
        self.bias = self.declare((self.n_output, ))

        if self.border_mode == 'same':
            pad_dim1 = self.filter_height - 1
            pad_dim2 = self.filter_width - 1
            pad_dim3 = self.filter_depth - 1

            if pad_dim1 > 0 or pad_dim2 > 0 or pad_dim3 > 0:
                output_shape = (inpt.shape[0], inpt.shape[1] + pad_dim3,
                                inpt.shape[2], inpt.shape[3] + pad_dim1,
                                inpt.shape[4] + pad_dim2)
                big_zero = T.zeros(output_shape)
                indices = (slice(None),
                           slice(pad_dim3 // 2,
                                 inpt.shape[1] + pad_dim3 // 2), slice(None),
                           slice(pad_dim1 // 2, inpt.shape[3] + pad_dim1 // 2),
                           slice(pad_dim2 // 2, inpt.shape[4] + pad_dim2 // 2))

                inpt = T.set_subtensor(big_zero[indices], inpt)

        #print '@basic.py implementation: ', self.implementation

        if self.implementation == 'conv3d2d':
            self.output_in = conv3d(signals=inpt, filters=self.weights)
            if self.use_bias:
                self.output_in = self.output_in + self.bias.dimshuffle(
                    'x', 'x', 0, 'x', 'x')
        elif self.implementation == 'conv3D':
            filters_flip = self.weights[:, ::-1, :, ::-1, ::-1]
            bias = self.bias if self.use_bias else T.zeros(self.bias.shape)
            self.output_in = conv3D(V=inpt.dimshuffle(0, 3, 4, 1, 2),
                                    W=filters_flip.dimshuffle(0, 3, 4, 1, 2),
                                    b=bias,
                                    d=(1, 1, 1))
            self.output_in = self.output_in.dimshuffle(0, 3, 4, 1, 2)
        elif self.implementation == 'dnn_conv3d':
            self.output_in = theano.sandbox.cuda.dnn.dnn_conv3d(
                img=inpt.dimshuffle(0, 2, 1, 3, 4),
                kerns=self.weights.dimshuffle(0, 2, 1, 3, 4))
            self.output_in = self.output_in.dimshuffle(0, 2, 1, 3, 4)
            if self.use_bias:
                self.output_in = self.output_in + self.bias.dimshuffle(
                    'x', 'x', 0, 'x', 'x')
        else:
            raise NotImplementedError(
                'This class only supports conv3d2d, conv3D and dnn_conv3d')

        self.output = self.output_in

        if self.strides != (1, 1, 1):
            self.output = self.output[:, ::self.strides[2], :,
                                      ::self.strides[0], ::self.strides[1]]
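
Both tricks used by _forward above can be checked in isolation: border_mode == 'same' is emulated by writing the input into a zero buffer with T.set_subtensor before a valid convolution, and strides other than (1, 1, 1) are emulated by slicing the stride-1 output afterwards. A one-dimensional NumPy sketch of the same two ideas (shapes and names assumed):

import numpy as np

x = np.arange(5, dtype='float32')      # stand-in for one spatial axis
filt = np.ones(3, dtype='float32')     # symmetric filter, so flipping is moot

# 'same' output length: centre x inside a zero buffer of length len(x) + len(filt) - 1
pad = len(filt) - 1
buf = np.zeros(len(x) + pad, dtype=x.dtype)
buf[pad // 2:pad // 2 + len(x)] = x
same = np.convolve(buf, filt, mode='valid')   # length == len(x)

# stride 2: compute the dense (stride-1) valid output, then take every 2nd element
dense = np.convolve(x, filt, mode='valid')
strided = dense[::2]                          # equals a stride-2 valid convolution

print(same.shape, strided)
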
Example #9
    def compute_output(self, network, in_vw):
        # gather hyperparameters
        num_filters = network.find_hyperparameter(["num_filters"])
        filter_size = network.find_hyperparameter(["filter_size"])
        stride = network.find_hyperparameter(["conv_stride", "stride"],
                                             (1, 1, 1))
        pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
        include_bias = network.find_hyperparameter(["include_bias"], False)
        assert len(filter_size) == 3
        assert pad == "valid"

        # create weight
        num_channels = in_vw.shape[1]
        filter_shape = (num_filters, num_channels) + tuple(filter_size)
        W = network.create_vw(
            name="weight",
            is_shared=True,
            shape=filter_shape,
            tags={"parameter", "weight"},
            default_inits=[],
        ).variable
        # create bias
        if include_bias:
            b = network.create_vw(
                name="bias",
                is_shared=True,
                shape=(num_filters, ),
                tags={"parameter", "bias"},
                default_inits=[],
            ).variable
        else:
            b = T.zeros(num_filters)

        from theano.tensor.nnet.Conv3D import conv3D
        # conv3D takes V in order: (batch, row, column, time, in channel)
        # and W in order: (out channel, row, column, time ,in channel)
        # but we keep the dimensions that W is stored in consistent with other
        # convolutions, so we have to dimshuffle here
        out_var = conv3D(V=in_vw.variable.dimshuffle(0, 2, 3, 4, 1),
                         W=W.dimshuffle(0, 2, 3, 4, 1),
                         b=b,
                         d=stride)

        out_shape = conv_output_shape(input_shape=in_vw.shape,
                                      num_filters=num_filters,
                                      axes=(2, 3, 4),
                                      conv_shape=filter_size,
                                      strides=stride,
                                      pads=(0, 0, 0))

        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        )
Example #10
    def output(self, input, is_train):
        input = super(ConvLayer3d, self).output(input, is_train)

        u = conv3D(input.dimshuffle(0, 2, 3, 4, 1),
                   self.filters,
                   self.b,
                   d=self.s).dimshuffle(0, 4, 1, 2, 3)

        # TODO: test whether this is really the right place to add the bias
        return self._activate(u, is_train)
Example #11
    def test_undefined_grad_grad(self):
        # tests that undefined grads are caught in the grad method

        V = theano.tensor.TensorType(dtype=config.floatX, broadcastable=(False, False, False, False, False))()
        W = theano.tensor.TensorType(dtype=config.floatX, broadcastable=(False, False, False, False, False))()
        b = theano.tensor.vector()
        d = theano.tensor.ivector()

        Z = conv3D(V, W, b, d)

        self.assertRaises(TypeError, theano.gradient.grad, Z.sum(), d)
Example #12
    def grad(self, inputs, output_gradients):
        C, d, WShape, B = inputs
        dLdA, = output_gradients

        z = T.zeros_like(C[0, 0, 0, 0, :])
        dLdC = convTransp3D(dLdA, z, d, B, C.shape[1:4])
        # d actually does affect the outputs, so it's not disconnected
        dLdd = grad_undefined(self, 1, d)
        # The shape of the weights doesn't affect the output elements
        dLdWShape = DisconnectedType()()
        dLdB = conv3D(C, dLdA, T.zeros_like(B[0, 0, 0, 0, :]), d)

        return [dLdC, dLdd, dLdWShape, dLdB]
Example #13
    def test_undefined_grad_grad(self):
        # tests that undefined grads are caught in the grad method

        V = theano.tensor.TensorType(dtype=config.floatX,
                                     broadcastable=(False, False, False, False, False))()
        W = theano.tensor.TensorType(dtype=config.floatX,
                                     broadcastable=(False, False, False, False, False))()
        b = theano.tensor.vector()
        d = theano.tensor.ivector()

        Z = conv3D(V, W, b, d)

        self.assertRaises(TypeError, theano.gradient.grad, Z.sum(), d)
Example #14
    def grad(self, inputs, output_gradients):
        C, d, WShape, B = inputs
        dLdA, = output_gradients

        z = T.zeros_like(C[0, 0, 0, 0, :])
        dLdC = convTransp3D(dLdA, z, d, B, C.shape[1:4])
        # d actually does affect the outputs, so it's not disconnected
        dLdd = grad_undefined(self, 1, d)
        # The shape of the weights doesn't affect the output elements
        dLdWShape = DisconnectedType()()
        dLdB = conv3D(C, dLdA, T.zeros_like(B[0, 0, 0, 0, :]), d)

        return [dLdC, dLdd, dLdWShape, dLdB]
Example #15
def test_undefined_grad_grad():
    #tests that undefined grads are caught in the grad method

    V = theano.tensor.TensorType(dtype=config.floatX,
            broadcastable=(False, False, False, False, False))()
    W = theano.tensor.TensorType(dtype=config.floatX,
            broadcastable=(False, False, False, False, False))()
    b = theano.tensor.vector()
    d = theano.tensor.ivector()

    Z = conv3D(V, W, b, d)

    try:
        g = theano.gradient.grad(Z.sum(), d)
        assert False
    except TypeError:
        pass
Example #16
    def setUp(self):
        super(TestConv3D, self).setUp()
        utt.seed_rng()
        self.rng = N.random.RandomState(utt.fetch_seed())

        mode = copy.copy(theano.compile.mode.get_default_mode())
        mode.check_py_code = False

        self.W = shared(N.ndarray(shape=(1, 1, 1, 1, 1), dtype=floatX))
        self.b = shared(N.zeros(1, dtype=floatX))
        self.rb = shared(N.zeros(1, dtype=floatX))
        self.V = shared(N.ndarray(shape=(1, 1, 1, 1, 1), dtype=floatX))
        self.d = shared(N.ndarray(shape=(3, ), dtype=int))

        self.H = conv3D(self.V, self.W, self.b, self.d)
        self.H_func = function([], self.H, mode=mode)
        self.H_shape_func = function([], self.H.shape, mode=mode)

        self.RShape = T.vector(dtype='int64')

        self.otherH = T.TensorType(floatX,
                        (False, False, False, False, False))(name='otherH')
        self.transp = convTransp3D(self.W, self.rb, self.d,
                                   self.otherH, self.RShape)
        self.transp_func = function([self.otherH, self.RShape],
                                    self.transp, mode=mode)

        self.R = convTransp3D(self.W, self.rb, self.d, self.H, self.RShape)
        self.R_func = function([self.RShape], self.R, mode=mode)
        self.R_shape_func = function([self.RShape], self.R.shape)

        self.reconsObj = T.sum(T.sqr(self.V - self.R))
        self.reconsObjFunc = function([self.RShape], self.reconsObj, mode=mode)

        self.gradientsFunc = function([self.RShape],
                        [T.grad(self.reconsObj, self.W), T.grad(self.reconsObj,
                        self.H), T.grad(self.reconsObj, self.V),
                         T.grad(self.reconsObj, self.b)], mode=mode)

        self.check_c_against_python = function([self.RShape],
                        [T.grad(self.reconsObj, self.W), T.grad(self.reconsObj,
                        self.H), T.grad(self.reconsObj, self.V),
                         T.grad(self.reconsObj, self.b)], mode='DEBUG_MODE')

        self.dCdW_shape_func = function([self.RShape],
                        T.grad(self.reconsObj, self.W).shape, mode=mode)
Example #17
def test_undefined_grad_grad():
    #tests that undefined grads are caught in the grad method

    V = theano.tensor.TensorType(dtype=config.floatX,
                                 broadcastable=(False, False, False, False,
                                                False))()
    W = theano.tensor.TensorType(dtype=config.floatX,
                                 broadcastable=(False, False, False, False,
                                                False))()
    b = theano.tensor.vector()
    d = theano.tensor.ivector()

    Z = conv3D(V, W, b, d)

    try:
        g = theano.gradient.grad(Z.sum(), d)
        assert False
    except TypeError:
        pass
Example #18
    def setUp(self):

        utt.seed_rng()

        self.rng = N.random.RandomState(utt.fetch_seed())

        mode = copy.copy(theano.compile.mode.get_default_mode())
        mode.check_py_code = False

        self.W  = shared(N.ndarray(shape=(1,1,1,1,1), dtype=floatX))
        self.b  = shared(N.zeros(1,dtype=floatX))
        self.rb = shared(N.zeros(1,dtype=floatX))
        self.V  = shared(N.ndarray(shape=(1,1,1,1,1), dtype=floatX))
        self.d  = shared(N.ndarray(shape=(3,),dtype=int))

        self.H = conv3D(self.V, self.W, self.b, self.d)

        self.H_func = function([], self.H, mode = mode)

        self.H_shape_func = function( [], self.H.shape, mode = mode)

        self.RShape = T.vector(dtype='int64')

        self.otherH = T.TensorType(floatX,(False,False,False,False,False))(name='otherH')
        self.transp = convTransp3D(self.W, self.rb, self.d, self.otherH, self.RShape)
        self.transp_func = function([self.otherH,self.RShape],self.transp, mode=mode)

        self.R = convTransp3D(self.W, self.rb, self.d, self.H, self.RShape)
        self.R_func = function([self.RShape], self.R, mode = mode)
        self.R_shape_func = function([self.RShape], self.R.shape)

        self.reconsObj = T.sum(T.sqr(self.V-self.R))
        self.reconsObjFunc = function([self.RShape], self.reconsObj, mode=mode)

        self.gradientsFunc = function(
            [self.RShape],
            [T.grad(self.reconsObj, self.W), T.grad(self.reconsObj, self.H),
             T.grad(self.reconsObj, self.V), T.grad(self.reconsObj, self.b)],
            mode=mode)
        self.check_c_against_python = function(
            [self.RShape],
            [T.grad(self.reconsObj, self.W), T.grad(self.reconsObj, self.H),
             T.grad(self.reconsObj, self.V), T.grad(self.reconsObj, self.b)],
            mode='DEBUG_MODE')

        self.dCdW_shape_func = function(
            [self.RShape], T.grad(self.reconsObj, self.W).shape, mode=mode)
Example #19
    def grad(self, inputs, output_gradients):
        W, b, d, H, RShape = inputs
        dCdR, = output_gradients
        dCdH = conv3D(dCdR, W, T.zeros_like(H[0, 0, 0, 0, :]), d)
        WShape = W.shape
        dCdW = convGrad3D(dCdR, d, WShape, H)
        dCdb = T.sum(dCdR, axis=(0, 1, 2, 3))
        # not differentiable, since d affects the output elements
        dCdd = grad_undefined(self, 2, d)
        # disconnected, since RShape just determines the output shape
        dCdRShape = DisconnectedType()()

        if 'name' in dir(dCdR) and dCdR.name is not None:
            dCdR_name = dCdR.name
        else:
            dCdR_name = 'anon_dCdR'

        if 'name' in dir(H) and H.name is not None:
            H_name = H.name
        else:
            H_name = 'anon_H'

        if 'name' in dir(W) and W.name is not None:
            W_name = W.name
        else:
            W_name = 'anon_W'

        if 'name' in dir(b) and b.name is not None:
            b_name = b.name
        else:
            b_name = 'anon_b'


        dCdW.name = 'ConvTransp3D_dCdW.H='+H_name+',dCdR='+dCdR_name+',W='+W_name
        dCdb.name = 'ConvTransp3D_dCdb.H='+H_name+',dCdR='+dCdR_name+',W='+W_name+',b='+b_name
        dCdH.name = 'ConvTransp3D_dCdH.H=' + H_name + ',dCdR=' + dCdR_name

        return [dCdW,  dCdb, dCdd, dCdH, dCdRShape]
Example #20
    def grad(self, inputs, output_gradients):
        W, b, d, H, RShape = inputs
        dCdR, = output_gradients
        dCdH = conv3D(dCdR, W, T.zeros_like(H[0, 0, 0, 0, :]), d)
        WShape = W.shape
        dCdW = convGrad3D(dCdR, d, WShape, H)
        dCdb = T.sum(dCdR, axis=(0, 1, 2, 3))
        # not differentiable, since d affects the output elements
        dCdd = grad_undefined(self, 2, d)
        # disconnected, since RShape just determines the output shape
        dCdRShape = DisconnectedType()()

        if 'name' in dir(dCdR) and dCdR.name is not None:
            dCdR_name = dCdR.name
        else:
            dCdR_name = 'anon_dCdR'

        if 'name' in dir(H) and H.name is not None:
            H_name = H.name
        else:
            H_name = 'anon_H'

        if 'name' in dir(W) and W.name is not None:
            W_name = W.name
        else:
            W_name = 'anon_W'

        if 'name' in dir(b) and b.name is not None:
            b_name = b.name
        else:
            b_name = 'anon_b'

        dCdW.name = 'ConvTransp3D_dCdW.H='+H_name+',dCdR='+dCdR_name+',W='+W_name
        dCdb.name = 'ConvTransp3D_dCdb.H='+H_name+',dCdR='+dCdR_name+',W='+W_name+',b='+b_name
        dCdH.name = 'ConvTransp3D_dCdH.H=' + H_name + ',dCdR=' + dCdR_name

        return [dCdW,  dCdb, dCdd, dCdH, dCdRShape]
Example #21
    def __call__(self, t):
        output = conv3D(self.V + t * self.dV, self.W + t * self.dW,
                        self.b + t * self.db, self.d)

        return output
Example #22
    def setUp(self):
        super(TestConv3D, self).setUp()
        utt.seed_rng()
        self.rng = N.random.RandomState(utt.fetch_seed())

        mode = copy.copy(theano.compile.mode.get_default_mode())
        mode.check_py_code = False

        self.W = shared(N.ndarray(shape=(1, 1, 1, 1, 1), dtype=floatX))
        self.W.name = 'W'
        self.b = shared(N.zeros(1, dtype=floatX))
        self.b.name = 'b'
        self.rb = shared(N.zeros(1, dtype=floatX))
        self.rb.name = 'rb'
        self.V = shared(N.ndarray(shape=(1, 1, 1, 1, 1), dtype=floatX))
        self.V.name = 'V'
        self.d = shared(N.ndarray(shape=(3, ), dtype=int))
        self.d.name = 'd'

        self.H = conv3D(self.V, self.W, self.b, self.d)
        self.H.name = 'H'
        self.H_func = function([], self.H, mode=mode)
        self.H_shape_func = function([], self.H.shape, mode=mode)

        self.RShape = T.vector(dtype='int64')
        self.RShape.name = 'RShape'

        self.otherH = T.TensorType(
            floatX, (False, False, False, False, False))(name='otherH')
        self.transp = convTransp3D(self.W, self.rb, self.d, self.otherH,
                                   self.RShape)
        self.transp.name = 'transp'
        self.transp_func = function([self.otherH, self.RShape],
                                    self.transp,
                                    mode=mode)

        self.R = convTransp3D(self.W, self.rb, self.d, self.H, self.RShape)
        self.R.name = 'R'
        self.R_func = function([self.RShape], self.R, mode=mode)
        self.R_shape_func = function([self.RShape], self.R.shape)

        diff = self.V - self.R
        diff.name = 'diff'
        sqr = T.sqr(diff)
        sqr.name = 'sqr'
        self.reconsObj = T.sum(sqr)
        self.reconsObj.name = 'reconsObj'
        self.reconsObjFunc = function([self.RShape], self.reconsObj, mode=mode)

        W_grad = T.grad(self.reconsObj, self.W)

        self.gradientsFunc = function([self.RShape], [
            W_grad,
            T.grad(self.reconsObj, self.H),
            T.grad(self.reconsObj, self.V),
            T.grad(self.reconsObj, self.b)
        ],
                                      mode=mode)

        self.check_c_against_python = function([self.RShape], [
            T.grad(self.reconsObj, self.W),
            T.grad(self.reconsObj, self.H),
            T.grad(self.reconsObj, self.V),
            T.grad(self.reconsObj, self.b)
        ],
                                               mode='DEBUG_MODE')

        self.dCdW_shape_func = function([self.RShape],
                                        T.grad(self.reconsObj, self.W).shape,
                                        mode=mode)
Example #23
    def __call__(self, t):
        output = conv3D(self.V + t * self.dV, self.W + t * self.dW,
                        self.b + t * self.db, self.d)

        return output
Example #24
    def _forward(self):
        inpt = self.inpt

        self.weights = self.declare(
            (self.n_output, self.filter_depth, self.n_inpt,
             self.filter_height, self.filter_width)
        )
        self.bias = self.declare((self.n_output,))

        if self.border_mode == 'same':
            pad_dim1 = self.filter_height - 1
            pad_dim2 = self.filter_width - 1
            pad_dim3 = self.filter_depth - 1

            if pad_dim1 > 0 or pad_dim2 > 0 or pad_dim3 > 0:
                output_shape = (
                    inpt.shape[0], inpt.shape[1] + pad_dim3,
                    inpt.shape[2], inpt.shape[3] + pad_dim1,
                    inpt.shape[4] + pad_dim2
                )
                big_zero = T.zeros(output_shape)
                indices = (
                    slice(None),
                    slice(pad_dim3 // 2, inpt.shape[1] + pad_dim3 // 2),
                    slice(None),
                    slice(pad_dim1 // 2, inpt.shape[3] + pad_dim1 // 2),
                    slice(pad_dim2 // 2, inpt.shape[4] + pad_dim2 // 2)
                )

                inpt = T.set_subtensor(big_zero[indices], inpt)

        #print '@basic.py implementation: ', self.implementation

        if self.implementation == 'conv3d2d':
            self.output_in = conv3d(
                signals=inpt,
                filters=self.weights
            )
            if self.use_bias:
                self.output_in = self.output_in + self.bias.dimshuffle('x', 'x', 0, 'x', 'x')
        elif self.implementation == 'conv3D':
            filters_flip = self.weights[:, ::-1, :, ::-1, ::-1]
            bias = self.bias if self.use_bias else T.zeros(self.bias.shape)
            self.output_in = conv3D(
                V=inpt.dimshuffle(0, 3, 4, 1, 2),
                W=filters_flip.dimshuffle(0, 3, 4, 1, 2),
                b=bias,
                d=(1, 1, 1)
            )
            self.output_in = self.output_in.dimshuffle(0, 3, 4, 1, 2)
        elif self.implementation == 'dnn_conv3d':
            self.output_in = theano.sandbox.cuda.dnn.dnn_conv3d(
                img=inpt.dimshuffle(0, 2, 1, 3, 4),
                kerns=self.weights.dimshuffle(0, 2, 1, 3, 4)
            )
            self.output_in = self.output_in.dimshuffle(0, 2, 1, 3, 4)
            if self.use_bias:
                self.output_in = self.output_in + self.bias.dimshuffle('x', 'x', 0, 'x', 'x')
        else:
            raise NotImplementedError('This class only supports conv3d2d, conv3D and dnn_conv3d')

        self.output = self.output_in

        if self.strides != (1, 1, 1):
            self.output = self.output[:, ::self.strides[2], :, ::self.strides[0], ::self.strides[1]]