Example #1
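The snippets below are shown without their imports. A minimal preamble that covers them, assuming Theano 0.9 or later (note that max_pool_3d in Example #9 and the Weight class in Examples #3, #4, #6 and #7 are project-side helpers, not Theano APIs):

import numpy as np
import theano
import theano.tensor as T
from theano import tensor
from theano.tensor import nnet
from theano.tensor.nnet import conv3d
from theano.tensor.nnet.abstract_conv import conv3d_grad_wrt_inputs
from theano.tensor.signal import pool as pools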
    def __init__(self,
                 input,
                 ker_size=(3, 3, 3),
                 bank_size=1,
                 input_maps=1,
                 output_maps=1,
                 activation=T.tanh):
        self.params = ()
        self.input = input

        # unpack the 3-D kernel size; the bank below factorises an
        # (n_x, n_y, n_z) kernel into three orthogonal slabs
        n_x, n_y, n_z = ker_size

        x_params = ()
        y_params = ()
        z_params = ()

        for i in range(bank_size):
            # slab of size 1 x n_y x n_z: effectively a 2-D convolution in
            # the (y, z) plane, swept along the x axis
            x_w = self.make_params((output_maps, input_maps, n_y, n_z))
            conv2d_xi = nnet.conv3d(input=self.input,
                                    filters=x_w.reshape(
                                        (output_maps, input_maps, 1, n_y,
                                         n_z)),
                                    border_mode='half')
            x_params = x_params + (x_w, )

            y_w = self.make_params((output_maps, input_maps, n_x, n_z))

            conv2d_yi = nnet.conv3d(input=self.input,
                                    filters=y_w.reshape(
                                        (output_maps, input_maps, n_x, 1,
                                         n_z)),
                                    border_mode='half')
            y_params = y_params + (y_w, )

            z_w = self.make_params((output_maps, input_maps, n_x, n_y))

            conv2d_zi = nnet.conv3d(input=self.input,
                                    filters=z_w.reshape(
                                        (output_maps, input_maps, n_x, n_y,
                                         1)),
                                    border_mode='half')
            z_params = z_params + (z_w, )

            # average the three slab responses and accumulate over the bank
            if i == 0:
                self.output = (conv2d_xi + conv2d_yi + conv2d_zi) / 3.0
            else:
                self.output = self.output + (conv2d_xi + conv2d_yi +
                                             conv2d_zi) / 3.0

        # only the bias from this call is used; the factorised slab filters
        # above stand in for the full (n_x, n_y, n_z) kernel
        _, b = self.make_params((output_maps, input_maps, n_x, n_y, n_z), True)
        self.output = self.output / bank_size
        self.output = activation(self.output +
                                 b.dimshuffle('x', 0, 'x', 'x', 'x'))
        self.params = x_params + y_params + z_params + (b, )
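
Example #1 calls a make_params helper that is not shown. A plausible minimal reconstruction, assuming a uniformly initialised shared weight and an optional zero bias (the real helper may differ):

    def make_params(self, shape, with_bias=False):
        # hypothetical reconstruction of the undefined helper
        w = theano.shared(
            np.random.uniform(-0.01, 0.01, size=shape).astype(
                theano.config.floatX),
            borrow=True)
        if not with_bias:
            return w
        b = theano.shared(np.zeros((shape[0], ), dtype=theano.config.floatX),
                          borrow=True)
        return w, b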
Example #2
    def conv(self, input):
        # convolve input feature maps with filters
        conv_out = conv3d(
            input=input,
            filters=self.W,
            border_mode='half'
        )
        # add the bias along the output-channel axis
        output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x', 'x')

        return output
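
Both methods rely on border_mode='half', which pads each spatial axis by half the (odd) kernel size so the output keeps the input's spatial extent; a minimal check with made-up shapes:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv3d

x = T.tensor5('x')
w = theano.shared(np.random.randn(4, 1, 3, 3, 3).astype(theano.config.floatX))
f = theano.function([x], conv3d(x, w, border_mode='half'))
print(f(np.zeros((2, 1, 8, 8, 8), dtype=theano.config.floatX)).shape)
# (2, 4, 8, 8, 8): spatial shape preserved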
Example #3
    def __init__(self, prev_layer, filter_shape, padding=None, params=None):

        super().__init__(prev_layer)
        self._input_shape = prev_layer._input_shape
        n_c = filter_shape[0]
        n_x = self._input_shape[2]
        n_neighbor_d = filter_shape[1]
        n_neighbor_h = filter_shape[2]
        n_neighbor_w = filter_shape[3]

        # compute all four gates in one convolution by stacking them along
        # the output-channel axis (hence 4 * n_c output maps)
        self._gate_filter_shape = [4 * n_c, 1, n_x + n_c, 1, 1]

        self._filter_shape = [
            filter_shape[0],       # num out hidden representation
            filter_shape[1],       # time
            self._input_shape[2],  # in channel
            filter_shape[2],       # height
            filter_shape[3],       # width
        ]
        self._padding = padding

        # signals: (batch,       in channel, depth_i, row_i, column_i)
        # filters: (out channel, in channel, depth_f, row_f, column_f)

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        if params is None:
            self.W = Weight(self._filter_shape, is_bias=False)
            self.b = Weight((filter_shape[0], ),
                            is_bias=True,
                            mean=0.1,
                            filler='constant')
        else:
            self.W = params[0]
            self.b = params[1]

        self.params = [self.W, self.b]

        if padding is None:
            # default to 'same'-style padding on the time (1), height (3)
            # and width (4) axes; batch and channel axes are never padded
            self._padding = [
                0,
                int((filter_shape[1] - 1) / 2), 0,
                int((filter_shape[2] - 1) / 2),
                int((filter_shape[3] - 1) / 2)
            ]

        # padded_input is built in set_output() (see Example #4) by
        # zero-padding the previous layer's output with self._padding
        self._output = conv3d(padded_input, self.W.val.dimshuffle(0, 2, 1, 3, 4)).dimshuffle(0, 2, 1, 3, 4)\
            + self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
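
The 4 * n_c trick above lets one convolution produce all four recurrent gates at once; a minimal sketch of how such a stacked output could be sliced afterwards (the function and tensor names are hypothetical):

import theano.tensor as T

def split_gates(gates, n_c):
    # gates: (batch, time, 4 * n_c, height, width)
    i = T.nnet.sigmoid(gates[:, :, 0 * n_c:1 * n_c])  # input gate
    f = T.nnet.sigmoid(gates[:, :, 1 * n_c:2 * n_c])  # forget gate
    o = T.nnet.sigmoid(gates[:, :, 2 * n_c:3 * n_c])  # output gate
    g = T.tanh(gates[:, :, 3 * n_c:4 * n_c])          # candidate update
    return i, f, o, g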
Example #4
    def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        # allocate a zero tensor with padded time (axis 1), height (axis 3)
        # and width (axis 4); batch and channel axes keep their size
        padded_input = tensor.alloc(0.0,  # value to fill the tensor
                                    input_shape[0],
                                    input_shape[1] + 2 * padding[1],
                                    input_shape[2],
                                    input_shape[3] + 2 * padding[3],
                                    input_shape[4] + 2 * padding[4])

        # copy the previous layer's output into the centre of the zero tensor
        padded_input = tensor.set_subtensor(
            padded_input[:,
                         padding[1]:padding[1] + input_shape[1], :,
                         padding[3]:padding[3] + input_shape[3],
                         padding[4]:padding[4] + input_shape[4]],
            self._prev_layer.output)

        # bias is broadcast along the channel axis (axis 2) of the
        # (batch, time, channel, row, column) layout
        self._output = conv3d(padded_input, self.W.val) + \
            self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
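
The alloc/set_subtensor pair is the usual Theano idiom for zero-padding selected axes. A self-contained sketch of the same idiom with made-up shapes, compilable on its own:

import numpy as np
import theano
from theano import tensor

x = tensor.tensor5('x')                 # (batch, time, channel, row, column)
b, t, c, h, w = 2, 4, 3, 8, 8           # illustrative sizes
pt, ph, pw = 1, 2, 2                    # padding on time, height, width
padded = tensor.alloc(0.0, b, t + 2 * pt, c, h + 2 * ph, w + 2 * pw)
padded = tensor.set_subtensor(
    padded[:, pt:pt + t, :, ph:ph + h, pw:pw + w], x)
f = theano.function([x], padded)
print(f(np.ones((b, t, c, h, w), dtype=theano.config.floatX)).shape)
# (2, 6, 3, 12, 12)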
Example #5
def conv(X,
         w,
         input_shape=None,
         filter_shape=None,
         subsample=(2, 2, 2),
         border_mode=(1, 1, 1),
         conv_mode='conv',
         output_shape=None):
    """ 
    sets up dummy convolutional forward pass and uses its grad as deconv
    currently only tested/working with same padding
    input_shape: (batch size, num input feature maps, voxel height, voxel width, voxel depth)
    filter_shape: (output channels, input channels, filter height, filter width, filter depth)
    """
    if conv_mode == 'conv':
        return conv3d(input=X,
                      filters=w,
                      input_shape=input_shape,
                      filter_shape=filter_shape,
                      border_mode=border_mode,
                      subsample=subsample,
                      filter_flip=True)
    elif conv_mode == 'deconv':
        # input_shape here is the shape of the dummy forward conv's input,
        # i.e. the shape of the deconvolution's output
        if output_shape is None:
            input_shape = (None, None,
                           (input_shape[2] - 1) * subsample[0] +
                           filter_shape[2] - 2 * border_mode[0],
                           (input_shape[3] - 1) * subsample[1] +
                           filter_shape[3] - 2 * border_mode[1],
                           (input_shape[4] - 1) * subsample[2] +
                           filter_shape[4] - 2 * border_mode[2])
        else:
            input_shape = output_shape

        return conv3d_grad_wrt_inputs(
            output_grad=X,
            filters=w,
            input_shape=input_shape,
            filter_shape=filter_shape,
            border_mode=border_mode,
            subsample=subsample,
        )
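
A minimal round-trip sketch of the helper above with made-up shapes, showing that 'deconv' mode undoes the striding of the forward pass (all names and sizes are illustrative):

import theano.tensor as T

X = T.tensor5('X')                      # (batch, channels, h, w, d)
w = T.tensor5('w')                      # (out_ch, in_ch, kh, kw, kd)
# forward: (1, 8, 16, 16, 16) -> (1, 16, 8, 8, 8) with stride 2, pad 1
down = conv(X, w, input_shape=(1, 8, 16, 16, 16),
            filter_shape=(16, 8, 4, 4, 4))
# deconv: maps the (1, 16, 8, 8, 8) feature map back to 16^3 voxels
up = conv(down, w, input_shape=(1, 16, 8, 8, 8),
          filter_shape=(16, 8, 4, 4, 4), conv_mode='deconv')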
Example #6
    def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                    input_shape[0],
                                    input_shape[1] + 2 * padding[1],
                                    input_shape[2],
                                    input_shape[3] + 2 * padding[3],
                                    input_shape[4] + 2 * padding[4])

        padded_input = tensor.set_subtensor(
            padded_input[:,
                         padding[1]:padding[1] + input_shape[1], :,
                         padding[3]:padding[3] + input_shape[3],
                         padding[4]:padding[4] + input_shape[4]],
            self._prev_layer.output)

        # project the fully connected input and reshape it so it can be
        # added elementwise to the 5-D convolutional output
        fc_output = tensor.reshape(
            tensor.dot(self._fc_layer.output, self.Wx.val), self._output_shape)

        # nnet.conv3d expects (batch, channel, time, row, column), so swap
        # the time and channel axes around the convolution
        padded_input = padded_input.dimshuffle(0, 2, 1, 3, 4)

        self._output = conv3d(padded_input, self.Wh.val.dimshuffle(0, 2, 1, 3, 4)).dimshuffle(0, 2, 1, 3, 4)\
            + fc_output + self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
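
The dot-then-reshape step above injects a dense projection into the convolutional feature map; a minimal sketch with made-up shapes:

import theano.tensor as T

fc = T.matrix('fc')                 # (batch, n_fc)
Wx = T.matrix('Wx')                 # (n_fc, time * channel * row * column)
output_shape = (2, 4, 3, 8, 8)      # (batch, time, channel, row, column)
# one matrix multiply yields every voxel of the injected feature map
fc_output = T.reshape(T.dot(fc, Wx), output_shape)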
Example #7
    def set_output(self):
        padding = self._padding
        input_shape = self._input_shape
        if np.sum(self._padding) > 0:
            padded_input = tensor.alloc(0.0,  # Value to fill the tensor
                                        input_shape[0],
                                        input_shape[1] + 2 * padding[1],
                                        input_shape[2],
                                        input_shape[3] + 2 * padding[3],
                                        input_shape[4] + 2 * padding[4])

            padded_input = tensor.set_subtensor(
                padded_input[:,
                             padding[1]:padding[1] + input_shape[1], :,
                             padding[3]:padding[3] + input_shape[3],
                             padding[4]:padding[4] + input_shape[4]],
                self._prev_layer.output)
        else:
            padded_input = self._prev_layer.output
        
        # swap time and channel so nnet.conv3d sees
        # (batch, channel, time, row, column), then swap back afterwards
        padded_input = padded_input.dimshuffle(0, 2, 1, 3, 4)

        self._output = conv3d(padded_input, self.W.val.dimshuffle(0, 2, 1, 3, 4)).dimshuffle(0, 2, 1, 3, 4)\
            + self.b.val.dimshuffle('x', 'x', 0, 'x', 'x')
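
Examples #4, #6 and #7 all keep activations in (batch, time, channel, row, column) order, while nnet.conv3d wants the channel axis second; a minimal sketch of that layout round trip:

import theano.tensor as T
from theano.tensor.nnet import conv3d

x_btchw = T.tensor5('x')        # (batch, time, channel, row, column)
w = T.tensor5('w')              # (out_channel, time, in_channel, row, column)
y = conv3d(x_btchw.dimshuffle(0, 2, 1, 3, 4),
           w.dimshuffle(0, 2, 1, 3, 4)).dimshuffle(0, 2, 1, 3, 4)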
Example #8
    def __init__(self, rng, input, signal_shape, filter_shape,
                 poolsize=(2, 2, 2), stride=None, if_pool=False,
                 if_hidden_pool=False,
                 act=None,
                 share_with=None,
                 tied=None,
                 border_mode='valid'):
        self.input = input

        if share_with:
            self.W = share_with.W
            self.b = share_with.b

            self.W_delta = share_with.W_delta
            self.b_delta = share_with.b_delta

        elif tied:
            # swap the input/output channel axes of the tied 5-D filters
            self.W = tied.W.dimshuffle(1, 0, 2, 3, 4)
            self.b = tied.b

            self.W_delta = tied.W_delta.dimshuffle(1, 0, 2, 3, 4)
            self.b_delta = tied.b_delta

        else:
            fan_in = np.prod(filter_shape[1:])
            poolsize_size = np.prod(poolsize) if poolsize else 1
            fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) / poolsize_size)
            W_bound = np.sqrt(6. / (fan_in + fan_out))
            self.W = theano.shared(
                np.asarray(
                    rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
            b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
            self.b = theano.shared(value=b_values, borrow=True)

            self.W_delta = theano.shared(
                np.zeros(filter_shape, dtype=theano.config.floatX),
                borrow=True
            )

            self.b_delta = theano.shared(value=b_values, borrow=True)

        # convolution
        conv_out = nnet.conv3d(
            input,
            filters=self.W,
            input_shape=signal_shape,
            filter_shape=filter_shape,
            border_mode=border_mode)

        if if_pool:
            # conv_out is (batch, out channel, depth, row, column); pool_3d
            # pools the last three dimensions, which are already the
            # spatial ones
            pooled_out = pools.pool_3d(
                input=conv_out,
                ws=poolsize,
                ignore_border=True)
            tmp = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x', 'x')
        elif if_hidden_pool:
            # pool_2d pools only the trailing (row, column) dimensions
            pooled_out = pools.pool_2d(
                input=conv_out,
                ws=poolsize[:2],
                stride=stride,
                ignore_border=True)
            tmp = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x', 'x')
        else:
            # bias is broadcast along the output-channel axis
            tmp = conv_out + self.b.dimshuffle('x', 0, 'x', 'x', 'x')

        if act == 'tanh':
            self.output = T.tanh(tmp)
        elif act == 'sigmoid':
            self.output = nnet.sigmoid(tmp)
        elif act == 'relu':
            # 0.5 * (x + |x|) == max(x, 0); the 1e-9 keeps activations
            # strictly positive
            self.output = 0.5 * (tmp + abs(tmp)) + 1e-9
        elif act == 'softplus':
            # self.output = T.log2(1+T.exp(tmp))
            self.output = nnet.softplus(tmp)
        else:
            self.output = tmp

        self.get_activation = theano.function(
            [self.input],
            self.output,
            updates=None,
            name='get hidden activation')

        # store parameters of this layer
        self.params = [self.W, self.b]
        self.deltas = [self.W_delta, self.b_delta]
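
A minimal construction sketch for the layer above, with made-up shapes (the enclosing class name ConvLayer3D is hypothetical, as the snippet does not show it):

import numpy as np
import theano
import theano.tensor as T

rng = np.random.RandomState(1234)
x = T.tensor5('x')              # (batch, channel, depth, row, column)
layer = ConvLayer3D(rng, x,
                    signal_shape=(2, 1, 8, 16, 16),
                    filter_shape=(4, 1, 3, 3, 3),
                    if_pool=True, act='relu')
data = np.random.randn(2, 1, 8, 16, 16).astype(theano.config.floatX)
print(layer.get_activation(data).shape)  # (2, 4, 3, 7, 7)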
Example #9
    def __init__(self,
                 input,
                 ker_shape=(5, 1, 3, 3, 3),
                 pool_size=(2, 2, 2),
                 w=None,
                 b=None,
                 activation=None,
                 border_mode='valid',
                 rng=np.random.RandomState(23125)):
        """

            :type input: theano.tensor5
        	:param input: theano.tensor5 symbolic variable

        	:type ker_shape: tuple of length 5
        	:param ker_shape: tuple of size (output_maps,input_maps,neigbourhood_x,neighbourhood_y,neighbourhood_z) default to (1,5,3,3,3)

        	:type pool_size: tuple
        	:param pool_size: tuple of size (pool_x,pool_y,pool_z) default to (2,2,2)

        	:type W: numpy.ndarray
        	:param W: numpy array of size (n_in,n_out) default to None

        	:type b: numpy.ndarray
        	:param b: numpy vector of size (n_out,) default to None

        	:type activation: theano symbolic function
        	:param activation: nonlinear activation function Default to theano.tensor.tanh

        	:type rng: numpy.random.RandomState
        	:param rng: a random state object for generating random numbers for parameters

		"""

        fil_shape = ker_shape
        w_shp = ker_shape

        # scale the uniform init by the cube root of the kernel fan-in
        w_bound = np.power(np.prod(ker_shape[1:]), 1.0 / 3)

        self.input = input

        if w is None:
            w = theano.shared(np.asarray(rng.uniform(low=-1.0 / w_bound,
                                                     high=1.0 / w_bound,
                                                     size=w_shp),
                                         dtype=self.input.dtype),
                              name='W',
                              borrow=True)

        if b is None:
            b_shp = (fil_shape[0], )
            b = theano.shared(np.asarray(rng.uniform(low=-.5,
                                                     high=.5,
                                                     size=b_shp),
                                         dtype=self.input.dtype),
                              name='b',
                              borrow=True)

        if activation is None:
            activation = T.tanh

        self.W = w
        self.b = b

        # Theano calls 'same' padding 'half'
        if border_mode == 'same':
            border_mode = 'half'

        conv_out = conv3d(input=self.input,
                          filters=self.W,
                          border_mode=border_mode)

        if pool_size != (1, 1, 1):
            pool_out = max_pool_3d(input=conv_out,
                                   ws=pool_size,
                                   ignore_border=True)
            self.output = activation(pool_out +
                                     b.dimshuffle('x', 0, 'x', 'x', 'x'))
        else:
            self.output = activation(conv_out +
                                     b.dimshuffle('x', 0, 'x', 'x', 'x'))

        self.params = (self.W, self.b)
        self.ker_shape = ker_shape
        self.pool_size = pool_size
        self.activation = activation
        self.border_mode = border_mode
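
A minimal usage sketch, assuming the class above is named Conv3DLayer (the snippet does not show the class name) and that max_pool_3d behaves like theano.tensor.signal.pool.pool_3d:

import numpy as np
import theano
import theano.tensor as T

x = T.tensor5('x')              # (batch, channel, x, y, z)
layer = Conv3DLayer(x, ker_shape=(5, 1, 3, 3, 3), pool_size=(2, 2, 2))
f = theano.function([x], layer.output)
vol = np.random.rand(2, 1, 10, 10, 10).astype(theano.config.floatX)
print(f(vol).shape)  # (2, 5, 4, 4, 4)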