Example 1
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 convolution=T.nnet.conv2d, **kwargs):
        super(Conv2DLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, 2)
        self.stride = as_tuple(stride, 2)
        self.untie_biases = untie_biases
        self.convolution = convolution

        if pad == 'valid':
            self.pad = (0, 0)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 2, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_filters, self.output_shape[2],
                                self.output_shape[3])
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)
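Nearly every constructor in these examples funnels its size-like arguments through as_tuple. A minimal sketch of the behaviour they rely on, assuming Lasagne's lasagne.utils.as_tuple(x, N, t=None) semantics (broadcast scalars, length-check iterables, optionally type-check elements):

from lasagne.utils import as_tuple

assert as_tuple(3, 2) == (3, 3)        # a scalar is repeated N times
assert as_tuple((2, 5), 2) == (2, 5)   # an iterable is length-checked only
# as_tuple((2, 5), 3)    would raise ValueError (wrong length)
# as_tuple('ab', 2, int) would raise TypeError (wrong element type)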
Example 2
 def __init__(self,
              incoming,
              pnorm,
              pool_size,
              stride=None,
              pad=(0, 0),
              ignore_border=True,
              mode='average_inc_pad',
              **kwargs):
     super(LPPool2DLayer, self).__init__(incoming, **kwargs)
     if len(self.input_shape) != 4:
         raise ValueError("Tried to create a 2D pooling layer with "
                          "input shape %r. Expected 4 input dimensions "
                          "(batchsize, channels, 2 spatial dimensions)." %
                          (self.input_shape, ))
     self.pnorm = T.cast(pnorm, dtype=theano.config.floatX)
     self.pool_size = as_tuple(pool_size, 2)
     if stride is None:
         self.stride = self.pool_size
     else:
         self.stride = as_tuple(stride, 2)
     self.pad = as_tuple(pad, 2)
     self.ignore_border = ignore_border
     # The ignore_border argument is for compatibility with MaxPool2DLayer.
     # ignore_border=False is not supported. Borders are always ignored.
     # if not ignore_border:
     #     raise NotImplementedError("LPPool2DLayer is based on "
     #                               "Pool2DDNNLayer that does not support "
     #                               "ignore_border=False.")
     if mode != 'average_inc_pad' and mode != 'average_exc_pad':
         raise ValueError("LPPool2DLayer requires mode=average_inc_pad"
                          " or mode=average_exc_pad, but received "
                          "mode={} instead.".format(mode))
     self.mode = mode
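The layer itself only configures average pooling; the Lp norm is applied around it, raising inputs to the pnorm power before pooling and taking the 1/pnorm root afterwards. A rough numpy sketch of that composition over a single window (an assumption about how LPPool2DLayer composes its pieces, not a quote of its get_output_for):

import numpy as np

def lp_pool_window(x, pnorm):
    # Average of |x|**p over the window, then the p-th root.
    return np.mean(np.abs(x) ** pnorm) ** (1.0 / pnorm)

print(lp_pool_window(np.array([1.0, -2.0, 3.0, -4.0]), 2.0))  # ~2.739 (RMS)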
Example 3
 def __init__(self, incoming, pnorm, pool_size, stride=None, pad=(0, 0),
              ignore_border=True, mode='average_inc_pad', **kwargs):
     super(LPPool2DLayer, self).__init__(incoming, **kwargs)
     if len(self.input_shape) != 4:
         raise ValueError("Tried to create a 2D pooling layer with "
                          "input shape %r. Expected 4 input dimensions "
                          "(batchsize, channels, 2 spatial dimensions)."
                          % (self.input_shape,))
     self.pnorm = T.cast(pnorm, dtype=theano.config.floatX)
     self.pool_size = as_tuple(pool_size, 2)
     if stride is None:
         self.stride = self.pool_size
     else:
         self.stride = as_tuple(stride, 2)
     self.pad = as_tuple(pad, 2)
     self.ignore_border = ignore_border
     # The ignore_border argument is for compatibility with MaxPool2DLayer.
     # ignore_border=False is not supported. Borders are always ignored.
     # if not ignore_border:
     #     raise NotImplementedError("LPPool2DLayer is based on "
     #                               "Pool2DDNNLayer that does not support "
     #                               "ignore_border=False.")
     if mode != 'average_inc_pad' and mode != 'average_exc_pad':
         raise ValueError("LPPool2DLayer requires mode=average_inc_pad"
                          " or mode=average_exc_pad, but received "
                          "mode={} instead.".format(mode))
     self.mode = mode
Example 4
    def __init__(self,
                 incoming,
                 pool_size,
                 stride=None,
                 pad=(0, 0, 0),
                 ignore_border=True,
                 mode='max',
                 **kwargs):
        super(Pool3DLayer, self).__init__(incoming, **kwargs)

        self.pool_size = as_tuple(pool_size, 3)

        if len(self.input_shape) != 5:
            raise ValueError("Tried to create a 3D pooling layer with "
                             "input shape %r. Expected 5 input dimensions "
                             "(batchsize, channels, 3 spatial dimensions)." %
                             (self.input_shape, ))

        if stride is None:
            self.stride = self.pool_size
        else:
            self.stride = as_tuple(stride, 3)

        self.pad = as_tuple(pad, 3)

        self.ignore_border = ignore_border
        self.mode = mode
Example 5
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad="same", untie_biases=False,
                 W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.1),
                 nonlinearity=lasagne.nonlinearities.rectify,
                 convolution=T.nnet.conv2d, **kwargs):
        super(ConvMMax2DLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = lasagne.nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, 2)
        self.stride = as_tuple(stride, 2)
        self.untie_biases = untie_biases
        self.convolution = convolution

        if pad == 'valid':
            self.pad = (0, 0)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 2, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_filters, self.output_shape[2],
                                self.output_shape[3])
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)
Example 6
    def __init__(self,
                 incoming,
                 num_filters,
                 filter_size,
                 stride=1,
                 pad=0,
                 untie_biases=False,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 flip_filters=True,
                 n=None,
                 **kwargs):
        super(BaseConvLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        if n is None:
            n = len(self.input_shape) - 2
        elif n != len(self.input_shape) - 2:
            raise ValueError("Tried to create a %dD convolution layer with "
                             "input shape %r. Expected %d input dimensions "
                             "(batchsize, channels, %d spatial dimensions)." %
                             (n, self.input_shape, n + 2, n))
        self.n = n
        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, n, int)
        self.flip_filters = flip_filters
        self.stride = as_tuple(stride, n, int)
        self.untie_biases = untie_biases

        if pad == 'same':
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')
        if pad == 'valid':
            self.pad = as_tuple(0, n)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, n, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_filters, ) + self.output_shape[2:]
            else:
                biases_shape = (num_filters, )
            self.b = self.add_param(b,
                                    biases_shape,
                                    name="b",
                                    regularizable=False)
Example 7
 def __init__(self, incoming, localization_network, method,
              scale_factor=1, resize_factor=1, dtype='float32', **kwargs):
   super(PerspectiveLayer, self).__init__(
       [incoming, localization_network], **kwargs)
   self.scale_factor = as_tuple(scale_factor, 2)
   self.dtype = dtype
   self.resize_factor = as_tuple(resize_factor, 2)
   input_shp, loc_shp = self.input_shapes
   self.method = method
   if len(input_shp) != 4:
       raise ValueError("The input network must have a 4-dimensional "
                        "output shape: (batch_size, num_input_channels, "
                        "input_rows, input_columns)")
Example 8
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 border_mode=None, untie_biases=False,
                 W=upsample_filt([64, 64, 3, 3]).astype(np.float32),
                 pad=None, nonlinearity=lasagne.nonlinearities.rectify,
                 flip_filters=False, **kwargs):
        super(DeConv2DDNNLayer, self).__init__(incoming, **kwargs)

        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
            
        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, 2)
        self.stride = as_tuple(stride, 2)
        self.untie_biases = untie_biases
        self.flip_filters = flip_filters

        if border_mode is not None and pad is not None:
            raise RuntimeError("You cannot specify both 'border_mode' and "
                               "'pad'. To avoid ambiguity, please specify "
                               "only one of them.")
        elif border_mode is None and pad is None:
            # no option specified, default to valid mode
            self.pad = (0, 0)
            self.border_mode = 'valid'
        elif border_mode is not None:
            if border_mode == 'valid':
                self.pad = (0, 0)
                self.border_mode = 'valid'
            elif border_mode == 'full':
                self.pad = (self.filter_size[0] - 1, self.filter_size[1] - 1)
                self.border_mode = 'full'
            elif border_mode == 'same':
                # dnn_conv does not support same, so we just specify
                # padding directly.
                # only works for odd filter size, but the even filter size
                # case is probably not worth supporting.
                self.pad = ((self.filter_size[0] - 1) // 2,
                            (self.filter_size[1] - 1) // 2)
                self.border_mode = None
            else:
                raise RuntimeError("Invalid border mode: '%s'" % border_mode)
        else:
            self.pad = as_tuple(pad, 2)
            self.border_mode = None

        #self.W = self.add_param(W, self.get_W_shape(), name="W")
        #W = upsample_filt([num_filters,num_filters,filter_size[0],filter_size[0]]).astype(np.float32)
        self.W = self.add_param(W, self.get_W_shape(), name='W',
                                trainable=False, regularizable=False)

Example 9
    def __init__(self, incoming, pool_size, stride=None, pad=0,
                 ignore_border=True, mode='max', **kwargs):
        super(Pool1DLayer, self).__init__(incoming, **kwargs)

        if len(self.input_shape) != 4:
            raise ValueError("Tried to create a 1D time-step pooling layer with "
                             "input shape %r. Expected 4 input dimensions "
                             "(batchsize, n-step, channels, 1 spatial dimensions)."
                             % (self.input_shape,))

        self.pool_size = as_tuple(pool_size, 1)
        self.stride = self.pool_size if stride is None else as_tuple(stride, 1)
        self.pad = as_tuple(pad, 1)
        self.ignore_border = ignore_border
        self.mode = mode
Example 10
def transposed_convNd(input, kernel, crop, stride=1, n=None):
    if n is None:
        n = input.ndim - 2
    if crop == 'valid':
        pad = 'full'
    elif crop == 'full':
        pad = 'valid'
    elif crop == 'same':
        pad = 'same'
    else:
        crop = as_tuple(crop, n, int)
        pad = tuple(f - 1 - c for f, c in zip(kernel.shape[2:], crop))
    stride = as_tuple(stride, n, int)
    dilated_input = dilate(input, (1, 1) + stride)
    return convNd(dilated_input, kernel, pad, stride=1, n=n)
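The crop-to-pad mapping above is the standard transposed-convolution duality: cropping the transposed op corresponds to the opposite padding of the forward op, and the stride turns into input dilation. A hedged shape check of what this dilate-then-convolve construction yields per spatial dimension, for integer crop c, kernel size k, stride s:

def transposed_out_size(i, k, s, c):
    # dilated input: (i - 1) * s + 1; full conv adds k - 1; crop removes 2c.
    return (i - 1) * s + k - 2 * c

assert transposed_out_size(4, 3, 2, 0) == 9  # 4 -> 9 with k=3, s=2, c=0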
Example 11
def transposed_convNd(input, kernel, crop, stride=1, n=None):
    if n is None:
        n = input.ndim - 2
    if crop == 'valid':
        pad = 'full'
    elif crop == 'full':
        pad = 'valid'
    elif crop == 'same':
        pad = 'same'
    else:
        crop = as_tuple(crop, n, int)
        pad = tuple(f - 1 - c for f, c in zip(kernel.shape[2:], crop))
    stride = as_tuple(stride, n, int)
    dilated_input = dilate(input, (1, 1) + stride)
    return convNd(dilated_input, kernel, pad, stride=1, n=n)
Example 12
 def __init__(self,
              incoming,
              num_filters,
              filter_size,
              stride=(1, 1),
              crop=0,
              untie_biases=False,
              W=init.GlorotUniform(),
              b=init.Constant(0.),
              nonlinearity=nonlinearities.rectify,
              flip_filters=False,
              output_size=None,
              **kwargs):
     # output_size must be set before calling the super constructor
     if (not isinstance(output_size, T.Variable)
             and output_size is not None):
         output_size = as_tuple(output_size, 2, int)
     self.output_size = output_size
     super(TransposedConv2DLayer, self).__init__(incoming,
                                                 num_filters,
                                                 filter_size,
                                                 stride,
                                                 crop,
                                                 untie_biases,
                                                 W,
                                                 b,
                                                 nonlinearity,
                                                 flip_filters,
                                                 n=2,
                                                 **kwargs)
     # rename self.pad to self.crop:
     self.crop = self.pad
     del self.pad
Example 13
 def __init__(self,
              incoming,
              num_filters,
              filter_size,
              stride=(1, 1, 1),
              crop=0,
              untie_biases=False,
              W=lasagne.init.GlorotUniform(),
              b=lasagne.init.Constant(0.),
              nonlinearity=lasagne.nonlinearities.rectify,
              flip_filters=False,
              output_size=None,
              **kwargs):
     # output_size must be set before calling the super constructor
     if (not isinstance(output_size, T.Variable)
             and output_size is not None):
         output_size = as_tuple(output_size, 3, int)
     self.output_size = output_size
     BaseConvLayer.__init__(self,
                            incoming,
                            num_filters,
                            filter_size,
                            stride,
                            crop,
                            untie_biases,
                            W,
                            b,
                            nonlinearity,
                            flip_filters,
                            n=3,
                            **kwargs)
     # rename self.pad to self.crop:
     #if crop is None:
     self.crop = self.pad
     del self.pad
Example 14
    def __init__(self, incoming, scale_factor, **kwargs):
        super(Upscale3DLayer, self).__init__(incoming, **kwargs)

        self.scale_factor = as_tuple(scale_factor, 3)

        if any(s < 1 for s in self.scale_factor):
            raise ValueError('Scale factor must be >= 1, not {0}'.format(
                self.scale_factor))
Example 15
    def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0,
                 untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify, flip_filters=True,
                 n=None, **kwargs):
        super(BaseConvLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        if n is None:
            n = len(self.input_shape) - 2
        elif n != len(self.input_shape) - 2:
            raise ValueError("Tried to create a %dD convolution layer with "
                             "input shape %r. Expected %d input dimensions "
                             "(batchsize, channels, %d spatial dimensions)." %
                             (n, self.input_shape, n+2, n))
        self.n = n
        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, n, int)
        self.flip_filters = flip_filters
        self.stride = as_tuple(stride, n, int)
        self.untie_biases = untie_biases

        if pad == 'same':
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')
        if pad == 'valid':
            self.pad = as_tuple(0, n)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, n, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_filters,) + self.output_shape[2:]
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)
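The odd-filter restriction on 'same' exists because symmetric padding of (k - 1) // 2 per side reproduces the input size only when k is odd. A quick arithmetic check, as a sketch:

def conv_out_size(i, k, p, s=1):
    # Standard convolution output size: floor((i + 2p - k) / s) + 1.
    return (i + 2 * p - k) // s + 1

k = 5
assert conv_out_size(32, k, (k - 1) // 2) == 32  # odd k: size preserved
# For even k = 4, (k - 1) // 2 = 1 yields 31, hence the NotImplementedError.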
Example 16
    def __init__(self,
                 incoming,
                 filter_size,
                 init_std=5.,
                 W_logstd=None,
                 stride=1,
                 pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0,
                 **kwargs):
        super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
        # convolution = conv1d_gpucorrmm_mc0
        # convolution = conv.conv1d_mc0
        # convolution = T.nnet.conv2d
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution

        # if self.filter_size[0] % 2 == 0:
        #     raise NotImplementedError(
        #         'GaussianConv1dLayer requires odd filter size.')

        if pad == 'valid':
            self.pad = (0, )
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        if W_logstd is None:
            init_std = np.asarray(init_std, dtype=floatX)
            W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd, (self.num_input_channels, ),
                                       name="W_logstd",
                                       regularizable=False)
        self.W = self.make_gaussian_filter()
Example 17
 def __init__(self, incoming, scale_factor, pool2d_layer, pool2d_layer_in,
              **kwargs):
     super(Upscale2DLayer, self).__init__(incoming, **kwargs)
     self.scale_factor = as_tuple(scale_factor, 2)
     self.pool2d_layer = pool2d_layer
     self.pool2d_layer_in = pool2d_layer_in
     if self.scale_factor[0] < 1 or self.scale_factor[1] < 1:
         raise ValueError('Scale factor must be >= 1, not {0}'.format(
             self.scale_factor))
Example 18
    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, mode='max', **kwargs):
        super(Pool2DXLayer, self).__init__(incoming, **kwargs)

        self.pool_size = as_tuple(pool_size, 2)

        if stride is None:
            self.stride = self.pool_size
        else:
            self.stride = as_tuple(stride, 2)

        if pad == 'strictsamex':
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 2)

        self.ignore_border = ignore_border
        self.mode = mode
Example 19
    def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False,
                 W=init.GlorotUniform(), b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 convolution=T.nnet.conv2d, **kwargs):
        super(Conv2DXLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = as_tuple(filter_size, 2)
        self.stride = as_tuple(stride, 2)
        self.untie_biases = untie_biases
        self.convolution = convolution

        if pad == 'same':
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')
        if pad == 'strictsamex':
            if not (stride == 1 or stride == (1, 1)):
                raise NotImplementedError(
                    '`strictsamex` padding requires stride=(1, 1) or 1')

        if pad == 'valid':
            self.pad = (0, 0)
        elif pad in ('full', 'same', 'strictsamex'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 2, int)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_filters, self.output_shape[2],
                                self.output_shape[3])
            else:
                biases_shape = (num_filters,)
            self.b = self.add_param(b, biases_shape, name="b",
                                    regularizable=False)
Example 20
def transposed_convNd(input, kernel, crop, stride=1, n=None, extend=None):
    if n is None:
        n = input.ndim - 2
    if crop == 'valid':
        pad = 'full'
    elif crop == 'full':
        pad = 'valid'
    elif crop == 'same':
        pad = 'same'
    else:
        crop = as_tuple(crop, n, int)
        pad = tuple(f - 1 - c for f, c in zip(kernel.shape[2:], crop))
    stride = as_tuple(stride, n, int)
    dilated_input = dilate(input, (1, 1) + stride)
    if extend is not None:
        extend = as_tuple(extend, n, int)
        extend = [(0, p) for p in (0, 0) + extend]
        dilated_input = np.pad(dilated_input, extend, mode='constant')
    return convNd(dilated_input, kernel, pad, stride=1, n=n)
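The extend argument zero-pads the dilated input on the trailing side only. One plausible reading (an assumption, not stated in the code) is that it resolves the ambiguity of transposed convolution: several forward input sizes collapse to the same strided output, so the inverse can differ by up to stride - 1 samples per dimension:

def transposed_out_size_ext(i, k, s, c, e=0):
    # As before, (i - 1) * s + k - 2c, plus e trailing zeros of extension.
    return (i - 1) * s + k - 2 * c + e

# Both 7 and 8 forward-convolve to 3 with k=3, s=2, no padding; extend
# selects which size the transposed op reconstructs:
assert transposed_out_size_ext(3, 3, 2, 0, e=0) == 7
assert transposed_out_size_ext(3, 3, 2, 0, e=1) == 8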
Example 21
def transposed_convNd(input, kernel, crop, stride=1, n=None, extend=None):
    if n is None:
        n = input.ndim - 2
    if crop == 'valid':
        pad = 'full'
    elif crop == 'full':
        pad = 'valid'
    elif crop == 'same':
        pad = 'same'
    else:
        crop = as_tuple(crop, n, int)
        pad = tuple(f - 1 - c for f, c in zip(kernel.shape[2:], crop))
    stride = as_tuple(stride, n, int)
    dilated_input = dilate(input, (1, 1) + stride)
    if extend is not None:
        extend = as_tuple(extend, n, int)
        extend = [(0, p) for p in (0, 0) + extend]
        dilated_input = np.pad(dilated_input, extend, mode='constant')
    return convNd(dilated_input, kernel, pad, stride=1, n=n)
Example 22
    def __init__(self,
                 incoming,
                 num_filters,
                 num_rot,
                 filter_size,
                 stride=(1, 1),
                 border_mode="valid",
                 untie_biases=False,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 convolution=T.nnet.conv2d,
                 **kwargs):
        super(RotConv, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.num_rot = num_rot
        self.filter_size = as_tuple(filter_size, 2)
        self.stride = as_tuple(stride, 2)
        self.border_mode = border_mode
        self.untie_biases = untie_biases
        self.convolution = convolution

        if self.border_mode not in ['valid', 'full', 'same']:
            raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_filters, self.output_shape[2],
                                self.output_shape[3])
            else:
                biases_shape = (num_filters, )
            self.b = self.add_param(b,
                                    biases_shape,
                                    name="b",
                                    regularizable=False)
Example 23
    def __init__(self, incoming, filter_size,
                 init_std=5., W_logstd=None,
                 stride=1, pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0, **kwargs):
        super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
        # convolution = conv1d_gpucorrmm_mc0
        # convolution = conv.conv1d_mc0
        # convolution = T.nnet.conv2d
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution

        # if self.filter_size[0] % 2 == 0:
        #     raise NotImplementedError(
        #         'GaussianConv1dLayer requires odd filter size.')

        if pad == 'valid':
            self.pad = (0,)
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        if W_logstd is None:
            init_std = np.asarray(init_std, dtype=floatX)
            W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd,
                                       (self.num_input_channels,),
                                       name="W_logstd",
                                       regularizable=False)
        self.W = self.make_gaussian_filter()
Example 24
    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0, 0),
                 ignore_border=True, mode='max', **kwargs):
        super(Pool3DLayer, self).__init__(incoming, **kwargs)

        self.pool_size = as_tuple(pool_size, 3)

        if len(self.input_shape) != 5:
            raise ValueError("Tried to create a 3D pooling layer with "
                             "input shape %r. Expected 5 input dimensions "
                             "(batchsize, channels, 3 spatial dimensions)."
                             % (self.input_shape,))

        if stride is None:
            self.stride = self.pool_size
        else:
            self.stride = as_tuple(stride, 3)

        self.pad = as_tuple(pad, 3)

        self.ignore_border = ignore_border
        self.mode = mode
Example 25
def test_as_tuple_fails():
    from lasagne.utils import as_tuple, int_types
    with pytest.raises(ValueError) as exc:
        as_tuple([1, 2, 3], 4)
    assert "length 4" in exc.value.args[0]
    with pytest.raises(TypeError) as exc:
        as_tuple('asdf', 4, int)
    assert "of int," in exc.value.args[0]
    with pytest.raises(TypeError) as exc:
        as_tuple('asdf', 4, (int, float))
    assert "of int or float," in exc.value.args[0]
    with pytest.raises(TypeError) as exc:
        as_tuple('asdf', 4, int_types)
    assert "of int," in exc.value.args[0]
Example 26
def test_as_tuple_fails():
    from lasagne.utils import as_tuple, int_types
    with pytest.raises(ValueError) as exc:
        as_tuple([1, 2, 3], 4)
    assert "length 4" in exc.value.args[0]
    with pytest.raises(TypeError) as exc:
        as_tuple('asdf', 4, int)
    assert "of int," in exc.value.args[0]
    with pytest.raises(TypeError) as exc:
        as_tuple('asdf', 4, (int, float))
    assert "of int or float," in exc.value.args[0]
    with pytest.raises(TypeError) as exc:
        as_tuple('asdf', 4, int_types)
    assert "of int," in exc.value.args[0]
Example 27
    def __init__(self,
                 incoming,
                 window=scipy.signal.hann,
                 n_ch=2,
                 n_fft=2048,
                 hop_size=512,
                 log_amplitude=True,
                 **kwargs):
        """
        """
        super(STFTLayer, self).__init__(incoming, **kwargs)

        n = 2  # 2D convolution

        if n_ch > 2 or n_ch < 1:
            raise ValueError("n_ch should be either 1 (mono) or 2 (stereo)")

        self.n_ch = n_ch
        self.window = window
        self.n_fft = n_fft
        self.hop_size = hop_size
        self.log_amp = log_amplitude

        self.filter_size = as_tuple((n_fft, 1), n, int)
        self.stride = as_tuple((hop_size, 1), n, int)

        # dft kernels for real and imaginary domain
        W_r, W_i = _get_stft_kernels(n_fft, window=window, keras_ver='old')

        self.W_r = self.add_param(W_r,
                                  shape=W_r.shape,
                                  name='DFT_real_kernel',
                                  trainable=False,
                                  regularizable=False)
        self.W_i = self.add_param(W_i,
                                  shape=W_i.shape,
                                  name='DFT_image_kernel',
                                  trainable=False,
                                  regularizable=False)
Example 28
    def __init__(self,
                 incoming,
                 pool_size,
                 stride=None,
                 pad=0,
                 ignore_border=True,
                 mode='max',
                 **kwargs):
        super(Pool1DLayer, self).__init__(incoming, **kwargs)

        if len(self.input_shape) != 4:
            raise ValueError(
                "Tried to create a 1D time-step pooling layer with "
                "input shape %r. Expected 4 input dimensions "
                "(batchsize, n-step, channels, 1 spatial dimensions)." %
                (self.input_shape, ))

        self.pool_size = as_tuple(pool_size, 1)
        self.stride = self.pool_size if stride is None else as_tuple(stride, 1)
        self.pad = as_tuple(pad, 1)
        self.ignore_border = ignore_border
        self.mode = mode
Example 29
    def __init__(self, incoming, scale_factor, mode='repeat', **kwargs):
        super(Upscale2DLayer, self).__init__(incoming, **kwargs)

        self.scale_factor = as_tuple(scale_factor, 2)

        if self.scale_factor[0] < 1 or self.scale_factor[1] < 1:
            raise ValueError('Scale factor must be >= 1, not {0}'.format(
                self.scale_factor))

        if mode not in {'repeat', 'dilate'}:
            msg = "Mode must be either 'repeat' or 'dilate', not {0}"
            raise ValueError(msg.format(mode))
        self.mode = mode
Example 30
    def __init__(self,
                 incoming,
                 filter_size,
                 init_std=5.,
                 stride=1,
                 pad=0,
                 nonlinearity=None,
                 convolution=conv1d_mc0,
                 **kwargs):
        super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.filter_size = as_tuple(filter_size, 1)
        self.stride = as_tuple(stride, 1)
        self.convolution = convolution

        if pad == 'valid':
            self.pad = (0, )
        elif pad in ('full', 'same', 'strictsame'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, 1, int)

        init_std = np.asarray(init_std, dtype=floatX)
        W_logstd = init.Constant(np.log(init_std))
        # print(W_std)
        # W_std = init.Constant(init_std),
        self.num_input_channels = self.input_shape[1]
        # self.num_filters = self.num_input_channels
        self.W_logstd = self.add_param(W_logstd, (self.num_input_channels, ),
                                       name="W_logstd",
                                       regularizable=False,
                                       trainable=False)
        self.W = self.make_gaussian_filter()
Example 31
    def __init__(self, incoming, localization_network, downsample_factor=1,
                 **kwargs):
        super(Transformer3DLayer, self).__init__(
            [incoming, localization_network], **kwargs)
        self.downsample_factor = as_tuple(downsample_factor, 3)

        input_shp, loc_shp = self.input_shapes

        if loc_shp[-1] != 12 or len(loc_shp) != 2:
            raise ValueError("The localization network must have "
                             "output shape: (batch_size, 12)")
        if len(input_shp) != 5:
            raise ValueError("The input network must have a 5-dimensional "
                             "output shape: (batch_size, num_input_channels, "
                             "input_rows, input_columns, input_depth)")
Example 32
    def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
                 ignore_border=True, mode='learned_norm', P=2.0, **kwargs):
        super(LearnedNorm2DLayer, self).__init__(
            incoming, pool_size, stride=None, pad=(0, 0), ignore_border=True,
            **kwargs)

        self.pool_size = as_tuple(pool_size, 2)

        if len(self.input_shape) != 4:
            raise ValueError("Tried to create a 2D pooling layer with "
                             "input shape %r. Expected 4 input dimensions "
                             "(batchsize, channels, 2 spatial dimensions)."
                             % (self.input_shape,))

        if stride is None:
            self.stride = self.pool_size
        else:
            self.stride = as_tuple(stride, 2)

        self.pad = as_tuple(pad, 2)

        self.ignore_border = ignore_border
        self.mode = mode

        self.P = self.add_param(numpy.asarray(P),
                                numpy.shape(numpy.asarray(P)),
                                name='P', trainable=True)
Example 33
def conv2d(input, kernel, pad):
    """Execute a 2D convolution.

    Parameters
    ----------
    input : numpy array
    kernel : numpy array
    pad : {0, 'valid', 'same', 'full'}

    Returns
    -------
    numpy array
    """
    if pad not in ['valid', 'same', 'full']:
        pad = as_tuple(pad, 2, int)
        input = np.pad(input,
                       ((0, 0), (0, 0), (pad[0], pad[0]), (pad[1], pad[1])),
                       mode='constant')
        pad = 'valid'

    output = np.zeros((input.shape[0],
                       kernel.shape[0],
                       input.shape[2] + kernel.shape[2] - 1,
                       input.shape[3] + kernel.shape[3] - 1,
                       ))

    for i in range(kernel.shape[2]):
        for j in range(kernel.shape[3]):
            k = kernel[:, :, i, j][:, :, np.newaxis, np.newaxis]
            output[:, :, i:i + input.shape[2],
                   j:j + input.shape[3]] += (input[:, np.newaxis] * k).sum(2)

    if pad == 'valid':
        trim = (kernel.shape[2] - 1, kernel.shape[3] - 1)
        output = output[:,
                        :,
                        trim[0]:-trim[0] or None,
                        trim[1]:-trim[1] or None]

    elif pad == 'same':
        shift_x = (kernel.shape[2] - 1) // 2
        shift_y = (kernel.shape[3] - 1) // 2
        output = output[:, :, shift_x:input.shape[2] + shift_x,
                        shift_y:input.shape[3] + shift_y]
    return output
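A small sanity check of this numpy conv2d, assuming the function above and numpy are in scope: 'same' with an odd kernel preserves the spatial shape, 'full' grows each spatial dimension by k - 1, and 'valid' shrinks it by k - 1:

import numpy as np

x = np.random.randn(2, 3, 8, 8)   # (batch, channels, rows, cols)
w = np.random.randn(4, 3, 3, 3)   # (filters, channels, 3x3 kernel)

assert conv2d(x, w, 'same').shape == (2, 4, 8, 8)
assert conv2d(x, w, 'full').shape == (2, 4, 10, 10)
assert conv2d(x, w, 'valid').shape == (2, 4, 6, 6)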
Example 34
    def __init__(self,
                 incoming,
                 localization_network,
                 downsample_factor=1,
                 **kwargs):
        super(TransformerLayer,
              self).__init__([incoming, localization_network], **kwargs)
        self.downsample_factor = as_tuple(downsample_factor, 2)

        input_shp, loc_shp = self.input_shapes

        if loc_shp[-1] != 6 or len(loc_shp) != 2:
            raise ValueError("The localization network must have "
                             "output shape: (batch_size, 6)")
        if len(input_shp) != 4:
            raise ValueError("The input network must have a 4-dimensional "
                             "output shape: (batch_size, num_input_channels, "
                             "input_rows, input_columns)")
Example 35
def conv2d(input, kernel, pad):
    """Execute a 2D convolution.

    Parameters
    ----------
    input : numpy array
    kernel : numpy array
    pad : {0, 'valid', 'same', 'full'}

    Returns
    -------
    numpy array
    """
    if pad not in ['valid', 'same', 'full']:
        pad = as_tuple(pad, 2, int)
        input = np.pad(input,
                       ((0, 0), (0, 0), (pad[0], pad[0]), (pad[1], pad[1])),
                       mode='constant')
        pad = 'valid'

    output = np.zeros((
        input.shape[0],
        kernel.shape[0],
        input.shape[2] + kernel.shape[2] - 1,
        input.shape[3] + kernel.shape[3] - 1,
    ))

    for i in range(kernel.shape[2]):
        for j in range(kernel.shape[3]):
            k = kernel[:, :, i, j][:, :, np.newaxis, np.newaxis]
            output[:, :, i:i + input.shape[2],
                   j:j + input.shape[3]] += (input[:, np.newaxis] * k).sum(2)

    if pad == 'valid':
        trim = (kernel.shape[2] - 1, kernel.shape[3] - 1)
        output = output[:, :, trim[0]:-trim[0] or None,
                        trim[1]:-trim[1] or None]

    elif pad == 'same':
        shift_x = (kernel.shape[2] - 1) // 2
        shift_y = (kernel.shape[3] - 1) // 2
        output = output[:, :, shift_x:input.shape[2] + shift_x,
                        shift_y:input.shape[3] + shift_y]
    return output
Example 36
    def __init__(self,
                 incoming,
                 localization_network,
                 downsample_factor=1,
                 control_points=16,
                 precompute_grid='auto',
                 **kwargs):
        super(TPSTransformerLayer,
              self).__init__([incoming, localization_network], **kwargs)

        self.downsample_factor = as_tuple(downsample_factor, 2)
        self.control_points = control_points

        input_shp, loc_shp = self.input_shapes

        # Error checking
        if loc_shp[-1] != 2 * control_points or len(loc_shp) != 2:
            raise ValueError("The localization network must have "
                             "output shape: (batch_size, "
                             "2*control_points)")

        if round(np.sqrt(control_points)) != np.sqrt(control_points):
            raise ValueError("The number of control points must be"
                             " a perfect square.")

        if len(input_shp) != 4:
            raise ValueError("The input network must have a 4-dimensional "
                             "output shape: (batch_size, num_input_channels, "
                             "input_rows, input_columns)")

        # Process precompute grid
        can_precompute_grid = all(s is not None for s in input_shp[2:])
        if precompute_grid == 'auto':
            precompute_grid = can_precompute_grid
        elif precompute_grid and not can_precompute_grid:
            raise ValueError("Grid can only be precomputed if the input "
                             "height and width are pre-specified.")
        self.precompute_grid = precompute_grid

        # Create source points and L matrix
        self.right_mat, self.L_inv, self.source_points, self.out_height, \
            self.out_width = _initialize_tps(
                control_points, input_shp, self.downsample_factor,
                precompute_grid)
Example 37
    def __init__(self,
                 incoming,
                 localization_network,
                 downsample_factor=1,
                 control_points=16,
                 border_mode='nearest',
                 **kwargs):
        super(TPSTransformer, self).__init__([incoming, localization_network],
                                             **kwargs)

        self.border_mode = border_mode
        self.downsample_factor = as_tuple(downsample_factor, 2)
        self.control_points = control_points

        input_shp, loc_shp = self.input_shapes

        # localization should output coefficients
        # of shape (batch_size, 2, num_l_points+3)
        # Create source points and L matrix
        self.right_mat, self.source_points, self.out_height, \
            self.out_width = _initialize_tps(
                control_points, input_shp, self.downsample_factor)
Example 38
 def __init__(self, incoming, num_filters, filter_size, dilation=(1, 1),
              pad=0, untie_biases=False,
              W=init.GlorotUniform(), b=init.Constant(0.),
              nonlinearity=nonlinearities.rectify, flip_filters=False,
              **kwargs):
     self.dilation = as_tuple(dilation, 2, int)
     super(DilatedConv2DLayer, self).__init__(
             incoming, num_filters, filter_size, 1, pad,
             untie_biases, W, b, nonlinearity, flip_filters, n=2, **kwargs)
     # remove self.stride:
     del self.stride
     # require valid convolution
     if self.pad != (0, 0):
         raise NotImplementedError(
                 "DilatedConv2DLayer requires pad=0 / (0,0) / 'valid', but "
                 "got %r. For a padded dilated convolution, add a PadLayer."
                 % (pad,))
     # require unflipped filters
     if self.flip_filters:
         raise NotImplementedError(
                 "DilatedConv2DLayer requires flip_filters=False.")
Example 39
    def __init__(self, incoming, n, W=lasagne.init.Normal(5), name=None,
                 downsample_factor=1, control_points=16,
                 precompute_grid='auto', **kwargs):
        super(TPSTransformationMatrixLayer, self).__init__(
                incoming, **kwargs)
        self.n = n
        self.W = self.add_param(W, (n, control_points * 2),
                                name="localization")

        self.downsample_factor = as_tuple(downsample_factor, 2)
        self.control_points = control_points

        input_shp = self.input_shape

        if round(np.sqrt(control_points)) != np.sqrt(
                control_points):
            raise ValueError("The number of control points must be"
                             " a perfect square.")

        if len(input_shp) != 4:
            raise ValueError("The input network must have a 4-dimensional "
                             "output shape: (batch_size, num_input_channels, "
                             "input_rows, input_columns)")

        # Process precompute grid
        can_precompute_grid = all(s is not None for s in input_shp[2:])
        if precompute_grid == 'auto':
            precompute_grid = can_precompute_grid
        elif precompute_grid and not can_precompute_grid:
            raise ValueError("Grid can only be precomputed if the input "
                             "height and width are pre-specified.")
        self.precompute_grid = precompute_grid

        # Create source points and L matrix
        self.right_mat, self.L_inv, self.source_points, self.out_height, \
            self.out_width = initialize_tps(
                control_points, input_shp, self.downsample_factor,
                precompute_grid)
Example 40
def convNd(input, kernel, pad, stride=1, n=None):
    """Execute a batch of a stack of N-dimensional convolutions.

    Parameters
    ----------
    input : numpy array
    kernel : numpy array
    pad : {0, 'valid', 'same', 'full'}, int or tuple of int
    stride : int or tuple of int
    n : int

    Returns
    -------
    numpy array
    """
    if n is None:
        n = input.ndim - 2
    if pad not in ['valid', 'same', 'full']:
        pad = as_tuple(pad, n, int)
        input = np.pad(input, [(p, p) for p in (0, 0) + pad], mode='constant')
        pad = 'valid'

    output = np.zeros((input.shape[0], kernel.shape[0]) +
                      tuple(i + k - 1 for i, k in zip(input.shape[2:],
                                                      kernel.shape[2:])))

    if n == 1:
        for i in range(kernel.shape[2]):
            f = kernel[:, :, i:i+1]
            c = (input[:, np.newaxis] * f).sum(axis=2)
            output[:, :,
                   i:i + input.shape[2]] += c
    elif n == 2:
        for i in range(kernel.shape[2]):
            for j in range(kernel.shape[3]):
                f = kernel[:, :, i:i+1, j:j+1]
                c = (input[:, np.newaxis] * f).sum(axis=2)
                output[:, :,
                       i:i + input.shape[2],
                       j:j + input.shape[3]] += c
    elif n == 3:
        for i in range(kernel.shape[2]):
            for j in range(kernel.shape[3]):
                for k in range(kernel.shape[4]):
                    f = kernel[:, :, i:i+1, j:j+1, k:k+1]
                    c = (input[:, np.newaxis] * f).sum(axis=2)
                    output[:, :,
                           i:i + input.shape[2],
                           j:j + input.shape[3],
                           k:k + input.shape[4]] += c
    else:
        raise NotImplementedError("convNd() only supports n in (1, 2, 3)")

    if pad == 'valid':
        trim = tuple(k - 1 for k in kernel.shape[2:])
        slices = [slice(None), slice(None)]
        slices += [slice(t, -t or None) for t in trim]
        output = output[slices]
    elif pad == 'same':
        shift = tuple((k - 1) // 2 for k in kernel.shape[2:])
        slices = [slice(None), slice(None)]
        slices += [slice(s, s + i) for s, i in zip(shift, input.shape[2:])]
        output = output[slices]

    stride = as_tuple(stride, n, int)
    if any(s > 1 for s in stride):
        slices = [slice(None), slice(None)]
        slices += [slice(None, None, s) for s in stride]
        output = output[slices]

    return output
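A hedged usage check for convNd, assuming it and numpy are in scope. Note that the accumulation loop applies the kernel flipped, i.e. it computes a true convolution rather than a cross-correlation, and the stride is realized by slicing the dense output:

import numpy as np

x = np.random.randn(1, 1, 9)   # one 1D signal: (batch, channels, length)
w = np.random.randn(1, 1, 3)   # a single 3-tap kernel

y = convNd(x, w, 'valid', stride=2)
assert y.shape == (1, 1, 4)    # floor((9 - 3) / 2) + 1 = 4

# Output sample 1 sits at dense position 2; the kernel is applied flipped:
expected = (x[0, 0, 2:5] * w[0, 0, ::-1]).sum()
assert np.isclose(y[0, 0, 1], expected)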
Example 41
def test_as_tuple_fails():
    from lasagne.utils import as_tuple
    with pytest.raises(ValueError):
        as_tuple([1, 2, 3], 4)
    with pytest.raises(TypeError):
        as_tuple('asdf', 4, int)
Example 42
def convNd(input, kernel, pad, stride=1, n=None):
    """Execute a batch of a stack of N-dimensional convolutions.

    Parameters
    ----------
    input : numpy array
    kernel : numpy array
    pad : {0, 'valid', 'same', 'full'}, int or tuple of int
    stride : int or tuple of int
    n : int

    Returns
    -------
    numpy array
    """
    if n is None:
        n = input.ndim - 2
    if pad not in ['valid', 'same', 'full']:
        pad = as_tuple(pad, n, int)
        input = np.pad(input, [(p, p) for p in (0, 0) + pad], mode='constant')
        pad = 'valid'

    output = np.zeros((input.shape[0], kernel.shape[0]) + tuple(
        i + k - 1 for i, k in zip(input.shape[2:], kernel.shape[2:])))

    if n == 1:
        for i in range(kernel.shape[2]):
            f = kernel[:, :, i:i + 1]
            c = (input[:, np.newaxis] * f).sum(axis=2)
            output[:, :, i:i + input.shape[2]] += c
    elif n == 2:
        for i in range(kernel.shape[2]):
            for j in range(kernel.shape[3]):
                f = kernel[:, :, i:i + 1, j:j + 1]
                c = (input[:, np.newaxis] * f).sum(axis=2)
                output[:, :, i:i + input.shape[2], j:j + input.shape[3]] += c
    elif n == 3:
        for i in range(kernel.shape[2]):
            for j in range(kernel.shape[3]):
                for k in range(kernel.shape[4]):
                    f = kernel[:, :, i:i + 1, j:j + 1, k:k + 1]
                    c = (input[:, np.newaxis] * f).sum(axis=2)
                    output[:, :, i:i + input.shape[2], j:j + input.shape[3],
                           k:k + input.shape[4]] += c
    else:
        raise NotImplementedError("convNd() only supports n in (1, 2, 3)")

    if pad == 'valid':
        trim = tuple(k - 1 for k in kernel.shape[2:])
        slices = [slice(None), slice(None)]
        slices += [slice(t, -t or None) for t in trim]
        output = output[slices]
    elif pad == 'same':
        shift = tuple((k - 1) // 2 for k in kernel.shape[2:])
        slices = [slice(None), slice(None)]
        slices += [slice(s, s + i) for s, i in zip(shift, input.shape[2:])]
        output = output[slices]

    stride = as_tuple(stride, n, int)
    if any(s > 1 for s in stride):
        slices = [slice(None), slice(None)]
        slices += [slice(None, None, s) for s in stride]
        output = output[slices]

    return output
Example 43
    def __init__(self,
                 incoming,
                 filter_mask_size=(3, 3),
                 filter_shape=(1, ),
                 convolution_axes=(2, 3),
                 channel_axes=(1, ),
                 stride=1,
                 width=1,
                 pad='same',
                 untie_biases=False,
                 W=lasagne.init.Orthogonal("relu"),
                 b=lasagne.init.Constant(0.),
                 nonlinearity=lasagne.nonlinearities.rectify,
                 flip_filters=False,
                 **kwargs):

        assert len(filter_shape) == len(channel_axes)
        assert len(filter_mask_size) == len(convolution_axes)

        super(ConvolutionLayer, self).__init__(incoming, **kwargs)

        if nonlinearity is None:
            self.nonlinearity = lasagne.nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        n = len(convolution_axes)

        self.channel_axes = channel_axes
        self.convolution_axes = convolution_axes
        self.filter_shape = filter_shape
        self.n = n
        self.num_filters = np.prod(filter_shape)
        self.filter_mask_size = filter_mask_size
        self.flip_filters = flip_filters
        self.stride = as_tuple(stride, n, int)
        self.width = as_tuple(width, n, int)
        self.untie_biases = untie_biases

        if pad == 'same':
            if any(s % 2 == 0 for s in self.filter_mask_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')
        if pad == 'valid':
            self.pad = as_tuple(0, n)
        elif pad in ('full', 'same'):
            self.pad = pad
        else:
            self.pad = as_tuple(pad, n, int)

        if W is None:
            self.W = None
        else:
            self.W = self.add_param(W, self.get_W_shape(), name="W")

        if b is None:
            self.b = None
        else:
            if self.untie_biases is False:
                biases_shape = self.filter_shape
            else:
                biases_shape = self.filter_shape + tuple(
                    [self.output_shape[i] for i in self.convolution_axes])
            self.b = self.add_param(b,
                                    biases_shape,
                                    name="b",
                                    regularizable=False)

        assert all([
            self.input_shape[self.convolution_axes[i]] %
            (self.width[i] * self.stride[i]) == 0
            for i in range(len(self.filter_mask_size))
        ])
Example 44
def dilated_convNd(input, kernel, pad, dilation=1, n=None):
    if n is None:
        n = input.ndim - 2
    dilation = as_tuple(dilation, n, int)
    dilated_kernel = dilate(kernel, (1, 1) + dilation)
    return convNd(input, dilated_kernel, pad, stride=1, n=n)
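Dilating the kernel instead of the input gives an atrous convolution: the number of taps stays k, but the effective extent grows to (k - 1) * d + 1 (assuming dilate inserts d - 1 zeros between kernel elements). The implied shape arithmetic, as a sketch:

def dilated_out_size(i, k, d, p=0):
    k_eff = (k - 1) * d + 1          # effective kernel size after dilation
    return (i + 2 * p - k_eff) + 1   # then an ordinary stride-1 convolution

assert dilated_out_size(10, 3, 1) == 8  # d=1 is a plain convolution
assert dilated_out_size(10, 3, 2) == 6  # k_eff = 5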
Example 45
def dilated_convNd(input, kernel, pad, dilation=1, n=None):
    if n is None:
        n = input.ndim - 2
    dilation = as_tuple(dilation, n, int)
    dilated_kernel = dilate(kernel, (1, 1) + dilation)
    return convNd(input, dilated_kernel, pad, stride=1, n=n)
Example 46
    def __init__(self,
                 x,
                 cell_previous,
                 hid_previous,
                 filter_size,
                 stride=(1, 1),
                 pad='same',
                 flip_filters=True,
                 n=None,
                 convolution=T.nnet.conv2d,
                 ingate=Gate(W_in=init.GlorotUniform(),
                             W_hid=init.GlorotUniform()),
                 forgetgate=Gate(W_in=init.GlorotUniform(),
                                 W_hid=init.GlorotUniform()),
                 cell=Gate(W_in=init.GlorotUniform(),
                           W_hid=init.GlorotUniform(),
                           W_cell=None,
                           nonlinearity=nonlinearities.tanh),
                 outgate=Gate(),
                 nonlinearity=nonlinearities.tanh,
                 cell_init=init.Constant(0.),
                 hid_init=init.Constant(0.),
                 learn_init=False,
                 peepholes=True,
                 grad_clipping=0,
                 **kwargs):

        # Initialize parent layer
        super(ConvLSTMCell, self).__init__([x, cell_previous, hid_previous],
                                           **kwargs)

        # If the provided nonlinearity is None, make it linear
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        # Retrieve the dimensionality of the incoming layer
        self.input_shape_x = self.input_shapes[0]
        self.input_shape_c = self.input_shapes[1]
        self.input_shape_h = self.input_shapes[2]

        # From ConvNet
        if n is None:
            n = len(self.input_shape_x) - 2
        elif n != len(self.input_shape_x) - 2:
            raise ValueError("Tried to create a %dD convolution layer with "
                             "input shape %r. Expected %d input dimensions "
                             "(batchsize, channels, %d spatial dimensions)." %
                             (n, self.input_shape_x, n + 2, n))
        self.n = n
        self.pad = pad
        self.num_filters = self.input_shape_h[1]
        self.filter_size = as_tuple(filter_size, n, int)
        self.flip_filters = flip_filters
        self.stride = as_tuple(stride, n, int)
        self.convolution = convolution

        if self.pad == 'same':
            if any(s % 2 == 0 for s in self.filter_size):
                raise NotImplementedError(
                    '`same` padding requires odd filter size.')
        else:
            raise ValueError("You must use the 'same' padding")

        self.learn_init = learn_init
        self.peepholes = peepholes
        self.grad_clipping = grad_clipping

        def add_gate_params(gate, gate_name):
            """ Convenience function for adding layer parameters from a Gate
            instance. """
            return (self.add_param(gate.W_in,
                                   self.get_W_shape(self.input_shape_x),
                                   name="W_in_to_{}".format(gate_name)),
                    self.add_param(gate.W_hid,
                                   self.get_W_shape(self.input_shape_h),
                                   name="W_hid_to_{}".format(gate_name)),
                    self.add_param(gate.b, (self.num_filters, ),
                                   name="b_{}".format(gate_name),
                                   regularizable=False), gate.nonlinearity)

        # Add in parameters from the supplied Gate instances
        (self.W_in_to_ingate, self.W_hid_to_ingate, self.b_ingate,
         self.nonlinearity_ingate) = add_gate_params(ingate, 'ingate')

        (self.W_in_to_forgetgate, self.W_hid_to_forgetgate, self.b_forgetgate,
         self.nonlinearity_forgetgate) = add_gate_params(
             forgetgate, 'forgetgate')

        (self.W_in_to_cell, self.W_hid_to_cell, self.b_cell,
         self.nonlinearity_cell) = add_gate_params(cell, 'cell')

        (self.W_in_to_outgate, self.W_hid_to_outgate, self.b_outgate,
         self.nonlinearity_outgate) = add_gate_params(outgate, 'outgate')

        # If peephole (cell to gate) connections were enabled, initialize
        # peephole connections.  These are elementwise products with the cell
        # state, so they are represented as vectors.

        if self.peepholes:
            self.W_cell_to_ingate = self.add_param(ingate.W_cell,
                                                   (self.num_filters, ),
                                                   name="W_cell_to_ingate")

            self.W_cell_to_forgetgate = self.add_param(
                forgetgate.W_cell, (self.num_filters, ),
                name="W_cell_to_forgetgate")

            self.W_cell_to_outgate = self.add_param(outgate.W_cell,
                                                    (self.num_filters, ),
                                                    name="W_cell_to_outgate")

        # Set up initial values for the cell and the hidden units
        self.cell_init = self.add_param(cell_init,
                                        (1, ) + self.input_shape_c[1:],
                                        name="cell_init",
                                        trainable=learn_init,
                                        regularizable=False)

        self.hid_init = self.add_param(hid_init,
                                       (1, ) + self.input_shape_h[1:],
                                       name="hid_init",
                                       trainable=learn_init,
                                       regularizable=False)
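Only the constructor is shown, but its shape bookkeeping is worth illustrating: `num_filters` is read from the hidden state's channel axis (`input_shape_h[1]`), and the enforced `pad='same'` rules out even filter sizes. A hypothetical construction, assuming the full ConvLSTMCell class is importable and using made-up shapes:

from lasagne.layers import InputLayer

# Made-up shapes: batch 8, 3 input channels, 16 hidden filters, 32x32 grid.
# The hidden/cell shapes fix num_filters via input_shape_h[1], exactly as
# the constructor above reads it.
x = InputLayer((8, 3, 32, 32))
c_prev = InputLayer((8, 16, 32, 32))
h_prev = InputLayer((8, 16, 32, 32))

step = ConvLSTMCell(x, c_prev, h_prev,
                    filter_size=3,   # must be odd: pad='same' is enforced
                    pad='same')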
Example No. 48
def test_as_tuple_fails():
    import pytest
    from lasagne.utils import as_tuple
    with pytest.raises(ValueError):
        as_tuple([1, 2, 3], 4)
    with pytest.raises(TypeError):
        as_tuple('asdf', 4, int)
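The failing cases above have natural counterparts. As a quick sketch of `as_tuple`'s intended behavior (scalars repeated, iterables of the right length passed through, the optional third argument checking element types):

from lasagne.utils import as_tuple

assert as_tuple(3, 2) == (3, 3)              # scalar repeated to length 2
assert as_tuple([1, 2, 3], 3) == (1, 2, 3)   # right length: passed through
assert as_tuple((4, 5), 2, int) == (4, 5)    # element-type check passes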