Example #1
def shpcmp(shape1, shape2):
    # Lambda to compare lists element-wise, treating None entries as wildcards.
    cmplst = lambda l1, l2: all([
        elem1 == elem2 if None not in [elem1, elem2] else True
        for elem1, elem2 in zip(l1, l2)
    ]) and (len(l1) == len(l2))
    # First test: shape1 and shape2 must both be (or both not be) a list of lists.
    shpeq = pyk.islistoflists(shape1) == pyk.islistoflists(shape2)
    # Second test: number of inputs.
    shpeq = shpeq and (len(pyk.list2listoflists(shape1)) == len(
        pyk.list2listoflists(shape2)))
    # Third test: list comparisons
    shpeq = shpeq and all([
        cmplst(l1, l2) for l1, l2 in zip(pyk.list2listoflists(shape1),
                                         pyk.list2listoflists(shape2))
    ])
    # Done
    return shpeq
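
A quick way to see the None-as-wildcard semantics (a hypothetical session; it assumes pyk.islistoflists and pyk.list2listoflists from Antipasti's pykit behave as their names suggest):

# Hypothetical usage of shpcmp; None matches anything at its position.
shpcmp([None, 3, 256, 256], [1, 3, None, 256])     # True: None is a wildcard
shpcmp([1, 3, 256, 256], [1, 3, 256, 128])         # False: 256 != 128
shpcmp([1, 3, 256], [1, 3, 256, 256])              # False: lengths differ
shpcmp([[1, 3, None, None]], [None, 3, 256, 256])  # False: list-of-lists vs. flat list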
Example #2
    def inferoutshape(self, inpshape=None, checkinput=True):
        if inpshape is None:
            inpshape = self.inpshape

        if checkinput:
            # Compare shape with Antipasti inpshape
            assert netutils.shpcmp(self.lasinpshape, inpshape), "Lasagne input shape is not consistent with the " \
                                                                "inferred Antipasti input shape."

        # Get output shape from Lasagne
        outshape = las.layers.get_output_shape(
            self.outputlayers, {
                inplayer: ishp
                for inplayer, ishp in zip(pyk.obj2list(self.inputlayers),
                                          pyk.list2listoflists(inpshape))
            })

        outshape = pyk.listoftuples2listoflists(outshape) if pyk.islistoflists(
            outshape) else list(outshape)
        return outshape
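
For context, the Lasagne call at the heart of inferoutshape accepts a dict mapping input layers to shape tuples; a minimal standalone sketch (the network here is made up for illustration):

import lasagne.layers as las_layers

l_in = las_layers.InputLayer(shape=(None, 3, 256, 256))
l_conv = las_layers.Conv2DLayer(l_in, num_filters=16, filter_size=3, pad='same')
# get_output_shape takes {input_layer: shape} to override the declared shape
print(las_layers.get_output_shape(l_conv, {l_in: (1, 3, 128, 128)}))
# -> (1, 16, 128, 128): 'same' padding preserves the spatial dimensions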
Example #3
def parselayerinfo(dim=None,
                   inpdim=None,
                   issequence=None,
                   allowsequences=None,
                   numinp=None,
                   inpshape=None,
                   verbose=True):
    parsey = {
        'dim': dim,
        'inpdim': inpdim,
        'issequence': issequence,
        'allowsequences': allowsequences,
        'numinp': numinp,
        'inpshape': inpshape
    }

    # Parse from inpshape
    if parsey['inpshape'] is not None:
        # Make sure inpshape is a list
        assert isinstance(
            parsey['inpshape'],
            list), "inpshape must be a list, e.g. [None, 3, None, None]."

        # Fetch number of inputs
        if pyk.islistoflists(parsey['inpshape']):
            _numinp = len(parsey['inpshape'])
            _inpdim = [len(ishp) for ishp in parsey['inpshape']]
        else:
            _numinp = 1
            _inpdim = len(parsey['inpshape'])

        # Write to parsed (if not written already)
        # numinp
        parsey['numinp'] = _numinp if parsey['numinp'] is None else parsey[
            'numinp']
        # Consistency check
        assert parsey['numinp'] == _numinp, "The provided inpshape requires numinp = {}, " \
                                            "but the value given was {}".format(_numinp, parsey['numinp'])
        # inpdim
        parsey['inpdim'] = _inpdim if parsey['inpdim'] is None else parsey[
            'inpdim']
        assert parsey['inpdim'] == _inpdim, "The provided inpshape requires inpdim = {}, " \
                                            "but the value given was {}".format(_inpdim, parsey['inpdim'])

    # Check if dim, inpdim, issequence or allowsequences is a list of multiple elements and numinp is not given.
    if parsey['numinp'] is None:
        for argname in ['dim', 'inpdim', 'issequence', 'allowsequences']:
            if isinstance(parsey[argname], list):
                parsey['numinp'] = len(parsey[argname])

    # Parse from numinp
    if parsey['numinp'] is not None:
        for argname, argtype in zip(
            ['dim', 'inpdim', 'issequence', 'allowsequences'],
            [int, int, bool, bool]):
            if isinstance(parsey[argname], argtype) and parsey['numinp'] > 1:
                # If numinp > 1 and dim, inpdim, issequence or allowsequences is
                # given as a scalar (int or bool), assume the user (or the
                # author) was too lazy to type in a list and broadcast it.
                parsey[argname] = [
                    parsey[argname],
                ] * parsey['numinp']
            elif isinstance(parsey[argname], list) and parsey['numinp'] > 1:
                # If the user was not lazy, make sure the given list sizes check out
                assert len(parsey[argname]) == parsey['numinp'], \
                    "{} must be a {} or a list of length {} (= numinp).".format(argname, argtype, parsey['numinp'])

        # Check if inpshape is consistent
        if parsey['inpshape'] is not None and parsey['numinp'] > 1:
            assert pyk.islistoflists(parsey['inpshape']) and len(
                parsey['inpshape']) == parsey['numinp']

    else:
        if verbose:
            warn("Guessing that numinp = 1.")
        # Guess numinp = 1.
        parsey['numinp'] = 1

    # Parse allowsequences
    # At this point, allowsequences must be known (or no conclusions can be drawn on issequence and dim)
    if parsey['allowsequences'] is None:
        if verbose:
            warn("Guessing that sequences are allowed.")
        parsey['allowsequences'] = pyk.delist([
            True,
        ] * parsey['numinp'])
    else:
        # Okay, so it's known if sequences are allowed. Check if issequence is consistent.
        if pyk.obj2list(parsey['allowsequences']) == [
                False,
        ] * parsey['numinp'] and parsey['issequence'] is not None:
            # If sequences are not allowed, make sure issequence is False
            assert pyk.obj2list(parsey['issequence']) == [False,] * parsey['numinp'], \
                "Input(s) are not allowed to be sequential, yet they are."

    # Parse issequence
    if parsey['issequence'] is not None:
        # Delist issequence
        parsey['issequence'] = pyk.delist(parsey['issequence']) \
            if isinstance(parsey['issequence'], list) else parsey['issequence']

        # Check if issequence is consistent with everything
        if isinstance(parsey['issequence'], list):
            assert len(parsey['issequence']) == parsey['numinp'], "issequence must be a list of the same length as " \
                                                                  "numinp = {} if numinp > 1.".format(parsey['numinp'])

        # Check if consistent with allowsequences. At this point, issequence may have None's.
        assert all([(bool(isseq) and allowseq) or not isseq
                    for isseq, allowseq in zip(pyk.obj2list(parsey['issequence']),
                                               pyk.obj2list(parsey['allowsequences']))]), \
            "Input is a sequence although it's not allowed to. " \
            "issequence = {}, allowsequences = {}.".format(parsey['issequence'], parsey['allowsequences'])

    else:
        if verbose:
            warn("Guessing that input(s) is(are) not sequential.")
        parsey['issequence'] = pyk.delist([
            False,
        ] * parsey['numinp'])

    # Parse inpdim
    # Compute expected inpdim from what's known
    # Check in from issequence
    _inpdim = pyk.delist(
        [5 if isseq else None for isseq in pyk.obj2list(parsey['issequence'])])
    # Check in from dim
    if parsey['dim'] is not None:
        _inpdim = pyk.delist([
            5 if d == 3 else indim for d, indim in zip(
                pyk.obj2list(parsey['dim']), pyk.obj2list(_inpdim))
        ])
        _inpdim = pyk.delist([
            4 if (d == 2 and not isseq) else indim for d, indim, isseq in zip(
                pyk.obj2list(parsey['dim']), pyk.obj2list(_inpdim),
                pyk.obj2list(parsey['issequence']))
        ])

    if parsey['inpdim'] is None:
        # Make sure there are no None's remaining in _inpdim
        assert None not in pyk.obj2list(
            _inpdim
        ), "Input dimensionality could not be parsed due to missing information."
        parsey['inpdim'] = _inpdim
    else:
        assert pyk.smartlen(parsey['inpdim']) == pyk.smartlen(_inpdim), \
            "Expected {} elements in inpdim, got {}.".format(pyk.smartlen(_inpdim), pyk.smartlen(parsey['inpdim']))
        # Check consistency with the expected _inpdim
        assert all([_indim == indim for _indim, indim in zip(pyk.obj2list(_inpdim), pyk.obj2list(parsey['inpdim']))
                    if _indim is not None]), \
            "Provided inpdim is inconsistent with either dim or issequence."

    # Parse dim
    # Compute expected _dim from what's known
    _dim = pyk.delist([
        2 if (indim == 4 or isseq) else 3 for indim, isseq in zip(
            pyk.obj2list(parsey['inpdim']), pyk.obj2list(parsey['issequence']))
    ])
    # Fill in dim, or check the given dim for consistency
    if parsey['dim'] is None:
        parsey['dim'] = _dim
    else:
        assert parsey[
            'dim'] == _dim, "Given dim ({}) is not consistent with expectation ({})".format(
                parsey['dim'], _dim)

    # Reparse inpshape
    if parsey['inpshape'] is None:
        parsey['inpshape'] = pyk.delist([[
            None,
        ] * indim for indim in pyk.obj2list(parsey['inpdim'])])

    # Return parsey :(
    return parsey
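
A hedged illustration of what the parser should return when only inpshape is given; the values follow mechanically from the rules above (allowsequences and issequence are guessed, dim is derived):

parsed = parselayerinfo(inpshape=[None, 3, None, None], verbose=False)
# numinp and inpdim are read off inpshape, the rest is guessed or derived:
# parsed == {'dim': 2, 'inpdim': 4, 'issequence': False,
#            'allowsequences': True, 'numinp': 1,
#            'inpshape': [None, 3, None, None]}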
Example #4
    def __theano__pool(self,
                       inp,
                       ds,
                       stride=None,
                       padding=None,
                       poolmode='max',
                       dim=None,
                       ignoreborder=True,
                       issequence=False):

        # Do imports locally to prevent circular dependencies
        import netutils as nu
        import pykit as pyk
        from theano.tensor.signal import pool as downsample

        # Determine the dimensionality of pooling (2D or 3D?)
        if dim is None:
            dim = 3 if not issequence and len(ds) == 3 and inp.ndim == 5 else 2

        # Defaults
        poolmode = 'average_exc_pad' if poolmode in [
            'mean', 'average', 'average_exc_pad'
        ] else poolmode
        padding = [[0, 0]] * dim if padding is None else padding
        stride = ds if stride is None else stride

        # Autofix inputs
        if isinstance(padding, int):
            padding = [padding] * dim
        if not pyk.islistoflists(pyk.obj2list(padding)):
            padding = [[padval] * dim for padval in pyk.obj2list(padding)]
        if isinstance(stride, int):
            stride = [stride] * dim

        # Check if theano can pad input as required
        autopaddable = all(
            [all([dimpad == pad[0] for dimpad in pad]) for pad in padding])

        # Reshape 2D sequential data if required
        # Log input shape
        inpshape = inp.shape
        reallyissequential = issequence and inp.ndim == 5
        if issequence:
            if reallyissequential:
                # Sequential input must be paddable by theano. This is required to reshape the sequential input back to
                # its original shape after pooling.
                assert autopaddable, "Sequential inputs must be paddable by theano. Provided padding {} cannot be " \
                                     "handled at present.".format(padding)
                inp = inp.reshape((inpshape[0] * inpshape[1], inpshape[2],
                                   inpshape[3], inpshape[4]),
                                  ndim=4)
                ds = ds[0:2]
                stride = stride[0:2]
                padding = padding[0:2]
            else:
                warn(
                    "Expected 5D sequential input, but got a 4D non-sequential tensor instead."
                )

        # Determine what theano needs to be told about how to pad the input
        if autopaddable:
            autopadding = tuple([pad[0] for pad in padding])
        else:
            autopadding = (0, ) * dim

        if not autopaddable and not all(
            [padval == 0 for padval in pyk.flatten(padding)]):
            if not th.config.device == 'cpu' and not self.cpupadwarned:
                warn(
                    "Padding might occur on the CPU, which tends to slow things down."
                )
                self.cpupadwarned = True
            inp = nu.pad(inp, padding)

        if dim == 2:
            y = downsample.pool_2d(input=inp,
                                   ds=ds,
                                   st=stride,
                                   padding=autopadding,
                                   ignore_border=ignoreborder,
                                   mode=poolmode)

        elif dim == 3:
            # parse downsampling ratio, stride and padding
            dsyx = ds[0:2]
            styx = stride[0:2]
            padyx = autopadding[0:2]

            ds0z = (1, ds[2])
            st0z = (1, stride[2])
            pad0z = (0, autopadding[2])

            # Downsample yx
            H = downsample.pool_2d(input=inp,
                                   ds=dsyx,
                                   st=styx,
                                   padding=padyx,
                                   mode=poolmode)
            # Rotate tensor
            H = H.dimshuffle(0, 2, 3, 4, 1)
            # Downsample 0z
            H = downsample.pool_2d(input=H,
                                   ds=ds0z,
                                   st=st0z,
                                   padding=pad0z,
                                   mode=poolmode)
            # Undo rotate tensor
            y = H.dimshuffle(0, 4, 1, 2, 3)

        else:
            raise NotImplementedError("Pooling is implemented in 2D and 3D.")

        if issequence and reallyissequential:
            # Compute symbolic pool output length
            if ignoreborder:
                pooleny, poolenx = \
                    [T.floor((inpshape[tensorindex] + 2 * autopadding[index] - ds[index] + stride[index])/stride[index])
                     for index, tensorindex in enumerate([3, 4])]
            else:
                poolen = [None, None]

                for index, tensorindex in enumerate([3, 4]):
                    if stride[index] >= ds[index]:
                        poolen[index] = T.floor(
                            (inpshape[tensorindex] + stride[index] - 1) /
                            stride[index])
                    else:
                        plen = T.floor((inpshape[tensorindex] - ds[index] +
                                        stride[index] - 1) / stride[index])
                        poolen[index] = T.switch(plen > 0, plen, 0)

                pooleny, poolenx = poolen

            y = y.reshape(
                (inpshape[0], inpshape[1], inpshape[2], pooleny, poolenx),
                ndim=5)

        return y
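
The symbolic pooled-length expression above is standard pooling arithmetic; a small plain-Python sketch of the ignoreborder=True branch (no theano required):

import math

def pooled_len(inlen, ds, stride, pad):
    # Same formula as the symbolic ignoreborder=True branch above:
    # floor((inlen + 2 * pad - ds + stride) / stride)
    return int(math.floor((inlen + 2 * pad - ds + stride) / float(stride)))

print(pooled_len(256, ds=2, stride=2, pad=0))  # 128
print(pooled_len(257, ds=2, stride=2, pad=0))  # 128: the odd border is dropped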
Example #5
    def __theano__conv(self,
                       inp,
                       filters,
                       stride=None,
                       dilation=None,
                       padding=None,
                       bias=None,
                       filtergradmask=None,
                       biasgradmask=None,
                       filtermask=None,
                       biasmask=None,
                       filtergradclips=None,
                       biasgradclips=None,
                       dim=None,
                       convmode='same',
                       issequence=False,
                       implementation='auto'):

        # Do imports locally to prevent circular dependencies
        import netutils as nu
        import theanops as tho
        import pykit as pyk

        # Determine the dimensionality of convolution (2 or 3?)
        if dim is None:
            dim = 3 if not issequence and len(
                filters.get_value().shape) == 5 and inp.ndim == 5 else 2

        # Smart fix: if convmode is 'same', stride != 1 and padding is None: automagically set padding.

        # Defaults
        padding = [[0, 0]] * dim if padding is None else padding
        stride = [1] * dim if stride is None else stride
        dilation = [1] * dim if dilation is None else dilation
        filtergradclips = [
            -np.inf, np.inf
        ] if filtergradclips is None else list(filtergradclips)
        biasgradclips = [-np.inf, np.inf
                         ] if biasgradclips is None else list(biasgradclips)

        # Autofix inputs
        if isinstance(padding, int):
            padding = [padding] * dim
        if not pyk.islistoflists(pyk.obj2list(padding)):
            padding = [[padval] * dim for padval in pyk.obj2list(padding)]
        if isinstance(stride, int):
            stride = [stride] * dim
        if isinstance(dilation, int):
            dilation = [dilation] * dim

        # TODO: Tests
        pass

        # Reshape 2D sequential data if required
        # Log input shape
        inpshape = inp.shape
        reallyissequential = issequence and inp.ndim == 5
        if issequence:
            if reallyissequential:
                inp = inp.reshape((inpshape[0] * inpshape[1], inpshape[2],
                                   inpshape[3], inpshape[4]),
                                  ndim=4)
                stride = stride[0:2]
                padding = padding[0:2]
                # TODO: Get rid of these restrictions
                assert stride == [
                    1, 1
                ], "Strided convolution is not implemented for sequential data."
                assert convmode == 'same', "Convmode must be 'same' for sequential data."

            else:
                warn(
                    "Expected 5D sequential input, but got a 4D non-sequential tensor instead."
                )

        # Apply gradient masks if required
        if filtergradmask is not None:
            filters = tho.maskgradient(filters, filtergradmask)
        if biasgradmask is not None and bias is not None:
            bias = tho.maskgradient(bias, biasgradmask)

        # Apply masks if required
        if filtermask is not None:
            filters = filtermask * filters
        if biasmask is not None:
            bias = biasmask * bias

        # Determine border_mode for CuDNN/3D conv
        autopaddable, bordermode, trim = self.__theano__bordermode(
            convmode, padding,
            filters.get_value().shape)

        # Pad input if required (warn that it's ridiculously slow)
        if not autopaddable and not all(
            [padval == 0 for padval in pyk.flatten(padding)]):
            if not isinstance(bordermode,
                              str) and pyk.islistoflists(bordermode):
                # Override padding for 3D convolutions
                inp = nu.pad(inp, bordermode)
                bordermode = 'valid'
            else:
                inp = nu.pad(inp, padding)

        # Switch implementation
        if implementation == 'auto':
            # Fall back implementation: 'vanilla'
            implementation = 'vanilla'
            if list(dilation) != [1, 1]:
                implementation = 'dilated'

        # Convolve 2D (with gradmask + bias), reshape sequential data
        if dim == 2:
            if implementation == 'vanilla':
                if list(dilation) != [1, 1]:
                    warn(
                        "Filter dilation is not possible with this implementation."
                    )

                # Convolve
                y = T.nnet.conv2d(input=inp,
                                  filters=th.gradient.grad_clip(
                                      filters, *filtergradclips),
                                  border_mode=tuple(bordermode) if isinstance(
                                      bordermode, list) else bordermode,
                                  filter_shape=filters.get_value().shape,
                                  subsample=tuple(stride))

            elif implementation == 'dilated':

                # Make sure stride is 1
                assert list(stride) == [
                    1, 1
                ], "Stride should equal [1, 1] for dilated convolutions."
                assert not issequence, "Dilated convolution is not supported for sequential data."
                # Dilated conv can't handle padding at the moment, do this manually
                if isinstance(bordermode, tuple):
                    padding = [[bm, bm] for bm in bordermode]
                    inp = nu.pad(inp, padding)
                elif bordermode == 'full':
                    raise NotImplementedError(
                        "Convolution mode 'full' is not implemented for dilated convolutions."
                    )
                elif bordermode == 'valid':
                    pass
                elif bordermode == 'half':
                    assert all([d % 2 == 0 for d in dilation]), "Dilation amount must be divisible by 2 for dilated " \
                                                                "convolution with 'same' border handling."

                    padding = [[
                        (filters.get_value().shape[n] - 1) * d // 2,
                    ] * 2 for n, d in zip([2, 3], dilation)]
                    inp = nu.pad(inp, padding)
                else:
                    raise NotImplementedError(
                        "Unknown bordermode: {}.".format(bordermode))

                # Get output image shape
                oishp = [
                    inp.shape[n] - (filters.shape[n] - 1) * d
                    for n, d in zip([2, 3], dilation)
                ]

                # Get computin'
                op = T.nnet.abstract_conv.AbstractConv2d_gradWeights(
                    subsample=tuple(dilation),
                    border_mode='valid',
                    filter_flip=False)
                y = op(inp.transpose(1, 0, 2, 3),
                       filters.transpose(1, 0, 2, 3), tuple(oishp))
                y = y.transpose(1, 0, 2, 3)

            else:
                raise NotImplementedError(
                    "Implementation {} is not implemented.".format(
                        implementation))

            # Trim if required
            if trim:
                y = self.__theano__convtrim(
                    inp=y, filtershape=filters.get_value().shape)

            # Add bias if required
            if bias is not None:
                y = y + th.gradient.grad_clip(bias, *biasgradclips).dimshuffle(
                    'x', 0, 'x', 'x')

        elif dim == 3:
            # Convolve 3D (with bias)
            if implementation == 'auto' or implementation == 'conv2d':

                assert stride == [
                    1, 1, 1
                ], "Implementation 'conv2d' does not support strided convolution in 3D."
                assert convmode == 'valid', "Implementation 'conv2d' only supports 'valid' convolutions."

                y = T.nnet.conv3d2d.conv3d(
                    signals=inp,
                    filters=th.gradient.grad_clip(filters, *filtergradclips),
                    border_mode=bordermode,
                    filters_shape=filters.get_value().shape)
            else:
                raise NotImplementedError(
                    "Implementation {} is not implemented.".format(
                        implementation))

            # Trim if required
            if trim:
                y = self.__theano__convtrim(
                    inp=y, filtershape=filters.get_value().shape)

            # Add bias if required
            if bias is not None:
                y = y + th.gradient.grad_clip(bias, *biasgradclips).dimshuffle(
                    'x', 'x', 0, 'x', 'x')

        else:
            raise NotImplementedError(
                "Convolution is implemented in 2D and 3D.")

        # Reshape sequential data
        if issequence and reallyissequential:
            y = y.reshape(
                (inpshape[0], inpshape[1], filters.get_value().shape[0],
                 inpshape[3], inpshape[4]),
                ndim=5)

        # Return
        return y
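
The oishp expression above is the usual 'valid' dilated-convolution arithmetic: a kernel of size k with dilation d effectively spans (k - 1) * d + 1 pixels. A quick numeric sketch with hypothetical sizes:

def dilated_valid_len(inlen, k, d):
    # 'valid' output length for kernel size k and dilation d,
    # matching the oishp expression above: inlen - (k - 1) * d
    return inlen - (k - 1) * d

print(dilated_valid_len(64, k=3, d=1))  # 62: ordinary valid convolution
print(dilated_valid_len(64, k=3, d=2))  # 60: the kernel effectively spans 5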
Example #6
    def __theano__conv(self,
                       inp,
                       filters,
                       stride=None,
                       padding=None,
                       bias=None,
                       filtergradmask=None,
                       biasgradmask=None,
                       filtermask=None,
                       biasmask=None,
                       filtergradclips=None,
                       biasgradclips=None,
                       dim=None,
                       convmode='same',
                       issequence=False,
                       implementation='auto'):

        # Do imports locally to prevent circular dependencies
        import netutils as nu
        import theanops as tho
        import pykit as pyk

        # Determine the dimensionality of convolution (2 or 3?)
        if dim is None:
            dim = 3 if not issequence and len(
                filters.get_value().shape) == 5 and inp.ndim == 5 else 2

        # Smart fix: if convmode is 'same', stride != 1 and padding is None: automagically set padding.

        # Defaults
        padding = [[0, 0]] * dim if padding is None else padding
        stride = [1] * dim if stride is None else stride
        filtergradclips = [
            -np.inf, np.inf
        ] if filtergradclips is None else list(filtergradclips)
        biasgradclips = [-np.inf, np.inf
                         ] if biasgradclips is None else list(biasgradclips)

        # Autofix inputs
        if isinstance(padding, int):
            padding = [padding] * dim
        if not pyk.islistoflists(pyk.obj2list(padding)):
            padding = [[padval] * dim for padval in pyk.obj2list(padding)]
        if isinstance(stride, int):
            stride = [stride] * dim

        # TODO: Tests
        pass

        # Reshape 2D sequential data if required
        # Log input shape
        inpshape = inp.shape
        reallyissequential = issequence and inp.ndim == 5
        if issequence:
            if reallyissequential:
                inp = inp.reshape((inpshape[0] * inpshape[1], inpshape[2],
                                   inpshape[3], inpshape[4]),
                                  ndim=4)
                stride = stride[0:2]
                padding = padding[0:2]
                # TODO: Get rid of these restrictions
                assert stride == [
                    1, 1
                ], "Strided convolution is not implemented for sequential data."
                assert convmode == 'same', "Convmode must be 'same' for sequential data."

            else:
                warn(
                    "Expected 5D sequential input, but got a 4D non-sequential tensor instead."
                )

        # Apply gradient masks if required
        if filtergradmask is not None:
            filters = tho.maskgradient(filters, filtergradmask)
        if biasgradmask is not None and bias is not None:
            bias = tho.maskgradient(bias, biasgradmask)

        # Apply masks if required
        if filtermask is not None:
            filters = filtermask * filters
        if biasmask is not None:
            bias = biasmask * bias

        # Determine border_mode for CuDNN/3D conv
        autopaddable, bordermode, trim = self.__theano__bordermode(
            convmode, padding,
            filters.get_value().shape)

        # Pad input if required (warn that it's ridiculously slow)
        if not autopaddable and not all(
            [padval == 0 for padval in pyk.flatten(padding)]):
            if not th.config.device == 'cpu' and not self.cpupadwarned:
                warn(
                    "Padding might occur on the CPU, which tends to slow things down."
                )
                self.cpupadwarned = True
            if not isinstance(bordermode,
                              str) and pyk.islistoflists(bordermode):
                # Override padding for 3D convolutions
                inp = nu.pad(inp, bordermode)
                bordermode = 'valid'
            else:
                inp = nu.pad(inp, padding)

        # Convolve 2D (with gradmask + bias), reshape sequential data
        if dim == 2:
            if implementation == 'auto':
                # Convolve
                y = T.nnet.conv2d(input=inp,
                                  filters=th.gradient.grad_clip(
                                      filters, *filtergradclips),
                                  border_mode=tuple(bordermode) if isinstance(
                                      bordermode, list) else bordermode,
                                  filter_shape=filters.get_value().shape,
                                  subsample=tuple(stride))
            else:
                raise NotImplementedError(
                    "Implementation {} is not implemented.".format(
                        implementation))

            # Trim if required
            if trim:
                y = self.__theano__convtrim(
                    inp=y, filtershape=filters.get_value().shape)

            # Add bias if required
            if bias is not None:
                y = y + th.gradient.grad_clip(bias, *biasgradclips).dimshuffle(
                    'x', 0, 'x', 'x')

        elif dim == 3:
            # Convolve 3D (with bias)
            if implementation == 'auto' or implementation == 'conv2d':

                assert stride == [
                    1, 1, 1
                ], "Implementation 'conv2d' does not support strided convolution in 3D."
                assert convmode == 'valid', "Implementation 'conv2d' only supports 'valid' convolutions."

                y = T.nnet.conv3d2d.conv3d(
                    signals=inp,
                    filters=th.gradient.grad_clip(filters, *filtergradclips),
                    border_mode=bordermode,
                    filters_shape=filters.get_value().shape)
            else:
                raise NotImplementedError(
                    "Implementation {} is not implemented.".format(
                        implementation))

            # Trim if required
            if trim:
                y = self.__theano__convtrim(
                    inp=y, filtershape=filters.get_value().shape)

            # Add bias if required
            if bias is not None:
                y = y + th.gradient.grad_clip(bias, *biasgradclips).dimshuffle(
                    'x', 'x', 0, 'x', 'x')

        else:
            raise NotImplementedError(
                "Convolution is implemented in 2D and 3D.")

        # Reshape sequential data
        if issequence and reallyissequential:
            y = y.reshape(
                (inpshape[0], inpshape[1], filters.get_value().shape[0],
                 inpshape[3], inpshape[4]),
                ndim=5)

        # Return
        return y
Example #7
    def __theano__conv(self, inp, filters, stride=None, padding=None, bias=None, filtergradmask=None,
                       biasgradmask=None, filtermask=None, biasmask=None, filtergradclips=None, biasgradclips=None,
                       dim=None, convmode='same', issequence=False, implementation='auto'):

        # Do imports locally to prevent circular dependencies
        import netutils as nu
        import theanops as tho
        import pykit as pyk

        # Determine the dimensionality of convolution (2 or 3?)
        if dim is None:
            dim = 3 if not issequence and len(filters.get_value().shape) == 5 and inp.ndim == 5 else 2

        # Smart fix: if convmode is 'same', stride != 1 and padding is None: automagically set padding.

        # Defaults
        padding = [[0, 0]] * dim if padding is None else padding
        stride = [1] * dim if stride is None else stride
        filtergradclips = [-np.inf, np.inf] if filtergradclips is None else list(filtergradclips)
        biasgradclips = [-np.inf, np.inf] if biasgradclips is None else list(biasgradclips)

        # Autofix inputs
        if isinstance(padding, int):
            padding = [padding] * dim
        if not pyk.islistoflists(pyk.obj2list(padding)):
            padding = [[padval] * dim for padval in pyk.obj2list(padding)]
        if isinstance(stride, int):
            stride = [stride] * dim

        # TODO: Tests
        pass

        # Reshape 2D sequential data if required
        # Log input shape
        inpshape = inp.shape
        reallyissequential = issequence and inp.ndim == 5
        if issequence:
            if reallyissequential:
                inp = inp.reshape((inpshape[0] * inpshape[1], inpshape[2], inpshape[3], inpshape[4]), ndim=4)
                stride = stride[0:2]
                padding = padding[0:2]
                # TODO: Get rid of these restrictions
                assert stride == [1, 1], "Strided convolution is not implemented for sequential data."
                assert convmode == 'same', "Convmode must be 'same' for sequential data."

            else:
                warn("Expected 5D sequential output, but got 4D non-sequential instead.")

        # Apply gradient masks if required
        if filtergradmask is not None:
            filters = tho.maskgradient(filters, filtergradmask)
        if biasgradmask is not None and bias is not None:
            bias = tho.maskgradient(bias, biasgradmask)

        # Apply masks if required
        if filtermask is not None:
            filters = filtermask * filters
        if biasmask is not None:
            bias = biasmask * bias

        # Determine border_mode for CuDNN/3D conv
        autopaddable, bordermode, trim = self.__theano__bordermode(convmode, padding, filters.get_value().shape)

        # Pad input if required (warn that it's ridiculously slow)
        if not autopaddable and not all([padval == 0 for padval in pyk.flatten(padding)]):
            if not th.config.device == 'cpu' and not self.cpupadwarned:
                warn("Padding might occur on the CPU, which tends to slow things down.")
                self.cpupadwarned = True
            if not isinstance(bordermode, str) and pyk.islistoflists(bordermode):
                # Override padding for 3D convolutions
                inp = nu.pad(inp, bordermode)
                bordermode = 'valid'
            else:
                inp = nu.pad(inp, padding)


        # Convolve 2D (with gradmask + bias), reshape sequential data
        if dim == 2:
            if implementation == 'auto':
                # Convolve
                y = T.nnet.conv2d(input=inp, filters=th.gradient.grad_clip(filters, *filtergradclips),
                                  border_mode=tuple(bordermode) if isinstance(bordermode, list) else bordermode,
                                  filter_shape=filters.get_value().shape, subsample=tuple(stride))
            else:
                raise NotImplementedError("Implementation {} is not implemented.".format(implementation))

            # Trim if required
            if trim:
                y = self.__theano__convtrim(inp=y, filtershape=filters.get_value().shape)

            # Add bias if required
            if bias is not None:
                y = y + th.gradient.grad_clip(bias, *biasgradclips).dimshuffle('x', 0, 'x', 'x')

        elif dim == 3:
            # Convolve 3D (with bias)
            if implementation == 'auto' or implementation == 'conv2d':

                assert stride == [1, 1, 1], "Implementation 'conv2d' does not support strided convolution in 3D."
                assert convmode == 'valid', "Implementation 'conv2d' only supports 'valid' convolutions."

                y = T.nnet.conv3d2d.conv3d(signals=inp, filters=th.gradient.grad_clip(filters, *filtergradclips),
                                           border_mode=bordermode,
                                           filters_shape=filters.get_value().shape)
            else:
                raise NotImplementedError("Implementation {} is not implemented.".format(implementation))

            # Trim if required
            if trim:
                y = self.__theano__convtrim(inp=y, filtershape=filters.get_value().shape)

            # Add bias if required
            if bias is not None:
                y = y + th.gradient.grad_clip(bias, *biasgradclips).dimshuffle('x', 'x', 0, 'x', 'x')

        else:
            raise NotImplementedError("Convolution is implemented in 2D and 3D.")

        # Reshape sequential data
        if issequence and reallyissequential:
            y = y.reshape((inpshape[0], inpshape[1], filters.get_value().shape[0], inpshape[3], inpshape[4]), ndim=5)

        # Return
        return y
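
The th.gradient.grad_clip wrapper applied to filters and bias above clips the gradient in the backward pass while leaving the forward value untouched; a minimal standalone sketch:

import theano
import theano.tensor as T

w = theano.shared(3.0)
y = theano.gradient.grad_clip(w, -0.1, 0.1) ** 2
g = T.grad(y, w)
f = theano.function([], [y, g])
print(f())  # forward value is 9.0; the gradient 2 * w = 6.0 is clipped to 0.1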
Example #8
    def __theano__pool(self, inp, ds, stride=None, padding=None, poolmode='max', dim=None,
                       ignoreborder=True, issequence=False):

        # Do imports locally to prevent circular dependencies
        import netutils as nu
        import pykit as pyk
        from theano.tensor.signal import downsample

        # Determine the dimensionality of pooling (2D or 3D?)
        if dim is None:
            dim = 3 if not issequence and len(ds) == 3 and inp.ndim == 5 else 2

        # Defaults
        poolmode = 'average_exc_pad' if poolmode in ['mean', 'average', 'average_exc_pad'] else poolmode
        padding = [[0, 0]] * dim if padding is None else padding
        stride = ds if stride is None else stride

        # Autofix inputs
        if isinstance(padding, int):
            padding = [padding] * dim
        if not pyk.islistoflists(pyk.obj2list(padding)):
            padding = [[padval] * dim for padval in pyk.obj2list(padding)]
        if isinstance(stride, int):
            stride = [stride] * dim

        # Check if theano can pad input as required
        autopaddable = all([all([dimpad == pad[0] for dimpad in pad]) for pad in padding])

        # Reshape 2D sequential data if required
        # Log input shape
        inpshape = inp.shape
        reallyissequential = issequence and inp.ndim == 5
        if issequence:
            if reallyissequential:
                # Sequential input must be paddable by theano. This is required to reshape the sequential input back to
                # its original shape after pooling.
                assert autopaddable, "Sequential inputs must be paddable by theano. Provided padding {} cannot be " \
                                     "handled at present.".format(padding)
                inp = inp.reshape((inpshape[0] * inpshape[1], inpshape[2], inpshape[3], inpshape[4]), ndim=4)
                ds = ds[0:2]
                stride = stride[0:2]
                padding = padding[0:2]
            else:
                warn("Expected 5D sequential output, but got 4D non-sequential instead.")

        # Determine what theano needs to be told about how to pad the input
        if autopaddable:
            autopadding = tuple([pad[0] for pad in padding])
        else:
            autopadding = (0,) * dim

        if not autopaddable and not all([padval == 0 for padval in pyk.flatten(padding)]):
            if not th.config.device == 'cpu' and not self.cpupadwarned:
                warn("Padding might occur on the CPU, which tends to slow things down.")
                self.cpupadwarned = True
            inp = nu.pad(inp, padding)

        if dim == 2:
            y = downsample.max_pool_2d(input=inp, ds=ds, st=stride, padding=autopadding, ignore_border=ignoreborder,
                                       mode=poolmode)

        elif dim == 3:
            # parse downsampling ratio, stride and padding
            dsyx = ds[0:2]
            styx = stride[0:2]
            padyx = autopadding[0:2]

            ds0z = (1, ds[2])
            st0z = (1, stride[2])
            pad0z = (0, autopadding[2])

            # Downsample yx
            H = downsample.max_pool_2d(input=inp, ds=dsyx, st=styx, padding=padyx, mode=poolmode)
            # Rotate tensor
            H = H.dimshuffle(0, 2, 3, 4, 1)
            # Downsample 0z
            H = downsample.max_pool_2d(input=H, ds=ds0z, st=st0z, padding=pad0z, mode=poolmode)
            # Undo rotate tensor
            y = H.dimshuffle(0, 4, 1, 2, 3)

        else:
            raise NotImplementedError("Pooling is implemented in 2D and 3D.")

        if issequence and reallyissequential:
            # Compute symbolic pool output length
            if ignoreborder:
                pooleny, poolenx = \
                    [T.floor((inpshape[tensorindex] + 2 * autopadding[index] - ds[index] + stride[index])/stride[index])
                     for index, tensorindex in enumerate([3, 4])]
            else:
                poolen = [None, None]

                for index, tensorindex in enumerate([3, 4]):
                    if stride[index] >= ds[index]:
                        poolen[index] = T.floor((inpshape[tensorindex] + stride[index] - 1)/stride[index])
                    else:
                        plen = T.floor((inpshape[tensorindex] - ds[index] + stride[index] - 1)/stride[index])
                        poolen[index] = T.switch(plen > 0, plen, 0)

                pooleny, poolenx = poolen

            y = y.reshape((inpshape[0], inpshape[1], inpshape[2], pooleny, poolenx), ndim=5)

        return y
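
For the ignoreborder=False branch, the pooled length depends on whether the stride covers the pooling window; a plain-Python sketch mirroring the two symbolic cases above:

import math

def pooled_len_keep_border(inlen, ds, stride):
    # Mirrors the ignoreborder=False branches above (zero padding assumed).
    if stride >= ds:
        return int(math.floor((inlen + stride - 1) / float(stride)))
    plen = int(math.floor((inlen - ds + stride - 1) / float(stride)))
    return max(plen, 0)

print(pooled_len_keep_border(257, ds=2, stride=2))  # 129: the border is kept
print(pooled_len_keep_border(257, ds=3, stride=2))  # 127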