Example #1
def create_kim_cnn(layer0_input, embedding_size, input_len, config, pref):
    '''
        One convolution layer with several filter widths, followed by max-pooling
    '''
    filter_width_list = [
        int(fw) for fw in config[pref + '_filterwidth'].split()
    ]
    print(filter_width_list)
    num_filters = int(config[pref + '_num_filters'])
    #num_filters /= len(filter_width_list)
    totfilters = 0
    for i, fw in enumerate(filter_width_list):
        num_feature_map = input_len - fw + 1  # feature-map length after a valid convolution
        conv = Convolutional(image_size=(input_len, embedding_size),
                             filter_size=(fw, embedding_size),
                             num_filters=min(int(config[pref + '_maxfilter']),
                                             num_filters * fw),
                             num_channels=1)
        totfilters += conv.num_filters
        initialize2(conv, num_feature_map)
        conv.name = pref + 'conv_' + str(fw)
        convout = conv.apply(layer0_input)
        pool_layer = MaxPooling(pooling_size=(num_feature_map, 1))
        pool_layer.name = pref + 'pool_' + str(fw)
        act = Rectifier()
        act.name = pref + 'act_' + str(fw)
        outpool = act.apply(pool_layer.apply(convout)).flatten(2)
        if i == 0:
            outpools = outpool
        else:
            outpools = T.concatenate([outpools, outpool], axis=1)
    name_rep_len = totfilters
    return outpools, name_rep_len
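A minimal call sketch for the function above, assuming a plain dict for config with hypothetical key values (only the pref + '_...' naming convention is taken from the code); the Blocks bricks (Convolutional, MaxPooling, Rectifier) and the project's initialize2 helper are assumed to be importable in the surrounding module.

import theano.tensor as T

# Hypothetical config values; keys follow the pref + '_...' convention above.
config = {'cnn_filterwidth': '3 4 5',
          'cnn_num_filters': '100',
          'cnn_maxfilter': '300'}

# The function expects a 4D input: (batch, 1, input_len, embedding_size).
layer0_input = T.tensor4('embeddings')
outpools, rep_len = create_kim_cnn(layer0_input, embedding_size=300,
                                   input_len=40, config=config, pref='cnn')
# outpools: (batch, rep_len), one group of max-pooled filters per filter width.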
Example #2
def create_kim_cnn(layer0_input, embedding_size, input_len, config, pref):
    '''
        One convolution layer with several filter widths, followed by max-pooling
    '''
    filter_width_list = [int(fw) for fw in config[pref + '_filterwidth'].split()]
    print(filter_width_list)
    num_filters = int(config[pref+'_num_filters'])
    #num_filters /= len(filter_width_list)
    totfilters = 0
    for i, fw in enumerate(filter_width_list):
        num_feature_map = input_len - fw + 1  # feature-map length after a valid convolution
        conv = Convolutional(
            image_size=(input_len, embedding_size),
            filter_size=(fw, embedding_size),
            num_filters=min(int(config[pref + '_maxfilter']), num_filters * fw),
            num_channels=1
        )
        totfilters += conv.num_filters
        initialize2(conv, num_feature_map)
        conv.name = pref + 'conv_' + str(fw)
        convout = conv.apply(layer0_input)
        pool_layer = MaxPooling(
            pooling_size=(num_feature_map,1)
        )
        pool_layer.name = pref + 'pool_' + str(fw)
        act = Rectifier()
        act.name = pref + 'act_' + str(fw)
        outpool = act.apply(pool_layer.apply(convout)).flatten(2)
        if i == 0:
            outpools = outpool
        else:
            outpools = T.concatenate([outpools, outpool], axis=1)
    name_rep_len = totfilters
    return outpools, name_rep_len
Example #3
def create_yy_cnn(numConvLayer, conv_input, embedding_size, input_len, config,
                  pref):
    '''
     CNN with several stacked convolution layers, each with its own filter size,
     followed by max-pooling at the end.
    '''
    filter_width_list = [
        int(fw) for fw in config[pref + '_filterwidth'].split()
    ]
    base_num_filters = int(config[pref + '_num_filters'])
    assert len(filter_width_list) == numConvLayer
    convs = []
    fmlist = []
    last_fm = input_len
    for i in range(numConvLayer):
        fw = filter_width_list[i]
        num_feature_map = last_fm - fw + 1  # feature-map length after this layer's valid convolution
        conv = Convolutional(image_size=(last_fm, embedding_size),
                             filter_size=(fw, embedding_size),
                             num_filters=min(int(config[pref + '_maxfilter']),
                                             base_num_filters * fw),
                             num_channels=1)
        fmlist.append(num_feature_map)
        last_fm = num_feature_map
        embedding_size = conv.num_filters
        convs.append(conv)

    initialize(convs)
    for i, conv in enumerate(convs):
        conv.name = pref + '_conv' + str(i)
        conv_input = conv.apply(conv_input)
        conv_input = conv_input.flatten().reshape(
            (conv_input.shape[0], 1, fmlist[i], conv.num_filters))
        lastconv = conv
        lastconv_out = conv_input
    pool_layer = MaxPooling(pooling_size=(last_fm, 1))
    pool_layer.name = pref + '_pool_' + str(fw)
    act = Rectifier()
    act.name = 'act_' + str(fw)
    outpool = act.apply(pool_layer.apply(lastconv_out).flatten(2))
    return outpool, lastconv.num_filters
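A call sketch along the same lines, again with hypothetical config values; note the function asserts one filter width per convolution layer, and initialize plus the Blocks bricks are assumed to be in scope.

import theano.tensor as T

# Hypothetical config: one filter width per convolution layer.
config = {'cnn_filterwidth': '3 3 3',
          'cnn_num_filters': '50',
          'cnn_maxfilter': '200'}

conv_input = T.tensor4('embeddings')  # (batch, 1, input_len, embedding_size)
outpool, rep_len = create_yy_cnn(numConvLayer=3, conv_input=conv_input,
                                 embedding_size=300, input_len=40,
                                 config=config, pref='cnn')
# outpool: (batch, rep_len), the max over the final layer's feature map.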
Example #4
def create_yy_cnn(numConvLayer, conv_input, embedding_size, input_len, config, pref):
    '''
     CNN with several stacked convolution layers, each with its own filter size,
     followed by max-pooling at the end.
    '''
    filter_width_list = [int(fw) for fw in config[pref + '_filterwidth'].split()]
    base_num_filters = int(config[pref + '_num_filters'])
    assert len(filter_width_list) == numConvLayer
    convs = []; fmlist = []
    last_fm = input_len
    for i in range(numConvLayer):
        fw = filter_width_list[i]
        num_feature_map = last_fm - fw + 1  # feature-map length after this layer's valid convolution
        conv = Convolutional(
            image_size=(last_fm, embedding_size),
            filter_size=(fw, embedding_size),
            num_filters=min(int(config[pref + '_maxfilter']), base_num_filters * fw),
            num_channels=1
        )
        fmlist.append(num_feature_map)
        last_fm = num_feature_map
        embedding_size = conv.num_filters
        convs.append(conv)

    initialize(convs)
    for i, conv in enumerate(convs):
        conv.name = pref+'_conv' + str(i)
        conv_input = conv.apply(conv_input)
        conv_input = conv_input.flatten().reshape((conv_input.shape[0], 1, fmlist[i], conv.num_filters))
        lastconv = conv 
        lastconv_out = conv_input
    pool_layer = MaxPooling(
        pooling_size=(last_fm,1)
    )
    pool_layer.name = pref+'_pool_' + str(fw)
    act = Rectifier(); act.name = 'act_' + str(fw)
    outpool = act.apply(pool_layer.apply(lastconv_out).flatten(2))
    return outpool, lastconv.num_filters
Example #5
class ResidualConvolutional(Initializable):
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels'])
    def __init__(self, filter_size, num_filters, num_channels,
                 batch_size=None,
                 mid_noise=False,
                 out_noise=False,
                 tied_noise=False,
                 tied_sigma=False,
                 noise_rate=None,
                 noise_batch_size=None,
                 prior_noise_level=None,
                 image_size=(None, None), step=(1, 1),
                 **kwargs):
        self.filter_size = filter_size
        self.num_filters = num_filters
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.mid_noise = mid_noise
        self.noise_batch_size = noise_batch_size
        self.noise_rate = noise_rate
        self.step = step
        self.border_mode = 'half'
        self.tied_biases = True
        depth = 2

        self.b0 = SpatialBatchNormalization(name='b0')
        self.r0 = Rectifier(name='r0')
        self.n0 = (SpatialNoise(name='n0', noise_rate=self.noise_rate,
                tied_noise=tied_noise, tied_sigma=tied_sigma,
                prior_noise_level=prior_noise_level) if mid_noise else None)
        self.c0 = Convolutional(name='c0')
        self.b1 = SpatialBatchNormalization(name='b1')
        self.r1 = Rectifier(name='r1')
        self.n1 = (SpatialNoise(name='n1', noise_rate=self.noise_rate,
                tied_noise=tied_noise, tied_sigma=tied_sigma,
                prior_noise_level=prior_noise_level) if out_noise else None)
        self.c1 = Convolutional(name='c1')
        kwargs.setdefault('children', []).extend([c for c in [
            self.c0, self.b0, self.r0, self.n0,
            self.c1, self.b1, self.r1, self.n1] if c is not None])
        super(ResidualConvolutional, self).__init__(**kwargs)

    def get_dim(self, name):
        if name == 'input_':
            return ((self.num_channels,) + self.image_size)
        if name == 'output':
            return self.c1.get_dim(name)
        return super(ResidualConvolutional, self).get_dim(name)

    @property
    def num_output_channels(self):
        return self.num_filters

    def _push_allocation_config(self):
        self.b0.input_dim = self.get_dim('input_')
        self.b0.push_allocation_config()
        if self.r0:
            self.r0.push_allocation_config()
        if self.n0:
            self.n0.noise_batch_size = self.noise_batch_size
            self.n0.num_channels = self.num_channels
            self.n0.image_size = self.image_size
        self.c0.filter_size = self.filter_size
        self.c0.batch_size = self.batch_size
        self.c0.num_channels = self.num_channels
        self.c0.num_filters = self.num_filters
        self.c0.border_mode = self.border_mode
        self.c0.image_size = self.image_size
        self.c0.step = self.step
        self.c0.use_bias = False
        self.c0.push_allocation_config()
        c0_shape = self.c0.get_dim('output')
        self.b1.input_dim = c0_shape
        self.b1.push_allocation_config()
        self.r1.push_allocation_config()
        if self.n1:
            self.n1.noise_batch_size = self.noise_batch_size
            self.n1.num_channels = self.num_filters
            self.n1.image_size = c0_shape[1:]
        self.c1.filter_size = self.filter_size
        self.c1.batch_size = self.batch_size
        self.c1.num_channels = self.num_filters
        self.c1.num_filters = self.num_filters
        self.c1.border_mode = self.border_mode
        self.c1.image_size = c0_shape[1:]
        self.c1.step = (1, 1)
        self.c1.use_bias = False
        self.c1.push_allocation_config()

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        shortcut = input_
        # Batchnorm, then Relu, then Convolution
        first_conv = self.b0.apply(input_)
        first_conv = self.r0.apply(first_conv)
        if self.n0:
            first_conv = self.n0.apply(first_conv)
        first_conv = self.c0.apply(first_conv)
        # Batchnorm, then Relu, then Convolution (second time)
        second_conv = self.b1.apply(first_conv)
        second_conv = self.r1.apply(second_conv)
        if self.n1:
            second_conv = self.n1.apply(second_conv)
        residual = second_conv

        # Apply stride and zero-padding to match shortcut to output
        if self.step and self.step != (1, 1):
            shortcut = shortcut[:,:,::self.step[0],::self.step[1]]
        if self.num_filters > self.num_channels:
            padshape = (residual.shape[0],
                    self.num_filters - self.num_channels,
                    residual.shape[2], residual.shape[3])
            shortcut = tensor.concatenate(
                    [shortcut, tensor.zeros(padshape, dtype=residual.dtype)],
                    axis=1)
        elif self.num_filters < self.num_channels:
            # Truncate shortcut channels to match the residual's channel count.
            shortcut = shortcut[:, :self.num_filters, :, :]

        response = shortcut + residual
        return response
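A rough usage sketch for the brick above, following the usual Blocks allocate/initialize workflow. It assumes SpatialBatchNormalization is importable (the project-local SpatialNoise is only needed if mid_noise or out_noise is enabled, which the defaults leave off), and the channel and image sizes are made-up illustration values.

from theano import tensor
from blocks.initialization import IsotropicGaussian, Constant

# 16 -> 32 channels on 32x32 feature maps; the shortcut is zero-padded to match.
block = ResidualConvolutional(filter_size=(3, 3), num_filters=32,
                              num_channels=16, image_size=(32, 32),
                              weights_init=IsotropicGaussian(0.01),
                              biases_init=Constant(0))
block.initialize()

x = tensor.tensor4('features')   # (batch, 16, 32, 32)
y = block.apply(x)               # (batch, 32, 32, 32): shortcut + residual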
Example #6
class NoisyConvolutional2(Initializable, Feedforward, Random):
    """Convolutional transformation sent through a learned noisy channel.

    Applies the noise after the Relu rather than before it.

    Parameters (same as Convolutional)
    """
    @lazy(allocation=[
        'filter_size', 'num_filters', 'num_channels', 'noise_batch_size'
    ])
    def __init__(self,
                 filter_size,
                 num_filters,
                 num_channels,
                 noise_batch_size,
                 image_size=(None, None),
                 step=(1, 1),
                 border_mode='valid',
                 tied_biases=True,
                 prior_mean=0,
                 prior_noise_level=0,
                 **kwargs):
        self.convolution = Convolutional()
        self.rectifier = Rectifier()
        self.mask = Convolutional(name='mask')
        children = [self.convolution, self.rectifier, self.mask]
        kwargs.setdefault('children', []).extend(children)
        super(NoisyConvolutional2, self).__init__(**kwargs)
        self.filter_size = filter_size
        self.num_filters = num_filters
        self.num_channels = num_channels
        self.noise_batch_size = noise_batch_size
        self.image_size = image_size
        self.step = step
        self.border_mode = border_mode
        self.tied_biases = tied_biases
        self.prior_mean = prior_mean
        self.prior_noise_level = prior_noise_level

    def _push_allocation_config(self):
        self.convolution.filter_size = self.filter_size
        self.convolution.num_filters = self.num_filters
        self.convolution.num_channels = self.num_channels
        # self.convolution.batch_size = self.batch_size
        self.convolution.image_size = self.image_size
        self.convolution.step = self.step
        self.convolution.border_mode = self.border_mode
        self.convolution.tied_biases = self.tied_biases
        self.mask.filter_size = (1, 1)
        self.mask.num_filters = self.num_filters
        self.mask.num_channels = self.num_filters
        # self.mask.batch_size = self.batch_size
        self.mask.image_size = self.convolution.get_dim('output')[1:]
        # self.mask.step = self.step
        # self.mask.border_mode = self.border_mode
        self.mask.tied_biases = self.tied_biases

    def _allocate(self):
        out_shape = self.convolution.get_dim('output')
        N = shared_floatx_zeros((self.noise_batch_size, ) + out_shape,
                                name='N')
        add_role(N, NOISE)
        self.parameters.append(N)

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_, application_call):
        """Apply the linear transformation followed by masking with noise.
        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            The input on which to apply the transformations
        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            The transformed input
        """
        pre_noise = self.rectifier.apply(self.convolution.apply(input_))
        # noise_level = self.mask.apply(input_)
        noise_level = (self.prior_noise_level -
                       tensor.clip(self.mask.apply(pre_noise), -16, 16))
        noise_level = copy_and_tag_noise(noise_level, self, LOG_SIGMA,
                                         'log_sigma')
        # Allow incomplete batches by just taking the noise that is needed
        noise = self.parameters[0][:noise_level.shape[0], :, :, :]
        # noise = self.theano_rng.normal(noise_level.shape)
        kl = (self.prior_noise_level - noise_level + 0.5 *
              (tensor.exp(2 * noise_level) +
               (pre_noise - self.prior_mean)**2) /
              tensor.exp(2 * self.prior_noise_level) - 0.5)
        application_call.add_auxiliary_variable(kl, roles=[NITS], name='nits')
        return pre_noise + tensor.exp(noise_level) * noise

    def get_dim(self, name):
        if name == 'input_':
            return self.convolution.get_dim(name)
        if name == 'output':
            return self.convolution.get_dim(name)
        if name == 'nits':
            return self.convolution.get_dim('output')
        return super(NoisyConvolutional2, self).get_dim(name)

    @property
    def num_output_channels(self):
        return self.num_filters
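A similar construction sketch for NoisyConvolutional2, with illustrative sizes. The NOISE/NITS roles, copy_and_tag_noise, and shared_floatx_zeros are project/Blocks utilities assumed to be in scope, and the run-time batch size must not exceed noise_batch_size (the apply method slices the stored noise tensor to the actual batch).

from theano import tensor
from blocks.initialization import IsotropicGaussian, Constant

brick = NoisyConvolutional2(filter_size=(5, 5), num_filters=32, num_channels=3,
                            noise_batch_size=128, image_size=(28, 28),
                            weights_init=IsotropicGaussian(0.01),
                            biases_init=Constant(0))
brick.initialize()

x = tensor.tensor4('features')   # (batch, 3, 28, 28), batch <= noise_batch_size
y = brick.apply(x)               # ReLU(conv(x)) plus learned, scaled noise
# The per-unit KL penalty is attached as an auxiliary variable named 'nits'
# and can be collected from the ComputationGraph for the training cost.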
Example #7
class ResidualConvolutional(Initializable):
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels'])
    def __init__(self,
                 filter_size,
                 num_filters,
                 num_channels,
                 batch_size=None,
                 mid_noise=False,
                 out_noise=False,
                 tied_noise=False,
                 tied_sigma=False,
                 noise_rate=None,
                 noise_batch_size=None,
                 prior_noise_level=None,
                 image_size=(None, None),
                 step=(1, 1),
                 **kwargs):
        self.filter_size = filter_size
        self.num_filters = num_filters
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.mid_noise = mid_noise
        self.noise_batch_size = noise_batch_size
        self.noise_rate = noise_rate
        self.step = step
        self.border_mode = 'half'
        self.tied_biases = True
        depth = 2

        self.b0 = SpatialBatchNormalization(name='b0')
        self.r0 = Rectifier(name='r0')
        self.n0 = (SpatialNoise(name='n0',
                                noise_rate=self.noise_rate,
                                tied_noise=tied_noise,
                                tied_sigma=tied_sigma,
                                prior_noise_level=prior_noise_level)
                   if mid_noise else None)
        self.c0 = Convolutional(name='c0')
        self.b1 = SpatialBatchNormalization(name='b1')
        self.r1 = Rectifier(name='r1')
        self.n1 = (SpatialNoise(name='n1',
                                noise_rate=self.noise_rate,
                                tied_noise=tied_noise,
                                tied_sigma=tied_sigma,
                                prior_noise_level=prior_noise_level)
                   if out_noise else None)
        self.c1 = Convolutional(name='c1')
        kwargs.setdefault('children', []).extend([
            c for c in [
                self.c0, self.b0, self.r0, self.n0, self.c1, self.b1, self.r1,
                self.n1
            ] if c is not None
        ])
        super(ResidualConvolutional, self).__init__(**kwargs)

    def get_dim(self, name):
        if name == 'input_':
            return ((self.num_channels, ) + self.image_size)
        if name == 'output':
            return self.c1.get_dim(name)
        return super(ResidualConvolutional, self).get_dim(name)

    @property
    def num_output_channels(self):
        return self.num_filters

    def _push_allocation_config(self):
        self.b0.input_dim = self.get_dim('input_')
        self.b0.push_allocation_config()
        if self.r0:
            self.r0.push_allocation_config()
        if self.n0:
            self.n0.noise_batch_size = self.noise_batch_size
            self.n0.num_channels = self.num_channels
            self.n0.image_size = self.image_size
        self.c0.filter_size = self.filter_size
        self.c0.batch_size = self.batch_size
        self.c0.num_channels = self.num_channels
        self.c0.num_filters = self.num_filters
        self.c0.border_mode = self.border_mode
        self.c0.image_size = self.image_size
        self.c0.step = self.step
        self.c0.use_bias = False
        self.c0.push_allocation_config()
        c0_shape = self.c0.get_dim('output')
        self.b1.input_dim = c0_shape
        self.b1.push_allocation_config()
        self.r1.push_allocation_config()
        if self.n1:
            self.n1.noise_batch_size = self.noise_batch_size
            self.n1.num_channels = self.num_filters
            self.n1.image_size = c0_shape[1:]
        self.c1.filter_size = self.filter_size
        self.c1.batch_size = self.batch_size
        self.c1.num_channels = self.num_filters
        self.c1.num_filters = self.num_filters
        self.c1.border_mode = self.border_mode
        self.c1.image_size = c0_shape[1:]
        self.c1.step = (1, 1)
        self.c1.use_bias = False
        self.c1.push_allocation_config()

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        shortcut = input_
        # Batchnorm, then Relu, then Convolution
        first_conv = self.b0.apply(input_)
        first_conv = self.r0.apply(first_conv)
        if self.n0:
            first_conv = self.n0.apply(first_conv)
        first_conv = self.c0.apply(first_conv)
        # Batchnorm, then Relu, then Convolution (second time)
        second_conv = self.b1.apply(first_conv)
        second_conv = self.r1.apply(second_conv)
        if self.n1:
            second_conv = self.n1.apply(second_conv)
        residual = second_conv

        # Apply stride and zero-padding to match shortcut to output
        if self.step and self.step != (1, 1):
            shortcut = shortcut[:, :, ::self.step[0], ::self.step[1]]
        if self.num_filters > self.num_channels:
            padshape = (residual.shape[0],
                        self.num_filters - self.num_channels,
                        residual.shape[2], residual.shape[3])
            shortcut = tensor.concatenate(
                [shortcut,
                 tensor.zeros(padshape, dtype=residual.dtype)],
                axis=1)
        elif self.num_filters < self.num_channels:
            # Truncate shortcut channels to match the residual's channel count.
            shortcut = shortcut[:, :self.num_filters, :, :]

        response = shortcut + residual
        return response
Example #8
class NoisyConvolutional2(Initializable, Feedforward, Random):
    """Convolutional transformation sent through a learned noisy channel.

    Applies the noise after the Relu rather than before it.

    Parameters (same as Convolutional)
    """
    @lazy(allocation=[
        'filter_size', 'num_filters', 'num_channels', 'noise_batch_size'])
    def __init__(self, filter_size, num_filters, num_channels, noise_batch_size,
                 image_size=(None, None), step=(1, 1), border_mode='valid',
                 tied_biases=True,
                 prior_mean=0, prior_noise_level=0, **kwargs):
        self.convolution = Convolutional()
        self.rectifier = Rectifier()
        self.mask = Convolutional(name='mask')
        children = [self.convolution, self.rectifier, self.mask]
        kwargs.setdefault('children', []).extend(children)
        super(NoisyConvolutional2, self).__init__(**kwargs)
        self.filter_size = filter_size
        self.num_filters = num_filters
        self.num_channels = num_channels
        self.noise_batch_size = noise_batch_size
        self.image_size = image_size
        self.step = step
        self.border_mode = border_mode
        self.tied_biases = tied_biases
        self.prior_mean = prior_mean
        self.prior_noise_level = prior_noise_level

    def _push_allocation_config(self):
        self.convolution.filter_size = self.filter_size
        self.convolution.num_filters = self.num_filters
        self.convolution.num_channels = self.num_channels
        # self.convolution.batch_size = self.batch_size
        self.convolution.image_size = self.image_size
        self.convolution.step = self.step
        self.convolution.border_mode = self.border_mode
        self.convolution.tied_biases = self.tied_biases
        self.mask.filter_size = (1, 1)
        self.mask.num_filters = self.num_filters
        self.mask.num_channels = self.num_filters
        # self.mask.batch_size = self.batch_size
        self.mask.image_size = self.convolution.get_dim('output')[1:]
        # self.mask.step = self.step
        # self.mask.border_mode = self.border_mode
        self.mask.tied_biases = self.tied_biases

    def _allocate(self):
        out_shape = self.convolution.get_dim('output')
        N = shared_floatx_zeros((self.noise_batch_size,) + out_shape, name='N')
        add_role(N, NOISE)
        self.parameters.append(N)

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_, application_call):
        """Apply the linear transformation followed by masking with noise.
        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            The input on which to apply the transformations
        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            The transformed input
        """
        pre_noise = self.rectifier.apply(self.convolution.apply(input_))
        # noise_level = self.mask.apply(input_)
        noise_level = (self.prior_noise_level
                - tensor.clip(self.mask.apply(pre_noise), -16, 16))
        noise_level = copy_and_tag_noise(
                noise_level, self, LOG_SIGMA, 'log_sigma')
        # Allow incomplete batches by just taking the noise that is needed
        noise = self.parameters[0][:noise_level.shape[0], :, :, :]
        # noise = self.theano_rng.normal(noise_level.shape)
        kl = (
            self.prior_noise_level - noise_level 
            + 0.5 * (
                tensor.exp(2 * noise_level)
                + (pre_noise - self.prior_mean) ** 2
                ) / tensor.exp(2 * self.prior_noise_level)
            - 0.5
            )
        application_call.add_auxiliary_variable(kl, roles=[NITS], name='nits')
        return pre_noise + tensor.exp(noise_level) * noise

    def get_dim(self, name):
        if name == 'input_':
            return self.convolution.get_dim(name)
        if name == 'output':
            return self.convolution.get_dim(name)
        if name == 'nits':
            return self.convolution.get_dim('output')
        return super(NoisyConvolutional2, self).get_dim(name)

    @property
    def num_output_channels(self):
        return self.num_filters