Example #1
    def __init__(self,
                 incoming,
                 W_h=init.GlorotUniform(),
                 b_h=init.Constant(0.),
                 W_t=init.GlorotUniform(),
                 b_t=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity
                             if nonlinearity is None else nonlinearity)

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
        if b_h is None:
            self.b_h = None
        else:
            self.b_h = self.add_param(b_h, (num_inputs, ),
                                      name="b_h",
                                      regularizable=False)

        self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
        if b_t is None:
            self.b_t = None
        else:
            self.b_t = self.add_param(b_t, (num_inputs, ),
                                      name="b_t",
                                      regularizable=False)
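
The four parameters above are exactly what the highway forward pass needs: a transform path h and a gate t that mixes h with the untouched input. A minimal sketch of the matching get_output_for, assuming the usual import theano.tensor as T and the same input flattening as Lasagne's DenseLayer (the method body is an illustration, not part of the original snippet):

    def get_output_for(self, input, **kwargs):
        # Flatten trailing axes so input is (batch, num_inputs), as DenseLayer does.
        if input.ndim > 2:
            input = input.flatten(2)
        # Transform path: h = f(x W_h + b_h)
        h = T.dot(input, self.W_h)
        if self.b_h is not None:
            h = h + self.b_h
        h = self.nonlinearity(h)
        # Transform gate: t = sigmoid(x W_t + b_t)
        t = T.dot(input, self.W_t)
        if self.b_t is not None:
            t = t + self.b_t
        t = T.nnet.sigmoid(t)
        # Highway mix: carry (1 - t) of the input, transform t of it.
        return t * h + (1 - t) * input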
Example #2
    def __init__(self,
                 incoming,
                 num_labels,
                 mask_input=None,
                 W_h=init.GlorotUniform(),
                 W_c=init.GlorotUniform(),
                 b=init.Constant(0.),
                 **kwargs):
        # This layer inherits from a MergeLayer because it can have two
        # inputs: the layer input and the mask. We provide just the layer
        # input as incomings unless a mask input is given.
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(TreeAffineCRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels
        dim_inputs = self.input_shape[2]

        # add parameters
        self.W_h = self.add_param(W_h, (dim_inputs, self.num_labels),
                                  name='W_h')

        self.W_c = self.add_param(W_c, (dim_inputs, self.num_labels),
                                  name='W_c')

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels, ),
                                    name='b',
                                    regularizable=False)
Example #3
 def __init__(self, incoming, seq_len, original_features, num_units, filter_width, pooling='f', **kwargs):
     assert pooling in ['f', 'fo', 'ifo']
     self.pooling = pooling
     
     self.incoming = incoming
     self.seq_len = seq_len
     self.original_features = original_features
     self.num_units = num_units
     self.filter_width = filter_width
     self.internal_seq_len = seq_len + filter_width - 1
     super(QRNNLayer, self).__init__(incoming, **kwargs)
     
     self.Z_W = self.add_param(init.GlorotUniform(),
                               (self.num_units, 1, self.filter_width, self.original_features), name="Z_W")
     self.F_W = self.add_param(init.GlorotUniform(),
                               (self.num_units, 1, self.filter_width, self.original_features), name="F_W")
     
     self.hid_init = self.add_param(init.Constant(0.), (1, self.num_units), name="hid_init",
                                     trainable=False, regularizable=False)
     
     if self.pooling == 'fo' or self.pooling == 'ifo':
         self.O_W = self.add_param(init.GlorotUniform(),
                               (self.num_units, 1, self.filter_width, self.original_features), name="O_W")
     if self.pooling == 'ifo':
         self.I_W = self.add_param(init.GlorotUniform(),
                               (self.num_units, 1, self.filter_width, self.original_features), name="I_W")
Example #4
def ptb_lstm(input_var, vocabulary_size, hidden_size, seq_len, num_layers,
             dropout, batch_size):
    l_input = L.InputLayer(shape=(batch_size, seq_len), input_var=input_var)
    l_embed = L.EmbeddingLayer(l_input,
                               vocabulary_size,
                               hidden_size,
                               W=init.Uniform(1.0))
    l_lstms = []
    for i in range(num_layers):
        l_lstm = L.LSTMLayer(l_embed if i == 0 else l_lstms[-1],
                             hidden_size,
                             ingate=L.Gate(W_in=init.GlorotUniform(),
                                           W_hid=init.Orthogonal()),
                             forgetgate=L.Gate(W_in=init.GlorotUniform(),
                                               W_hid=init.Orthogonal(),
                                               b=init.Constant(1.0)),
                             cell=L.Gate(
                                 W_in=init.GlorotUniform(),
                                 W_hid=init.Orthogonal(),
                                 W_cell=None,
                                 nonlinearity=lasagne.nonlinearities.tanh),
                             outgate=L.Gate(W_in=init.GlorotUniform(),
                                            W_hid=init.Orthogonal()))
        l_lstms.append(l_lstm)
    l_drop = L.DropoutLayer(l_lstms[-1], dropout)
    l_out = L.DenseLayer(l_drop, num_units=vocabulary_size, num_leading_axes=2)
    l_out = L.ReshapeLayer(
        l_out,
        (l_out.output_shape[0] * l_out.output_shape[1], l_out.output_shape[2]))
    l_out = L.NonlinearityLayer(l_out,
                                nonlinearity=lasagne.nonlinearities.softmax)
    return l_out
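
A hedged usage sketch for the builder above; the hyperparameter values are illustrative, not from the original. The returned layer produces row-wise softmax probabilities of shape (batch_size * seq_len, vocabulary_size):

import theano.tensor as T

input_var = T.imatrix('inputs')  # word ids, shaped (batch_size, seq_len)
l_out = ptb_lstm(input_var, vocabulary_size=10000, hidden_size=650,
                 seq_len=35, num_layers=2, dropout=0.5, batch_size=20)
probs = L.get_output(l_out)  # symbolic (20 * 35, 10000) matrix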
Example #5
    def __init__(
            self,
            incomings,
            num_units,
            nonlinearity=LN.tanh,
            name=None,
            W_xh=LI.GlorotUniform(),
            W_hh=LI.GlorotUniform(),
            b=LI.Constant(0.),
            h0=LI.Constant(0.),
        # h0_trainable=False,
    ):

        super().__init__(incomings, name=name)

        input_shape = self.input_shapes[0][1:]
        input_dim = int(np.prod(input_shape))  # builtin int; np.int is deprecated
        # NOTE: for now, not set up to train this...probably never need to,
        # but should make it noisy.
        self.h0 = self.add_param(h0, (num_units, ),
                                 name="h0",
                                 trainable=False,
                                 regularizable=False)

        self.W_xh = self.add_param(W_xh, (input_dim, num_units), name="W_xh")
        self.W_hh = self.add_param(W_hh, (num_units, num_units), name="W_hh")
        self.b = self.add_param(b, (num_units, ),
                                name="b",
                                regularizable=False)

        self.num_units = num_units
        self.nonlinearity = nonlinearity
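
The parameters above support the standard Elman update h_t = f(x_t W_xh + h_{t-1} W_hh + b). A minimal sketch of one step under that assumption; the method name step and the alias TT for theano.tensor are illustrative, not from the original:

    def step(self, x_t, h_prev):
        # One recurrence step: project the input and the previous hidden
        # state, add the bias, and squash with the layer nonlinearity.
        return self.nonlinearity(
            TT.dot(x_t, self.W_xh) + TT.dot(h_prev, self.W_hh) + self.b)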
Example #6
    def __init__(self, incoming, num_units, hidden_nonlinearity,
                 name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.), Wi_init=LI.GlorotUniform(),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True, **kwargs):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = NL.identity

        super(RecurrentLayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights from input to hidden
        self.W_xh = self.add_param(Wi_init, (input_dim, num_units), name="W_xh")
        self.b_h = self.add_param(b_init, (num_units,), name="b_h", regularizable=False)
        # Recurrent weights
        self.W_hh = self.add_param(W_init, (num_units, num_units), name="W_hh")

        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
Example #7
 def _buildConv(self):
     layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=self.X)
     layer = layers.DropoutLayer(layer, p=0.2)
     layer = maxoutConv(layer,
                        num_filters=32 * 5,
                        ds=5,
                        filter_size=(5, 5),
                        stride=(1, 1),
                        pad='same')
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = maxoutConv(layer,
                        num_filters=32 * 5,
                        ds=5,
                        filter_size=(5, 5),
                        stride=(1, 1),
                        pad='same')
     layer = layers.flatten(layer, outdim=2)  # optional: DenseLayer would flatten the input automatically
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer,
                               num_units=256,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.rectify)
     layer = layers.DropoutLayer(layer, p=0.5)
     layer = layers.DenseLayer(layer,
                               num_units=10,
                               W=init.GlorotUniform(),
                               b=init.Constant(0.),
                               nonlinearity=nonlinearities.softmax)
     return layer
Example #8
def build_model(crop_value):
    L = [
        (layers.InputLayer, {
            'shape': (None, 3, 64 - 2 * crop_value, 64 - 2 * crop_value)
        }),
        (layers.Conv2DLayer, {
            'num_filters': 64,
            'filter_size': (3, 3),
            'pad': 0
        }),
        (layers.MaxPool2DLayer, {
            'pool_size': (2, 2)
        }),
        (layers.Conv2DLayer, {
            'num_filters': 128,
            'filter_size': (2, 2),
            'pad': 0
        }),
        (layers.MaxPool2DLayer, {
            'pool_size': (2, 2)
        }),
        (layers.Conv2DLayer, {
            'num_filters': 128,
            'filter_size': (2, 2),
            'pad': 0
        }),
        (layers.MaxPool2DLayer, {
            'pool_size': (4, 4)
        }),
        (layers.DenseLayer, {
            'num_units': 512,
            'nonlinearity': nonlinearities.leaky_rectify,
            'W': init.GlorotUniform(gain='relu')
        }),
        (layers.DropoutLayer, {
            'p': 0.5
        }),
        (layers.DenseLayer, {
            'num_units': 512,
            'nonlinearity': nonlinearities.leaky_rectify,
            'W': init.GlorotUniform(gain='relu')
        }),
        (layers.DenseLayer, {
            'num_units': 18,
            'nonlinearity': nonlinearities.softmax
        }),
    ]

    net = NeuralNet(
        layers=L,
        update=adagrad,
        update_learning_rate=0.01,
        use_label_encoder=True,
        verbose=1,
        max_epochs=100,
        batch_iterator_train=FlipBatchIterator(batch_size=128),
        on_epoch_finished=[EarlyStopping(patience=50, criterion='valid_loss')])
    return net
Example #9
    def __init__(self,
                 output_dim,
                 hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 hidden_W_init=LI.GlorotUniform(),
                 hidden_b_init=LI.Constant(0.),
                 output_W_init=LI.GlorotUniform(),
                 output_b_init=LI.Constant(0.),
                 name=None,
                 input_var=None,
                 input_layer=None,
                 input_shape=None,
                 batch_norm=False):

        Serializable.quick_init(self, locals())

        if name is None:
            prefix = ""
        else:
            prefix = name + "_"

        if input_layer is None:
            l_in = L.InputLayer(shape=(None, ) + input_shape,
                                input_var=input_var)
        else:
            l_in = input_layer
        self._layers = [l_in]
        l_hid = l_in
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="%shidden_%d" % (prefix, idx),
                W=hidden_W_init,
                b=hidden_b_init,
            )
            if batch_norm:
                l_hid = L.batch_norm(l_hid)
            self._layers.append(l_hid)

        l_out = L.DenseLayer(
            l_hid,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="%soutput" % (prefix, ),
            W=output_W_init,
            b=output_b_init,
        )
        self._layers.append(l_out)
        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
        LasagnePowered.__init__(self, [l_out])
Example #10
    def __init__(self,
                 input_shape,
                 output_dim,
                 hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 hidden_W_init=LI.GlorotUniform(),
                 hidden_b_init=LI.Constant(0.),
                 output_W_init=LI.GlorotUniform(),
                 output_b_init=LI.Constant(0.),
                 name=None,
                 input_var=None,
                 input_layer=None):

        if name is None:
            prefix = ""
        else:
            prefix = name + "_"

        if input_layer is None:
            l_in = L.InputLayer(shape=(None, ) + input_shape,
                                input_var=input_var)
        else:
            l_in = input_layer
        self._layers = [l_in]
        l_hid = l_in
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="%shidden_%d" % (prefix, idx),
                W=hidden_W_init,
                b=hidden_b_init,
            )
            self._layers.append(l_hid)
        l_out = L.DenseLayer(
            l_hid,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="%soutput" % (prefix, ),
            W=output_W_init,
            b=output_b_init,
        )
        self._layers.append(l_out)
        self._l_in = l_in
        self._l_out = l_out
        self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
Example #11
 def _forward(self):
     net = {}
     net['input'] = layers.InputLayer(shape=(None, 1, 28, 28),
                                      input_var=self.X)
     net['dense'] = layers.DenseLayer(net['input'],
                                      num_units=2048,
                                      W=init.GlorotUniform(),
                                      b=init.Constant(0.),
                                      nonlinearity=nonlinearities.rectify)
     net['out'] = layers.DenseLayer(net['dense'],
                                    num_units=10,
                                    W=init.GlorotUniform(),
                                    b=None,
                                    nonlinearity=nonlinearities.softmax)
     return net
Example #12
 def __init__(self,
              incoming,
              num_units,
              W=init.GlorotUniform(),
              b=init.Constant(0.),
              nonlinearity=nonlinearities.rectify,
              num_leading_axes=1,
              logit_posterior_mean=None,
              logit_posterior_std=None,
              interval=[-10.0, 0.1],
              shared_axes=(),
              noise_samples=None,
              **kwargs):
     super(DenseLogNormalDropoutLayer, self).__init__(incoming,
                                                      num_units,
                                                      W,
                                                      b,
                                                      nonlinearity,
                                                      num_leading_axes,
                                                       # forward the arguments instead of hardcoding defaults:
                                                       shared_axes=shared_axes,
                                                       noise_samples=noise_samples,
                                                      **kwargs)
     self.logit_posterior_mean = logit_posterior_mean
     self.logit_posterior_std = logit_posterior_std
     self.interval = interval
     self.init_params()
Example #13
 def __init__(self,
              incoming,
              num_units,
              W=init.GlorotUniform(),
              b=init.Constant(0.),
              nonlinearity=nonlinearities.rectify,
              num_leading_axes=1,
              p=0.5,
              log_sigma2=None,
              shared_axes=(),
              noise_samples=None,
              **kwargs):
     super(DenseGaussianDropoutLayer, self).__init__(incoming,
                                                     num_units,
                                                     W,
                                                     b,
                                                     nonlinearity,
                                                     num_leading_axes,
                                                     p,
                                                      # forward the arguments instead of hardcoding defaults:
                                                      shared_axes=shared_axes,
                                                      noise_samples=noise_samples,
                                                     **kwargs)
     self.p = p
     self.log_sigma2 = log_sigma2
     self.init_params()
Example #14
    def __init__(self,
                 incoming,
                 num_units_per_var,
                 nonlinearity=nonlinearities.softmax,
                 **kwargs):
        super(MultivariateDenseLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity = (nonlinearities.identity
                             if nonlinearity is None else nonlinearity)

        self.num_units_per_var = num_units_per_var
        self.num_vars = len(num_units_per_var)

        num_inputs = int(np.prod(self.input_shape[1:]))

        # generate Wi and bi
        for i in range(self.num_vars):
            # W: fall back to Glorot initialization if no "W<i>" was passed
            mem_str = "W%d" % i
            if mem_str not in kwargs:
                kwargs[mem_str] = init.GlorotUniform()
            # register the parameter whether it came from kwargs or defaults
            self.__dict__[mem_str] = self.add_param(
                kwargs[mem_str], (num_inputs, num_units_per_var[i]),
                name=mem_str)

            # b: fall back to zeros if no "b<i>" was passed
            mem_str = "b%d" % i
            if mem_str not in kwargs:
                kwargs[mem_str] = init.Constant(0.)
            self.__dict__[mem_str] = self.add_param(
                kwargs[mem_str], (num_units_per_var[i],),
                name=mem_str, regularizable=False)
Example #15
    def __init__(self,
                 incoming,
                 num_labels,
                 mask_input=None,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 **kwargs):
        # This layer inherits from a MergeLayer because it can have two
        # inputs: the layer input and the mask. We provide just the layer
        # input as incomings unless a mask input is given.

        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(CRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels + 1
        self.pad_label_index = num_labels

        num_inputs = self.input_shape[2]
        self.W = self.add_param(W,
                                (num_inputs, self.num_labels, self.num_labels),
                                name="W")

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b,
                                    (self.num_labels, self.num_labels),
                                    name="b",
                                    regularizable=False)
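
The (num_inputs, num_labels, num_labels) shape of W suggests input-conditioned transition scores between consecutive labels. A sketch of the per-position energies under that reading; the helper name is hypothetical and import theano.tensor as T is assumed:

    def get_energies(self, input):
        # input: (batch, seq_len, num_inputs). Contracting the feature axis
        # against W's first axis yields one (previous label, current label)
        # score matrix per position: (batch, seq_len, num_labels, num_labels).
        out = T.tensordot(input, self.W, axes=[[2], [0]])
        if self.b is not None:
            out = out + self.b
        return out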
Example #16
    def __init__(self,
                 incoming,
                 num_slices,
                 num_features,
                 direction='col',
                 W=init.GlorotUniform(gain='relu'),
                 nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(DenseLayerTensorDot, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity
                             if nonlinearity is None else nonlinearity)
        self.num_inputslices = self.input_shape[1]
        self.num_slices = num_slices
        self.num_inputfeatures = self.input_shape[3]
        self.num_features = num_features
        self.batch_size = self.input_shape[0]
        self.num_rows = self.input_shape[2]

        self.direction = direction
        if direction == 'col':
            self.W = self.add_param(
                W, (num_slices, num_features, self.num_inputslices,
                    self.num_inputfeatures),
                name="W4D_TensorDot_col")
            self.axes = [[1, 3], [2, 3]]
        elif direction == 'row':
            self.W = self.add_param(W, (num_slices, num_features,
                                        self.num_inputslices, self.num_rows),
                                    name="W4D_TensorDot_row")
            self.axes = [[1, 2], [2, 3]]
        else:
            raise ValueError("`direction` has to be either `row` or `col`.")
Example #17
 def __init__(self,
              incoming,
              num_filters,
              filter_size,
              stride=(1, 1, 1),
              pad=0,
              untie_biases=False,
              W=init.GlorotUniform(),
              b=init.Constant(0.),
              nonlinearity=nonlinearities.rectify,
              flip_filters=True,
              convolution=T.nnet.conv3d,
              **kwargs):
     BaseConvLayer.__init__(self,
                            incoming,
                            num_filters,
                            filter_size,
                            stride,
                            pad,
                            untie_biases,
                            W,
                            b,
                            nonlinearity,
                            flip_filters,
                            n=3,
                            **kwargs)
     self.convolution = convolution
Example #18
    def __init__(self,
                 incoming,
                 num_units,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 **kwargs):
        """A convenience DenseLayer that cooperates with recurrent layers.

        Recurrent layers work on 3-dimensional data (batch size x time
        x number of units). By default, Lasagne DenseLayer flattens
        data to 2 dimensions. We could reshape the data or we could
        just use this RNNDenseLayer, which is more convenient.

        For details, refer to Lasagne's DenseLayer documentation.
        """
        super(RNNDenseLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity
                             if nonlinearity is None else nonlinearity)

        self.num_units = num_units

        num_inputs = self.input_shape[2]

        self.W = self.add_param(W, (num_inputs, num_units), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_units, ),
                                    name="b",
                                    regularizable=False)
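
Since the time axis is kept, the matching forward pass contracts only the feature axis. A minimal sketch of get_output_for under that assumption, again assuming import theano.tensor as T:

    def get_output_for(self, input, **kwargs):
        # (batch, time, num_inputs) -> (batch, time, num_units); tensordot
        # over the last axis leaves the two leading axes intact.
        activation = T.tensordot(input, self.W, axes=[[2], [0]])
        if self.b is not None:
            activation = activation + self.b
        return self.nonlinearity(activation)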
Example #19
    def __init__(self,
                 incoming,
                 p_order,
                 W=init.GlorotUniform(),
                 num_leading_axes=1,
                 **kwargs):

        super(PolynomialLayer, self).__init__(incoming, **kwargs)

        if num_leading_axes >= len(self.input_shape):
            raise ValueError(
                "Got num_leading_axes=%d for a %d-dimensional input, "
                "leaving no trailing axes for the dot product." %
                (num_leading_axes, len(self.input_shape)))
        elif num_leading_axes < -len(self.input_shape):
            raise ValueError(
                "Got num_leading_axes=%d for a %d-dimensional input, "
                "requesting more trailing axes than there are input "
                "dimensions." % (num_leading_axes, len(self.input_shape)))
        self.num_leading_axes = num_leading_axes

        if any(s is None for s in self.input_shape[num_leading_axes:]):
            raise ValueError(
                "A PolynomialLayer requires a fixed input shape (except for "
                "the leading axes). Got %r for num_leading_axes=%d." %
                (self.input_shape, self.num_leading_axes))
        num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))

        self.p_order = p_order
        self.W = self.add_param(W, (p_order + 1, num_inputs), name="W")
Example #20
    def __init__(self,
                 incoming,
                 n_slots,
                 d_slots,
                 C=init.GlorotUniform(),
                 M=init.Normal(),
                 b=init.Constant(0.),
                 nonlinearity_final=nonlinearities.identity,
                 **kwargs):
        super(MemoryLayer, self).__init__(incoming, **kwargs)

        self.nonlinearity_final = nonlinearity_final
        self.n_slots = n_slots
        self.d_slots = d_slots

        num_inputs = int(np.prod(self.input_shape[1:]))

        self.C = self.add_param(C, (num_inputs, n_slots),
                                name="C")  # controller
        self.M = self.add_param(M, (n_slots, d_slots),
                                name="M")  # memory slots
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (n_slots, ),
                                    name="b",
                                    regularizable=False)
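
The comments mark C as a controller and M as memory slots. One plausible forward pass consistent with these shapes is an attention-style read over the slots; this is purely an assumption, since the snippet does not include it (import theano.tensor as T assumed):

    def get_output_for(self, input, **kwargs):
        # Address the n_slots with a softmax over x C + b, then return the
        # weighted mix of the d_slots-dimensional memory rows.
        if input.ndim > 2:
            input = input.flatten(2)
        weights = T.dot(input, self.C)
        if self.b is not None:
            weights = weights + self.b
        weights = T.nnet.softmax(weights)
        return self.nonlinearity_final(T.dot(weights, self.M))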
Example #21
    def __init__(self,
                 incoming,
                 num_units,
                 untie_biases=False,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 **kwargs):
        super(NINLayer, self).__init__(incoming, **kwargs)
        self.nonlinearity = (nonlinearities.identity
                             if nonlinearity is None else nonlinearity)

        self.num_units = num_units
        self.untie_biases = untie_biases

        num_input_channels = self.input_shape[1]

        self.W = self.add_param(W, (num_input_channels, num_units), name="W")
        if b is None:
            self.b = None
        else:
            if self.untie_biases:
                biases_shape = (num_units, ) + self.output_shape[2:]
            else:
                biases_shape = (num_units, )
            self.b = self.add_param(b,
                                    biases_shape,
                                    name="b",
                                    regularizable=False)
Example #22
 def __init__(self,
              incoming,
              num_filters,
              filter_size,
              stride=(1, 1),
              crop=0,
              untie_biases=False,
              W=init.GlorotUniform(),
              b=init.Constant(0.),
              nonlinearity=nonlinearities.rectify,
              flip_filters=False,
              **kwargs):
     super(TransposedConv2DLayer, self).__init__(incoming,
                                                 num_filters,
                                                 filter_size,
                                                 stride,
                                                 crop,
                                                 untie_biases,
                                                 W,
                                                 b,
                                                 nonlinearity,
                                                 flip_filters,
                                                 n=2,
                                                 **kwargs)
     # rename self.pad to self.crop:
     self.crop = self.pad
     del self.pad
Example #23
 def __init__(self,
              incoming,
              num_filters,
              filter_size,
              stride=(1, 1, 1),
              crop=0,
              untie_biases=False,
              W=init.GlorotUniform(),
              b=init.Constant(0.),
              nonlinearity=nonlinearities.rectify,
              flip_filters=False,
              convolution=T.nnet.ConvTransp3D,
              output_size=None,
              **kwargs):
     super(Conv3DLayerTransposed, self).__init__(incoming,
                                                 num_filters,
                                                 filter_size,
                                                 stride,
                                                 crop,
                                                 untie_biases,
                                                 W,
                                                 b,
                                                 nonlinearity,
                                                 flip_filters,
                                                 n=3,
                                                 **kwargs)
     self.crop = self.pad
     del self.pad
     self.convolution = convolution
     self.output_size = output_size
Example #24
    def __init__(self,
                 incoming,
                 num_filters,
                 filter_size,
                 dilation=1,
                 untie_biases=False,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 flip_filters=False,
                 convolution=conv.conv1d_mc0,
                 **kwargs):

        self.dilation = dilation

        pre_pad = (filter_size - 1) * dilation

        filter_size += (filter_size - 1) * (dilation - 1)

        l_pad = PadLayer(incoming, batch_ndim=2, width=[(pre_pad, 0)])

        super(DilatedConv1DLayer, self).__init__(incoming=l_pad,
                                                 num_filters=num_filters,
                                                 filter_size=filter_size,
                                                 stride=1,
                                                 pad=0,
                                                 untie_biases=untie_biases,
                                                 W=W,
                                                 b=b,
                                                 nonlinearity=nonlinearity,
                                                 flip_filters=flip_filters,
                                                 convolution=convolution,
                                                 **kwargs)
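
As a concrete check of the padding arithmetic: with filter_size=3 and dilation=2, pre_pad = (3 - 1) * 2 = 4 and the inflated filter_size becomes 3 + 2 * 1 = 5, so padding four steps on the left keeps the convolution causal and preserves the sequence length at stride 1.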
Example #25
    def __init__(self,
                 incoming,
                 num_units,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 num_leading_axes=1,
                 p=0.5,
                 logit_p=None,
                 temp=0.1,
                 shared_axes=(),
                 noise_samples=None,
                 **kwargs):
        super(DenseConcreteDropoutLayer, self).__init__(incoming,
                                                        num_units,
                                                        W,
                                                        b,
                                                        nonlinearity,
                                                        num_leading_axes,
                                                        p,
                                                        # forward the arguments instead of hardcoding defaults:
                                                        shared_axes=shared_axes,
                                                        noise_samples=noise_samples,
                                                        **kwargs)

        self.temp = temp
        self.logit_p = logit_p
        self.init_params()
Example #26
    def __init__(self, incoming, num_units, hidden_nonlinearity,
                 gate_nonlinearity=LN.sigmoid, name=None,
                 W_init=LI.GlorotUniform(), b_init=LI.Constant(0.),
                 hidden_init=LI.Constant(0.), hidden_init_trainable=True):

        if hidden_nonlinearity is None:
            hidden_nonlinearity = LN.identity

        if gate_nonlinearity is None:
            gate_nonlinearity = LN.identity

        super(GRULayer, self).__init__(incoming, name=name)

        input_shape = self.input_shape[2:]

        input_dim = ext.flatten_shape_dim(input_shape)
        # self._name = name
        # Weights for the initial hidden state
        self.h0 = self.add_param(hidden_init, (num_units,), name="h0", trainable=hidden_init_trainable,
                                 regularizable=False)
        # Weights for the reset gate
        self.W_xr = self.add_param(W_init, (input_dim, num_units), name="W_xr")
        self.W_hr = self.add_param(W_init, (num_units, num_units), name="W_hr")
        self.b_r = self.add_param(b_init, (num_units,), name="b_r", regularizable=False)
        # Weights for the update gate
        self.W_xu = self.add_param(W_init, (input_dim, num_units), name="W_xu")
        self.W_hu = self.add_param(W_init, (num_units, num_units), name="W_hu")
        self.b_u = self.add_param(b_init, (num_units,), name="b_u", regularizable=False)
        # Weights for the cell gate
        self.W_xc = self.add_param(W_init, (input_dim, num_units), name="W_xc")
        self.W_hc = self.add_param(W_init, (num_units, num_units), name="W_hc")
        self.b_c = self.add_param(b_init, (num_units,), name="b_c", regularizable=False)
        self.gate_nonlinearity = gate_nonlinearity
        self.num_units = num_units
        self.nonlinearity = hidden_nonlinearity
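
These parameters support the standard GRU update. A sketch of one step under that assumption; the exact gate convention and the alias TT for theano.tensor are assumptions, not part of the original:

    def step(self, x_t, h_prev):
        # Reset and update gates, candidate state, then the convex mix.
        r = self.gate_nonlinearity(
            TT.dot(x_t, self.W_xr) + TT.dot(h_prev, self.W_hr) + self.b_r)
        u = self.gate_nonlinearity(
            TT.dot(x_t, self.W_xu) + TT.dot(h_prev, self.W_hu) + self.b_u)
        c = self.nonlinearity(
            TT.dot(x_t, self.W_xc) + TT.dot(r * h_prev, self.W_hc) + self.b_c)
        return (1. - u) * h_prev + u * c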
Example #27
    def __init__(self,
                 incoming,
                 num_labels,
                 mask_input=None,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 **kwargs):
        self.input_shape = incoming.output_shape
        incomings = [incoming]
        self.mask_incoming_index = -1
        if mask_input is not None:
            incomings.append(mask_input)
            self.mask_incoming_index = 1

        super(CRFLayer, self).__init__(incomings, **kwargs)
        self.num_labels = num_labels + 1
        self.pad_label_index = num_labels

        num_inputs = self.input_shape[2]
        self.W = self.add_param(W,
                                (num_inputs, self.num_labels, self.num_labels),
                                name="W")

        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (self.num_labels, self.num_labels),
                                    name="b",
                                    regularizable=False)
Example #28
 def __init__(self,
              incoming,
              num_filters,
              filter_size,
              stride=(1, 1),
              crop=0,
              untie_biases=False,
              W=init.GlorotUniform(),
              b=init.Constant(0.),
              nonlinearity=nonlinearities.rectify,
              flip_filters=False,
              output_size=None,
              **kwargs):
     # output_size must be set before calling the super constructor
     if (not isinstance(output_size, T.Variable)
             and output_size is not None):
         output_size = as_tuple(output_size, 2, int)
     self.output_size = output_size
     super(TransposedConv2DLayer, self).__init__(incoming,
                                                 num_filters,
                                                 filter_size,
                                                 stride,
                                                 crop,
                                                 untie_biases,
                                                 W,
                                                 b,
                                                 nonlinearity,
                                                 flip_filters,
                                                 n=2,
                                                 **kwargs)
     # rename self.pad to self.crop:
     self.crop = self.pad
     del self.pad
Example #29
    def __init__(self,
                 Period=init.Uniform((10, 100)),
                 Shift=init.Uniform((0., 1000.)),
                 On_End=init.Constant(0.05),
                 Event_W=init.GlorotUniform(),
                 Event_b=init.Constant(0.),
                 out_W=init.GlorotUniform(),
                 out_b=init.Constant(0.)):

        self.Period = Period
        self.Shift = Shift
        self.On_End = On_End
        self.Event_W = Event_W
        self.Event_b = Event_b
        self.out_W = out_W
        self.out_b = out_b
Example #30
    def __init__(self,
                 incoming_vertex,
                 incoming_edge,
                 num_filters,
                 filter_size,
                 W=init.GlorotUniform(),
                 b=init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 **kwargs):
        self.vertex_shape = incoming_vertex.output_shape
        self.edge_shape = incoming_edge.output_shape

        self.input_shape = incoming_vertex.output_shape
        incomings = [incoming_vertex, incoming_edge]
        self.vertex_incoming_index = 0
        self.edge_incoming_index = 1
        super(GraphConvLayer, self).__init__(incomings, **kwargs)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size

        self.W = self.add_param(W, self.get_W_shape(), name="W")
        if b is None:
            self.b = None
        else:
            self.b = self.add_param(b, (num_filters, ),
                                    name="b",
                                    regularizable=False)