Example #1
    def __init__(self,
                 in_size,
                 out_size,
                 lateral_init=None,
                 upward_init=None,
                 bias_init=0,
                 forget_bias_init=0):
        super(LNStatelessLSTM,
              self).__init__(upward=linear.Linear(in_size,
                                                  4 * out_size,
                                                  initialW=0),
                             lateral=linear.Linear(out_size,
                                                   4 * out_size,
                                                   initialW=0,
                                                   nobias=True),
                             upward_ln=LayerNormalization(),
                             lateral_ln=LayerNormalization(),
                             output_ln=LayerNormalization())
        self.state_size = out_size
        self.lateral_init = lateral_init
        self.upward_init = upward_init
        self.bias_init = bias_init
        self.forget_bias_init = forget_bias_init

        if in_size is not None:
            self._initialize_params()
Example #2
 def __init__(self, in_size, out_size):
     super(SGU, self).__init__(
         W_xh=linear.Linear(in_size, out_size),
         W_zxh=linear.Linear(out_size, out_size),
         W_xz=linear.Linear(in_size, out_size),
         W_hz=linear.Linear(out_size, out_size),
     )
Example #3
 def __init__(self, use_cudnn=True, conv4_out_channels=384):
     super(AlexOWT, self).__init__(
         conv1=convolution_2d.Convolution2D(3,
                                            64,
                                            11,
                                            stride=4,
                                            pad=2,
                                            use_cudnn=use_cudnn),
         conv2=convolution_2d.Convolution2D(64,
                                            192,
                                            5,
                                            pad=2,
                                            use_cudnn=use_cudnn),
         conv3=convolution_2d.Convolution2D(192,
                                            384,
                                            3,
                                            pad=1,
                                            use_cudnn=use_cudnn),
         conv4=convolution_2d.Convolution2D(384,
                                            conv4_out_channels,
                                            3,
                                            pad=1,
                                            use_cudnn=use_cudnn),
         conv5=convolution_2d.Convolution2D(conv4_out_channels,
                                            256,
                                            3,
                                            pad=1,
                                            use_cudnn=use_cudnn),
         fc6=linear.Linear(9216, 4096),
         fc7=linear.Linear(4096, 4096),
         fc8=linear.Linear(4096, 1000))
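
A quick sanity check on fc6's input size, assuming the standard 227×227 AlexNet input: the final pooling stage leaves conv5's 256 channels at 6×6 spatial resolution, and 256 × 6 × 6 = 9216.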
Example #4
    def __init__(self,
                 in_size,
                 out_size=None,
                 lateral_init=None,
                 upward_init=None,
                 bias_init=None,
                 forget_bias_init=None):
        if out_size is None:
            out_size, in_size = in_size, None

        super(LSTMBase, self).__init__()
        if bias_init is None:
            bias_init = 0
        if forget_bias_init is None:
            forget_bias_init = 1
        self.state_size = out_size
        self.lateral_init = lateral_init
        self.upward_init = upward_init
        self.bias_init = bias_init
        self.forget_bias_init = forget_bias_init

        with self.init_scope():
            self.upward = linear.Linear(in_size, 4 * out_size, initialW=0)
            self.lateral = linear.Linear(out_size,
                                         4 * out_size,
                                         initialW=0,
                                         nobias=True)
            if in_size is not None:
                self._initialize_params()
Example #5
    def __init__(self,
                 n_units,
                 n_inputs=None,
                 n_units2=None,
                 initU=None,
                 initW=None,
                 initV=None,
                 bias_init=0):
        """
        :param n_units: Number of hidden units
        :param n_inputs: Number of input units
        :param initU: Input-to-hidden weight matrix initialization
        :param initW: Hidden-to-hidden weight matrix initialization
        :param bias_init: Bias initialization
        """

        if n_inputs is None:
            n_inputs = n_units

        # H0 takes care of the initial hidden-to-hidden input for t=0
        super(ElmanBaseFB, self).__init__(
            U=linear.Linear(n_inputs,
                            n_units,
                            initialW=initU,
                            initial_bias=bias_init),
            W=linear.Linear(n_units, n_units, initialW=initW, nobias=True),
            V=linear.Linear(n_units2, n_units, initialW=initV, nobias=True),
            H0=Offset(n_units),
        )
Example #6
 def __init__(self, in_size, out_size):
     super(LSTM, self).__init__(
         upward=linear.Linear(in_size, 4 * out_size),
         lateral=linear.Linear(out_size, 4 * out_size, nobias=True),
     )
     self.state_size = out_size
     self.reset_state()
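
A usage sketch (not part of the original listing): a stateful link like the LSTM above is driven one time step per call, and reset_state() clears the internal cell/hidden state between sequences. Assuming the standard chainer.links.LSTM API:

import numpy as np
import chainer.links as L

lstm = L.LSTM(10, 20)      # in_size=10, out_size=20, as in the example
lstm.reset_state()         # start a fresh sequence: c and h become None
xs = np.random.randn(5, 3, 10).astype(np.float32)  # (steps, batch, in_size)
for x in xs:
    h = lstm(x)            # shape (3, 20); cell state is carried in the link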
Example #7
    def __init__(self,
                 in_size,
                 out_size,
                 lateral_init=None,
                 upward_init=None,
                 bias_init=0,
                 forget_bias_init=0):
        super(LSTMBase, self).__init__(
            upward=linear.Linear(in_size, 4 * out_size, initialW=0),
            lateral=linear.Linear(out_size,
                                  4 * out_size,
                                  initialW=0,
                                  nobias=True),
        )
        self.state_size = out_size

        for i in six.moves.range(0, 4 * out_size, out_size):
            initializers.init_weight(self.lateral.W.data[i:i + out_size, :],
                                     lateral_init)
            initializers.init_weight(self.upward.W.data[i:i + out_size, :],
                                     upward_init)

        a, i, f, o = lstm._extract_gates(
            self.upward.b.data.reshape(1, 4 * out_size, 1))
        initializers.init_weight(a, bias_init)
        initializers.init_weight(i, bias_init)
        initializers.init_weight(f, forget_bias_init)
        initializers.init_weight(o, bias_init)
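
The per-gate initialization above exists so each of the four gate blocks of the fused bias can be set separately; giving the forget gate a positive bias (forget_bias_init=1, as in Example #4) keeps the cell "remembering" early in training. A toy NumPy sketch of the idea, assuming a simple block layout rather than Chainer's actual interleaved one:

import numpy as np

out_size = 3                                  # toy size for illustration
b = np.zeros(4 * out_size, dtype=np.float32)  # fused bias for gates a, i, f, o
a, i, f, o = np.split(b, 4)                   # np.split returns views into b
f[...] = 1.0                                  # only the forget block is biased
print(b)  # [0. 0. 0. 0. 0. 0. 1. 1. 1. 0. 0. 0.]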
Example #8
 def __init__(self, n_units, n_inputs=None):
     if n_inputs is None:
         n_inputs = n_units
     super(GRUBase2, self).__init__(
         W_r_z=linear.Linear(n_inputs + n_units, n_units * 2),
         W_h=linear.Linear(n_inputs + n_units, n_units),
     )
     self.n_units = n_units
Example #9
 def __init__(self, in_size, out_size):
     super(ChildSumTreeLSTM, self).__init__(
         W_x=linear.Linear(in_size, 4 * out_size),
         W_h_aio=linear.Linear(out_size, 3 * out_size, nobias=True),
         W_h_f=linear.Linear(out_size, out_size, nobias=True),
     )
     self.in_size = in_size
     self.state_size = out_size
     utils.experimental('chainer.links.tree_lstm.py')
Example #10
    def __init__(self, in_size, out_size):
        super(ChildSumTreeLSTM, self).__init__()
        with self.init_scope():
            self.W_x = linear.Linear(in_size, 4 * out_size)
            self.W_h_aio = linear.Linear(out_size, 3 * out_size, nobias=True)
            self.W_h_f = linear.Linear(out_size, out_size, nobias=True)

        self.in_size = in_size
        self.state_size = out_size
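
Examples #9 and #10 define the same link in Chainer's two registration styles: passing child links as keyword arguments to Chain.__init__ (the pre-v2 idiom) versus assigning them inside with self.init_scope():, which registers children automatically. A minimal sketch of the modern idiom (the class name is illustrative):

import chainer
import chainer.links as L

class TinyChain(chainer.Chain):
    def __init__(self, in_size, out_size):
        super(TinyChain, self).__init__()
        with self.init_scope():
            # attribute assignment inside init_scope registers the child link
            self.fc = L.Linear(in_size, out_size)

    def __call__(self, x):
        return self.fc(x)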
Example #11
 def __init__(self, in_size, out_size, c_ratio=0.5, h_ratio=0.5):
     super(StatefulZoneoutLSTM, self).__init__(
         upward=linear.Linear(in_size, 4 * out_size),
         lateral=linear.Linear(out_size, 4 * out_size, nobias=True),
     )
     self.state_size = out_size
     self.c_ratio = c_ratio
     self.h_ratio = h_ratio
     self.reset_state()
Example #12
 def __init__(self, in_size, out_size):
     super(StatefulPeepholeLSTM, self).__init__(
         upward=linear.Linear(in_size, 4 * out_size),
         lateral=linear.Linear(out_size, 4 * out_size, nobias=True),
         peep_i=linear.Linear(out_size, out_size, nobias=True),
         peep_f=linear.Linear(out_size, out_size, nobias=True),
         peep_o=linear.Linear(out_size, out_size, nobias=True),
     )
     self.state_size = out_size
     self.reset_state()
Example #13
    def __init__(self, in_size, out_size, batch_norm_type='upward'):
        super(StatefulLinearRNN, self).__init__(
            upward=L.Linear(in_size, out_size),
            lateral=L.Linear(out_size, out_size))
        if batch_norm_type not in ('none', 'upward', 'lateral', 'output'):
            raise ValueError(
                'Invalid batch_norm_type: {}'.format(batch_norm_type))
        self.batch_norm_type = batch_norm_type

        if batch_norm_type != 'none':
            batch_norm = B.BatchNormalization(out_size)
            self.add_link('batch_norm', batch_norm)

        self.reset_state()
Example #14
    def __init__(self, in_out_size, nobias=False, activate=relu.relu,
                 init_Wh=None, init_Wt=None, init_bh=None, init_bt=-1):
        super(Highway, self).__init__()
        self.activate = activate

        with self.init_scope():
            self.plain = linear.Linear(
                in_out_size, in_out_size, nobias=nobias,
                initialW=init_Wh, initial_bias=init_bh)
            self.transform = linear.Linear(
                in_out_size, in_out_size, nobias=nobias,
                initialW=init_Wt, initial_bias=init_bt)
Example #15
 def __init__(self, in_size, out_size, n_ary=2):
     assert n_ary >= 2
     super(NaryTreeLSTM, self).__init__(
         W_x=linear.Linear(in_size, (3 + n_ary) * out_size))
     for i in range(1, n_ary + 1):
         self.add_link(
             'W_h{}'.format(i),
             linear.Linear(out_size, (3 + n_ary) * out_size, nobias=True))
     self.in_size = in_size
     self.state_size = out_size
     self.n_ary = n_ary
     utils.experimental('chainer.links.tree_lstm.py')
Example #16
    def __init__(self, vocab_size=10, rnn_unit='LSTM'):
        embed = embed_id.EmbedID(vocab_size, 10)
        if rnn_unit == 'LSTM':
            rnns = link.ChainList(lstm.LSTM(10, 20), lstm.LSTM(20, 20))
        elif rnn_unit == 'GRU':
            rnns = link.ChainList(gru.StatefulGRU(20, 10),
                                  gru.StatefulGRU(20, 20))
        else:
            raise ValueError('Invalid RNN unit: {}'.format(rnn_unit))

        linears = link.ChainList(linear.Linear(20, 10),
                                 linear.Linear(10, vocab_size))
        super(BigLSTM, self).__init__(embed=embed, rnns=rnns, linears=linears)
Example #17
    def __init__(self, in_size, out_size, n_ary=2):
        assert n_ary >= 1
        super(NaryTreeLSTM, self).__init__()
        with self.init_scope():
            self.W_x = linear.Linear(in_size, (3 + n_ary) * out_size)

            for i in range(1, n_ary + 1):
                l = linear.Linear(out_size, (3 + n_ary) * out_size,
                                  nobias=True)
                setattr(self, 'W_h{}'.format(i), l)
        self.in_size = in_size
        self.state_size = out_size
        self.n_ary = n_ary
Example #18
    def __init__(self,
                 children,
                 in_size,
                 out_size,
                 lateral_init=None,
                 upward_init=None,
                 bias_init=0,
                 forget_bias_init=0):
        super(SLSTM, self).__init__(
            upward=linear.Linear(in_size, 4 * out_size, initialW=0))
        self.state_size = out_size
        self.n_children = children

        for i in range(0, 4 * out_size, out_size):
            initializers.init_weight(self.upward.W.data[i:i + out_size, :],
                                     upward_init)
        a, i, f, o = numpy_extract_gates(
            self.upward.b.data.reshape(1, 4 * out_size, 1))
        initializers.init_weight(a, bias_init)
        initializers.init_weight(i, bias_init)
        initializers.init_weight(f, forget_bias_init)
        initializers.init_weight(o, bias_init)

        # hidden unit gates for each child
        for i in range(self.n_children):
            self.add_link(
                self.U_I_H.format(i),
                linear.Linear(out_size,
                              out_size,
                              initialW=lateral_init,
                              nobias=True))
            self.add_link(
                self.U_O_H.format(i),
                linear.Linear(out_size,
                              out_size,
                              initialW=lateral_init,
                              nobias=True))
            self.add_link(
                self.U_A_H.format(i),
                linear.Linear(out_size,
                              out_size,
                              initialW=lateral_init,
                              nobias=True))

            for j in range(self.n_children):
                self.add_link(
                    self.U_F_H.format(i, j),
                    linear.Linear(out_size,
                                  out_size,
                                  initialW=lateral_init,
                                  nobias=True))
Example #19
 def __init__(self, n_units, n_inputs=None):
     if n_inputs is None:
         n_inputs = n_units
     super(GRUBase, self).__init__(
         W_r_z_h=linear.Linear(n_inputs, n_units * 3),
         U_r_z=linear.Linear(n_units, n_units * 2),
         #             W_r=linear.Linear(n_inputs, n_units),
         #             U_r = linear.Linear(n_units, n_units),
         #             W_z=linear.Linear(n_inputs, n_units),
         #             U_z=linear.Linear(n_units, n_units),
         #             W=linear.Linear(n_inputs, n_units),
         U=linear.Linear(n_units, n_units),
     )
     self.n_units = n_units
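
The fused W_r_z_h above replaces separate reset/update/candidate input projections (compare the fully unfused GRUBase in Example #27), so all three input-side gate pre-activations come out of one matmul. A hedged sketch of splitting the fused output, assuming an [r | z | h] block ordering:

import numpy as np

def split_gates(y, n_units):
    # y: (batch, 3 * n_units), the output of the fused projection
    assert y.shape[1] == 3 * n_units
    r, z, h = np.split(y, 3, axis=1)  # the block ordering is an assumption
    return r, z, h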
Example #20
 def __init__(self, use_cudnn=True):
     super(VGG_A, self).__init__(
         conv1=convolution_2d.Convolution2D(3, 64, 3, pad=1, use_cudnn=use_cudnn),
         conv2=convolution_2d.Convolution2D(64, 128, 3, pad=1, use_cudnn=use_cudnn),
         conv3_1=convolution_2d.Convolution2D(128, 256, 3, pad=1, use_cudnn=use_cudnn),
         conv3_2=convolution_2d.Convolution2D(256, 256, 3, pad=1, use_cudnn=use_cudnn),
         conv4_1=convolution_2d.Convolution2D(256, 512, 3, pad=1, use_cudnn=use_cudnn),
         conv4_2=convolution_2d.Convolution2D(512, 512, 3, pad=1, use_cudnn=use_cudnn),
         conv5_1=convolution_2d.Convolution2D(512, 512, 3, pad=1, use_cudnn=use_cudnn),
         conv5_2=convolution_2d.Convolution2D(512, 512, 3, pad=1, use_cudnn=use_cudnn),
         fc6=linear.Linear(512 * 7 * 7, 4096),
         fc7=linear.Linear(4096, 4096),
         fc8=linear.Linear(4096, 1000)
     )
     self.use_cudnn = use_cudnn
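
For reference, fc6's 512 * 7 * 7 input size follows from the standard 224×224 VGG input: five pooling stages each halve the resolution, 224 / 2**5 = 7, leaving 512 channels on a 7×7 grid.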
Example #21
    def __init__(self, in_size, out_size,
                 lateral_init=None, upward_init=None,
                 bias_init=0, forget_bias_init=0):
        super(LSTMBase, self).__init__(
            upward=linear.Linear(in_size, 4 * out_size, initialW=0),
            lateral=linear.Linear(out_size, 4 * out_size,
                                  initialW=0, nobias=True),
        )
        self.state_size = out_size
        self.lateral_init = lateral_init
        self.upward_init = upward_init
        self.bias_init = bias_init
        self.forget_bias_init = forget_bias_init

        if in_size is not None:
            self._initialize_params()
Example #22
    def __init__(self, in_size, out_size, pool_size,
                 initialW=None, initial_bias=0):
        super(Maxout, self).__init__()

        linear_out_size = out_size * pool_size
        if initialW is not None:
            initialW = initialW.reshape(linear_out_size, in_size)

        if initial_bias is not None:
            if numpy.isscalar(initial_bias):
                initial_bias = numpy.full(
                    (linear_out_size,), initial_bias, dtype=numpy.float32)
            elif isinstance(initial_bias, (numpy.ndarray, cuda.ndarray)):
                initial_bias = initial_bias.reshape(linear_out_size)
            else:
                raise ValueError(
                    'initial bias must be float, ndarray, or None')

        with self.init_scope():
            self.linear = linear.Linear(
                in_size, linear_out_size,
                nobias=initial_bias is None, initialW=initialW,
                initial_bias=initial_bias)

        self.out_size = out_size
        self.pool_size = pool_size
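
What this constructor sets up is the maxout forward pass: one wide linear map, then a max over each pool of pool_size units. A minimal NumPy sketch under the same (out_size * pool_size, in_size) weight layout:

import numpy as np

def maxout_forward(x, W, b, out_size, pool_size):
    # x: (batch, in_size); W: (out_size * pool_size, in_size)
    y = x.dot(W.T) + b                          # (batch, out_size * pool_size)
    y = y.reshape(len(x), out_size, pool_size)  # group units into pools
    return y.max(axis=2)                        # (batch, out_size)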
Example #23
    def __init__(self, in_size, out_size, c_ratio=0.5, h_ratio=0.5, **kwargs):
        argument.check_unexpected_kwargs(
            kwargs,
            train='train argument is not supported anymore. '
            'Use chainer.using_config')
        argument.assert_kwargs_empty(kwargs)

        super(StatefulZoneoutLSTM, self).__init__()
        self.state_size = out_size
        self.c_ratio = c_ratio
        self.h_ratio = h_ratio
        self.reset_state()

        with self.init_scope():
            self.upward = linear.Linear(in_size, 4 * out_size)
            self.lateral = linear.Linear(out_size, 4 * out_size, nobias=True)
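
The c_ratio and h_ratio arguments control zoneout: at each step, every cell/hidden unit keeps its previous value with the given probability instead of taking the freshly computed one (at test time the expected value is used instead, much like dropout). A hedged training-time sketch:

import numpy as np

def zoneout(prev, new, ratio):
    # with probability `ratio`, a unit is "zoned out" and keeps prev's value
    mask = np.random.rand(*prev.shape) < ratio
    return np.where(mask, prev, new)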
Example #24
 def __init__(self, use_cudnn=True):
     super(C3D, self).__init__(conv1a=Convolution3D(3,
                                                    64,
                                                    3,
                                                    pad=1,
                                                    use_cudnn=use_cudnn),
                               conv2a=Convolution3D(64,
                                                    128,
                                                    3,
                                                    pad=1,
                                                    use_cudnn=use_cudnn),
                               conv3a=Convolution3D(128,
                                                    256,
                                                    3,
                                                    pad=1,
                                                    use_cudnn=use_cudnn),
                               conv3b=Convolution3D(256,
                                                    256,
                                                    3,
                                                    pad=1,
                                                    use_cudnn=use_cudnn),
                               conv4a=Convolution3D(256,
                                                    512,
                                                    3,
                                                    pad=1,
                                                    use_cudnn=use_cudnn),
                               conv4b=Convolution3D(512,
                                                    512,
                                                    3,
                                                    pad=1,
                                                    use_cudnn=use_cudnn),
                               conv5a=Convolution3D(512,
                                                    512,
                                                    3,
                                                    pad=1,
                                                    use_cudnn=use_cudnn),
                               conv5b=Convolution3D(512,
                                                    512,
                                                    3,
                                                    pad=1,
                                                    use_cudnn=use_cudnn),
                               fc6=linear.Linear(4608, 4096),
                               fc7=linear.Linear(4096, 4096),
                               fc8=linear.Linear(4096, 487))
     self.use_cudnn = use_cudnn
Example #25
 def __init__(self, n_units, n_inputs=None):
     if n_inputs is None:
         n_inputs = n_units
     super(PeepHoleLSTMBase, self).__init__(
         # *_x matrices read the input (n_inputs wide); *_h/*_c matrices
         # read the hidden/cell state, which is n_units wide.
         W_fh=linear.Linear(n_units, n_units),
         W_fc=linear.Linear(n_units, n_units),
         W_oh=linear.Linear(n_units, n_units),
         W_oc=linear.Linear(n_units, n_units),
         W_ch=linear.Linear(n_units, n_units),
         W_fx=linear.Linear(n_inputs, n_units),
         W_ox=linear.Linear(n_inputs, n_units),
         W_cx=linear.Linear(n_inputs, n_units),
     )
Example #26
 def __init__(self, n_units, n_inputs=None, init=None, bias_init=None):
     if n_inputs is None:
         n_inputs = n_units
     super(GRUBase, self).__init__(
         W_r_z_h=linear.Linear(n_inputs,
                               n_units * 3,
                               initialW=init,
                               initial_bias=bias_init),
         U_r_z=linear.Linear(n_units,
                             n_units * 2,
                             initialW=init,
                             initial_bias=bias_init),
         #             W_r=linear.Linear(n_inputs, n_units),
         #             U_r = linear.Linear(n_units, n_units),
         #             W_z=linear.Linear(n_inputs, n_units),
         #             U_z=linear.Linear(n_units, n_units),
         #             W=linear.Linear(n_inputs, n_units),
         U=linear.Linear(n_units, n_units),
     )
     self.n_units = n_units
Example #27
 def __init__(self,
              n_units,
              n_inputs=None,
              init=None,
              inner_init=None,
              bias_init=0):
     if n_inputs is None:
         n_inputs = n_units
     super(GRUBase, self).__init__(
         W_r=linear.Linear(n_inputs,
                           n_units,
                           initialW=init,
                           initial_bias=bias_init),
         U_r=linear.Linear(n_units,
                           n_units,
                           initialW=inner_init,
                           initial_bias=bias_init),
         W_z=linear.Linear(n_inputs,
                           n_units,
                           initialW=init,
                           initial_bias=bias_init),
         U_z=linear.Linear(n_units,
                           n_units,
                           initialW=inner_init,
                           initial_bias=bias_init),
         W=linear.Linear(n_inputs,
                         n_units,
                         initialW=init,
                         initial_bias=bias_init),
         U=linear.Linear(n_units,
                         n_units,
                         initialW=inner_init,
                         initial_bias=bias_init),
     )
Example #28
 def __init__(self,
              in_size,
              out_size,
              init=None,
              inner_init=None,
              bias_init=None):
     super(GRUBase, self).__init__()
     with self.init_scope():
         self.W_r = linear.Linear(in_size,
                                  out_size,
                                  initialW=init,
                                  initial_bias=bias_init)
         self.U_r = linear.Linear(out_size,
                                  out_size,
                                  initialW=inner_init,
                                  initial_bias=bias_init)
         self.W_z = linear.Linear(in_size,
                                  out_size,
                                  initialW=init,
                                  initial_bias=bias_init)
         self.U_z = linear.Linear(out_size,
                                  out_size,
                                  initialW=inner_init,
                                  initial_bias=bias_init)
         self.W = linear.Linear(in_size,
                                out_size,
                                initialW=init,
                                initial_bias=bias_init)
         self.U = linear.Linear(out_size,
                                out_size,
                                initialW=inner_init,
                                initial_bias=bias_init)
Example #29
    def __init__(self, in_size, out_size,
                 init=None, upward_init=None, lateral_init=None,
                 inner_init=None, bias_init=0, forget_bias_init=0):
        super(LSTMBase, self).__init__(
            W_i=linear.Linear(in_size, out_size,
                              initialW=upward_init, initial_bias=bias_init),
            U_i=linear.Linear(out_size, out_size,
                              initialW=lateral_init, nobias=True),
            W_f=linear.Linear(in_size, out_size,
                              initialW=upward_init,
                              initial_bias=forget_bias_init),
            U_f=linear.Linear(out_size, out_size,
                              initialW=lateral_init, nobias=True),
            W_a=linear.Linear(in_size, out_size,
                              initialW=upward_init, initial_bias=bias_init),
            U_a=linear.Linear(out_size, out_size,
                              initialW=lateral_init, nobias=True),
            W_o=linear.Linear(in_size, out_size,
                              initialW=upward_init, initial_bias=bias_init),
            U_o=linear.Linear(out_size, out_size,
                              initialW=lateral_init, nobias=True),
        )
        self.state_size = out_size
Example #30
    def __init__(self,
                 in_size,
                 out_size=None,
                 lateral_init=None,
                 upward_init=None,
                 bias_init=None,
                 forget_bias_init=None,
                 h_fusion_init=None,
                 a_fusion_init=None):
        super(ActionConditionedLSTM,
              self).__init__(in_size, out_size, lateral_init, upward_init,
                             bias_init, forget_bias_init)

        if out_size is None:
            out_size, in_size = in_size, None
        self.h_fusion_init = h_fusion_init
        self.a_fusion_init = a_fusion_init

        with self.init_scope():
            self.Wh = linear.Linear(out_size,
                                    out_size,
                                    initialW=0,
                                    nobias=True)
            self.Wa = linear.Linear(None, out_size, initialW=0, nobias=True)