Example #1
File: recurrent.py  Project: lchmo444/dlx
    def build(self):
        f_init = self.get_function('weight_init')
        f_inner_init = self.get_function('inner_init')
        f_bias_init = self.get_function('bias_init')
        f_forget_bias_init = self.get_function('forget_bias_init')

        _W_i = f_init((self.input_dim, self.output_dim))
        _U_i = f_inner_init((self.output_dim, self.output_dim))
        _b_i = f_bias_init((self.output_dim, ))

        _W_f = f_init((self.input_dim, self.output_dim))
        _U_f = f_inner_init((self.output_dim, self.output_dim))
        _b_f = f_forget_bias_init((self.output_dim, ))

        _W_c = f_init((self.input_dim, self.output_dim))
        _U_c = f_inner_init((self.output_dim, self.output_dim))
        _b_c = f_bias_init((self.output_dim, ))

        _W_o = f_init((self.input_dim, self.output_dim))
        _U_o = f_inner_init((self.output_dim, self.output_dim))
        _b_o = f_bias_init((self.output_dim, ))

        self.W = TU.shared(numpy.concatenate([_W_i, _W_f, _W_c, _W_o], axis=1),
                           name=self.name + '_W')
        self.U = TU.shared(numpy.concatenate([_U_i, _U_f, _U_c, _U_o], axis=1),
                           name=self.name + '_U')
        self.b = TU.shared(numpy.concatenate([_b_i, _b_f, _b_c, _b_o]),
                           name=self.name + '_b')
        self.params = [self.W, self.U, self.b]
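
The four gate parameter blocks are fused along axis 1, so one matrix product computes all gate pre-activations at once; a step function then recovers each gate by column slicing. A minimal numpy sketch of that slicing (the _slice helper and the dimensions are illustrative, not part of dlx):

    import numpy

    input_dim, output_dim = 5, 8
    W = numpy.zeros((input_dim, 4 * output_dim))  # fused [W_i | W_f | W_c | W_o]

    def _slice(m, gate, dim):
        # columns gate*dim .. (gate+1)*dim hold one gate's block
        return m[:, gate * dim:(gate + 1) * dim]

    W_i, W_f, W_c, W_o = (_slice(W, g, output_dim) for g in range(4))
    assert W_i.shape == (input_dim, output_dim)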
Example #2
File: recurrent.py  Project: lxastro/dlx
    def build(self):
        f_init = self.get_function('weight_init')
        f_inner_init = self.get_function('inner_init')
        f_bias_init = self.get_function('bias_init')
        f_forget_bias_init = self.get_function('forget_bias_init')
         
        _W_i = f_init((self.input_dim, self.output_dim))
        _U_i = f_inner_init((self.output_dim, self.output_dim))
        _b_i = f_bias_init((self.output_dim,))

        _W_f = f_init((self.input_dim, self.output_dim))
        _U_f = f_inner_init((self.output_dim, self.output_dim))
        _b_f = f_forget_bias_init((self.output_dim,))

        _W_c = f_init((self.input_dim, self.output_dim))
        _U_c = f_inner_init((self.output_dim, self.output_dim))
        _b_c = f_bias_init((self.output_dim,))

        _W_o = f_init((self.input_dim, self.output_dim))
        _U_o = f_inner_init((self.output_dim, self.output_dim))
        _b_o = f_bias_init((self.output_dim,),)
        
        self.W = TU.shared(numpy.concatenate([_W_i, _W_f, _W_c, _W_o], axis=1), name=self.name + '_W')
        self.U = TU.shared(numpy.concatenate([_U_i, _U_f, _U_c, _U_o], axis=1), name=self.name + '_U')
        self.b = TU.shared(numpy.concatenate([_b_i, _b_f, _b_c, _b_o]), name=self.name + '_b')
        self.params = [self.W, self.U, self.b]
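
The build pulls a separate forget_bias_init hook because LSTM forget-gate biases are commonly initialized to one, so the gate starts open and gradients flow early in training. A plausible pair of initializers under that assumption (not dlx's actual functions):

    import numpy

    def bias_init(shape):
        # common default: zero-initialized bias
        return numpy.zeros(shape, dtype='float32')

    def forget_bias_init(shape):
        # common LSTM practice: bias of one keeps the forget gate open at the start
        return numpy.ones(shape, dtype='float32')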
Example #3
    def build(self):
        self.W = TU.shared(self.get_function('weight_init')(
            (self.input_dim, self.output_dim)),
                           name=self.name + '_W')
        self.b = TU.shared(self.get_function('bias_init')((self.output_dim, )),
                           name=self.name + '_b')

        self.params = [self.W, self.b]
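
This dense-layer build registers only W and b; the forward pass they imply is a single affine map. A minimal numpy sketch of the intended shapes (names and dimensions are illustrative):

    import numpy

    input_dim, output_dim = 5, 3
    rng = numpy.random.RandomState(0)
    W = rng.randn(input_dim, output_dim)
    b = numpy.zeros(output_dim)

    x = rng.randn(2, input_dim)  # a batch of two inputs
    y = x.dot(W) + b             # affine map, one row per sample
    assert y.shape == (2, output_dim)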
Example #4
    def build(self):
        f_init = self.get_function('weight_init')
        f_inner_init = self.get_function('inner_init')
        f_bias_init = self.get_function('bias_init')
        f_forget_bias_init = self.get_function('forget_bias_init')
        '''
        Attention hidden dense and projector
        '''
        self.W_h_att = TU.shared(f_init(
            (self.output_dim, self.attention_hidden_dim)),
                                 name=self.name + '_W_h_att')
        self.W_x_att = TU.shared(f_init(
            (self.input_dim, self.attention_hidden_dim)),
                                 name=self.name + '_W_x_att')
        self.W_ctx_att = TU.shared(f_init(
            (self.context_dim, self.attention_hidden_dim)),
                                   name=self.name + '_W_ctx_att')
        self.b_att = TU.shared(f_bias_init((self.attention_hidden_dim, )),
                               name=self.name + '_b_att')
        self.w_att_prj = TU.shared(f_init((self.attention_hidden_dim, 1)),
                                   name=self.name + '_w_att_prj')
        ''' 
        LSTM {W: x, U: h, V: weighted context}
        '''
        W_i = f_init((self.input_dim, self.output_dim))
        V_i = f_init((self.context_dim, self.output_dim))
        U_i = f_inner_init((self.output_dim, self.output_dim))
        b_i = f_bias_init((self.output_dim, ))

        W_f = f_init((self.input_dim, self.output_dim))
        V_f = f_init((self.context_dim, self.output_dim))
        U_f = f_inner_init((self.output_dim, self.output_dim))
        b_f = f_forget_bias_init((self.output_dim, ))

        W_c = f_init((self.input_dim, self.output_dim))
        V_c = f_init((self.context_dim, self.output_dim))
        U_c = f_inner_init((self.output_dim, self.output_dim))
        b_c = f_bias_init((self.output_dim, ))

        W_o = f_init((self.input_dim, self.output_dim))
        V_o = f_init((self.context_dim, self.output_dim))
        U_o = f_inner_init((self.output_dim, self.output_dim))
        b_o = f_bias_init((self.output_dim, ))

        # theano variables
        self.W = TU.shared(numpy.concatenate([W_i, W_f, W_c, W_o], axis=1),
                           name=self.name + '_W')
        self.V = TU.shared(numpy.concatenate([V_i, V_f, V_c, V_o], axis=1),
                           name=self.name + '_V')
        self.U = TU.shared(numpy.concatenate([U_i, U_f, U_c, U_o], axis=1),
                           name=self.name + '_U')
        self.b = TU.shared(numpy.concatenate([b_i, b_f, b_c, b_o]),
                           name=self.name + '_b')

        # note: self.W_x_att is created above but not included in params
        self.params = [
            self.W, self.V, self.U, self.b, self.W_h_att, self.W_ctx_att,
            self.b_att, self.w_att_prj
        ]
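
The attention parameters above fit the standard additive (Bahdanau-style) scoring pattern: project the previous hidden state, the current input, and each context vector into a shared hidden space, squash with tanh, reduce each context position to a scalar score with w_att_prj, and normalize with a softmax. A minimal numpy sketch under that assumption (the actual dlx step function may differ):

    import numpy

    def softmax(e):
        z = numpy.exp(e - e.max())
        return z / z.sum()

    output_dim, input_dim, context_dim, att_dim, n_ctx = 4, 3, 6, 5, 7
    rng = numpy.random.RandomState(0)
    W_h_att = rng.randn(output_dim, att_dim)
    W_x_att = rng.randn(input_dim, att_dim)
    W_ctx_att = rng.randn(context_dim, att_dim)
    b_att = numpy.zeros(att_dim)
    w_att_prj = rng.randn(att_dim, 1)

    h = rng.randn(output_dim)            # previous hidden state
    x = rng.randn(input_dim)             # current input
    ctx = rng.randn(n_ctx, context_dim)  # context sequence

    # additive attention: shared hidden projection, tanh, scalar score, softmax
    hidden = numpy.tanh(h.dot(W_h_att) + x.dot(W_x_att) + ctx.dot(W_ctx_att) + b_att)
    alpha = softmax(hidden.dot(w_att_prj).ravel())  # one weight per context position
    weighted_ctx = alpha.dot(ctx)                   # the context input gated by V
    assert weighted_ctx.shape == (context_dim,)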
Example #5
File: attention.py  Project: lxastro/dlx
    def build(self):
        f_init = self.get_function('weight_init')
        f_inner_init = self.get_function('inner_init')
        f_bias_init = self.get_function('bias_init')
        f_forget_bias_init = self.get_function('forget_bias_init')
        
        '''
        Attention hidden dense and projector
        '''
        self.W_h_att = TU.shared(f_init((self.output_dim, self.attention_hidden_dim)), name=self.name + '_W_h_att')
        self.W_x_att = TU.shared(f_init((self.input_dim, self.attention_hidden_dim)), name=self.name + '_W_x_att')
        self.W_ctx_att = TU.shared(f_init((self.context_dim, self.attention_hidden_dim)), name=self.name + '_W_ctx_att')
        self.b_att = TU.shared(f_bias_init((self.attention_hidden_dim,)), name=self.name + '_b_att')
        self.w_att_prj = TU.shared(f_init((self.attention_hidden_dim, 1)), name=self.name + '_w_att_prj')

        ''' 
        LSTM {W: x, U: h, V: weighted context}
        '''
        W_i = f_init((self.input_dim, self.output_dim))
        V_i = f_init((self.context_dim, self.output_dim))
        U_i = f_inner_init((self.output_dim, self.output_dim))
        b_i = f_bias_init((self.output_dim,))

        W_f = f_init((self.input_dim, self.output_dim))
        V_f = f_init((self.context_dim, self.output_dim))
        U_f = f_inner_init((self.output_dim, self.output_dim))
        b_f = f_forget_bias_init((self.output_dim,))

        W_c = f_init((self.input_dim, self.output_dim))
        V_c = f_init((self.context_dim, self.output_dim))
        U_c = f_inner_init((self.output_dim, self.output_dim))
        b_c = f_bias_init((self.output_dim,))

        W_o = f_init((self.input_dim, self.output_dim))
        V_o = f_init((self.context_dim, self.output_dim))
        U_o = f_inner_init((self.output_dim, self.output_dim))
        b_o = f_bias_init((self.output_dim,))
        
        # theano variables
        self.W = TU.shared(numpy.concatenate([W_i, W_f, W_c, W_o], axis=1), name=self.name + '_W')
        self.V = TU.shared(numpy.concatenate([V_i, V_f, V_c, V_o], axis=1), name=self.name + '_V')
        self.U = TU.shared(numpy.concatenate([U_i, U_f, U_c, U_o], axis=1), name=self.name + '_U')
        self.b = TU.shared(numpy.concatenate([b_i, b_f, b_c, b_o]), name=self.name + '_b')
        
        # note: self.W_x_att is created above but not included in params
        self.params = [self.W, self.V, self.U, self.b, self.W_h_att, self.W_ctx_att, self.b_att, self.w_att_prj]
Example #6
File: core.py  Project: lxastro/dlx
    def build(self):
        self.W = TU.shared(self.get_function('weight_init')((self.input_dim, self.output_dim)), name=self.name+'_W')
        self.b = TU.shared(self.get_function('bias_init')((self.output_dim,)), name=self.name+'_b')

        self.params = [self.W, self.b]
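
TU is the project's Theano utility module; from the call sites, TU.shared takes a numpy array and a name and returns a trainable shared variable. A plausible minimal wrapper, assuming it simply casts to floatX and delegates to theano.shared (an assumption, not dlx's actual code):

    import numpy
    import theano

    def shared(value, name=None):
        # wrap a numpy array as a floatX Theano shared variable
        return theano.shared(numpy.asarray(value, dtype=theano.config.floatX),
                             name=name)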