Esempio n. 1
0
    def __init__(self,
                 style_number,
                 style_dim,
                 hidden_status_dim,
                 tparams=None,
                 prefix='style'):
        """
        Init the style parameter: init_params.
        """
        self.W_BBeta = theano.shared(value=util.uniform_random_weight(
            (style_dim, style_dim), 0.01),
                                     name=self._p(prefix, 'W_BBeta'),
                                     borrow=True)
        self.W_hBeta = theano.shared(value=util.uniform_random_weight(
            (hidden_status_dim, style_dim), 0.01),
                                     name=self._p(prefix, 'W_hBeta'),
                                     borrow=True)
        self.B = theano.shared(value=util.uniform_random_weight(
            (style_number, style_dim), 0.01),
                               name=self._p(prefix, 'B'),
                               borrow=True)
        self.vb = theano.shared(util.uniform_random_weight((style_dim, ),
                                                           0.01),
                                name=self._p(prefix, 'vb'),
                                borrow=True)

        # parameters of the model
        self.params = [self.W_BBeta, self.W_hBeta, self.B, self.vb]
        if not tparams is None:
            tparams[self._p(prefix, 'W_BBeta')] = self.W_BBeta
            tparams[self._p(prefix, 'W_hBeta')] = self.W_hBeta
            tparams[self._p(prefix, 'B')] = self.B
            tparams[self._p(prefix, 'vb')] = self.vb
        else:
            print " tparams is None"
Esempio n. 2
0
 def __init__(self, style_number, style_dim, hidden_status_dim, tparams=None, prefix='style'):
     """
     Init the style parameter: init_params.
     """
     self.W_BBeta = theano.shared(
         value=util.uniform_random_weight((style_dim, style_dim), 0.1),
         name=self._p(prefix, 'W_BBeta'),
         borrow=True
     )
     self.W_hBeta = theano.shared(
         value=util.uniform_random_weight((hidden_status_dim, style_dim), 0.1),
         name=self._p(prefix, 'W_hBeta'),
         borrow=True
     )
     self.B = theano.shared(
         value=util.uniform_random_weight((style_number, style_dim), 0.1),
         name=self._p(prefix, 'B'),
         borrow=True
     )
     self.vb = theano.shared(
                             util.uniform_random_weight((style_dim,), 0.1),
                             name=self._p(prefix, 'vb'),
                             borrow=True
                             )
     
     # parameters of the model
     self.params = [self.W_BBeta, self.W_hBeta, self.B, self.vb]
     if not tparams is None:
         tparams[self._p(prefix, 'W_BBeta')] = self.W_BBeta
         tparams[self._p(prefix, 'W_hBeta')] = self.W_hBeta
         tparams[self._p(prefix, 'B')] = self.B
         tparams[self._p(prefix, 'vb')] = self.vb
     else:
         print " tparams is None"
 def __init__(self, base_dim, refer_dim, tparams, prefix="maxout"):
     self.W_t = theano.shared(
         value=util.uniform_random_weight((refer_dim, 2 * refer_dim), 0.1),
         name=self._p(prefix, 'W_t'),
         borrow=True
     )
     self.W_o = theano.shared(
         value=util.uniform_random_weight((base_dim, refer_dim), 0.1),
         name=self._p(prefix, 'W_o'),
         borrow=True
     )
     
     
     # initialize the biases b as a vector of n_out 0s
     self.b = theano.shared(
         value=numpy.zeros(
             (2 * refer_dim,),
             dtype=config.globalFloatType()
         ),
         name=self._p(prefix, 'b'),
         borrow=True
     )
     
     
     # parameters of the model
     self.params = [self.W_t, self.W_o, self.b]
     if not tparams is None:
         tparams[self._p(prefix, 'W_t')] = self.W_t
         tparams[self._p(prefix, 'W_o')] = self.W_o
         tparams[self._p(prefix, 'b')] = self.b
     else:
         print " tparams is None"
Esempio n. 4
0
    def __init__(self, word_embedding_dim, hidden_status_dim, tparams=None, prefix='GRU'):
        """
        Init the GRU parameter: init_params.
        Updation in GRU :
            step1. r(t) = f(W_r dot x(t) + U_r dot h(t-1) + C_r dot h_last).
            step2. z(t) = f(W_z dot x(t) + U_z dot h(t-1) + C_z dot h_last).
            step3. h_wave(t) = f(W dot x(t) + U dot (r(t) * h(t-1)) + C dot h_last).
            step4. h(t) = (1-z(t)) * h(t-1) + z(t) * h_wave(t).
        We can combine W and C into one tensor W
        """
        self.hidden_status_dim = hidden_status_dim
        
        self.params = tparams
        self.prefix = prefix
        self.word_embedding_dim = word_embedding_dim
        W_bound = 0.1

        # combine step1~3 W dot t, so W's dimension is (word_embedding_dim, hidden_status_dim, 3)
        W = uniform_random_weight(size=(word_embedding_dim, 3 * hidden_status_dim), bound=W_bound)
        # combine step1~2 U dot h, so U's dimension is (hidden_status_dim, 2)
        # connot combine step1~3, so split U_rh
        
        U = uniform_random_weight(size=(hidden_status_dim, 2 * hidden_status_dim), bound=W_bound)
        U_rh = uniform_random_weight(size=(hidden_status_dim, hidden_status_dim), bound=W_bound)
        U_union = uniform_random_weight(size=(3 * hidden_status_dim, hidden_status_dim), bound=W_bound)
        
        
        if tparams is not None: 
            tparams[self._p(prefix, 'W')] = theano.shared(W, name=self._p(prefix, 'W'))
            tparams[self._p(prefix, 'U')] = theano.shared(U, name=self._p(prefix, 'U'))
            tparams[self._p(prefix, 'U_rh')] = theano.shared(U_rh, name=self._p(prefix, 'U_rh'))
            tparams[self._p(prefix, 'U_union')] = theano.shared(U_union, name=self._p(prefix, 'U_union'))
        else:
            print ' tparams is None'
Esempio n. 5
0
    def __init__(self,
                 word_embedding_dim,
                 hidden_status_dim,
                 tparams=None,
                 prefix='RNN'):
        """
        Init the RNN parameter: init_params.
        Updation in GRU :
            step1. h(t) = f(W_r dot x(t) + U_r dot h(t-1)).
        We can combine W and C into one tensor W
        """
        self.hidden_status_dim = hidden_status_dim
        self.params = tparams
        self.prefix = prefix
        W_bound = 0.01

        # combine step1~3 W dot t, so W's dimension is (word_embedding_dim, hidden_status_dim)
        W = uniform_random_weight(size=(word_embedding_dim, hidden_status_dim),
                                  bound=W_bound)
        # combine step1~2 U dot h, so U's dimension is (hidden_status_dim, 2)
        # connot combine step1~3, so split U_rh
        U = numpy.concatenate([ortho_weight(hidden_status_dim)], axis=1)

        if tparams is not None:
            tparams[self._p(prefix,
                            'W')] = theano.shared(W, name=self._p(prefix, 'W'))
            tparams[self._p(prefix,
                            'U')] = theano.shared(U, name=self._p(prefix, 'U'))
        else:
            print 'tparams is None'
Esempio n. 6
0
    def __init__(self, word_embedding_dim, hidden_status_dim, encoder_hidden_dim,
                 tparams=None, prefix='Attention'):
        """
        Init the GRU parameter: init_params.
        Updation in GRU :
            step1. r(t) = f(W_r dot x(t) + U_r dot h(t-1) + C_r dot h_last).
            step2. z(t) = f(W_z dot x(t) + U_z dot h(t-1) + C_z dot h_last).
            step3. h_wave(t) = f(W dot x(t) + U dot (r(t) * h(t-1)) + C dot h_last).
            step4. h(t) = (1-z(t)) * h(t-1) + z(t) * h_wave(t).
        We can combine W and C into one tensor W
        """
        self.hidden_status_dim = hidden_status_dim
        self.params = tparams
        self.prefix = prefix
        W_bound = numpy.sqrt(6. / (hidden_status_dim))

        # combine step1~3 W dot t, so W's dimension is (word_embedding_dim, hidden_status_dim, 3)
        W = uniform_random_weight(size=(hidden_status_dim, hidden_status_dim), bound=W_bound)
        # combine step1~2 U dot h, so U's dimension is (hidden_status_dim, 2)
        # connot combine step1~3, so split U_rh
        U = numpy.concatenate([ortho_weight(hidden_status_dim)]*int(encoder_hidden_dim/hidden_status_dim),
                              axis=0)
        # U = uniform_random_weight(height=2*hidden_status_dim, width=hidden_status_dim, bound=W_bound)
        va = numpy.zeros((hidden_status_dim,), dtype=globalFloatType())
        
        if tparams is not None: 
            tparams[self._p(prefix, 'W')] = theano.shared(W, name=self._p(prefix, 'W'))
            tparams[self._p(prefix, 'U')] = theano.shared(U, name=self._p(prefix, 'U'))
            tparams[self._p(prefix, 'va')] = theano.shared(va, name=self._p(prefix, 'va'))
        else:
            print ' tparams is None'
Esempio n. 7
0
    def __init__(self, word_embedding_dim, hidden_status_dim, encoder_hidden_dim,
                 tparams=None, prefix='Attention'):
        """
        Init the GRU parameter: init_params.
        Updation in GRU :
            step1. r(t) = f(W_r dot x(t) + U_r dot h(t-1) + C_r dot h_last).
            step2. z(t) = f(W_z dot x(t) + U_z dot h(t-1) + C_z dot h_last).
            step3. h_wave(t) = f(W dot x(t) + U dot (r(t) * h(t-1)) + C dot h_last).
            step4. h(t) = (1-z(t)) * h(t-1) + z(t) * h_wave(t).
        We can combine W and C into one tensor W
        """
        self.hidden_status_dim = hidden_status_dim
        self.params = tparams
        self.prefix = prefix
        W_bound = 0.01

        # combine step1~3 W dot t, so W's dimension is (word_embedding_dim, hidden_status_dim, 3)
        W = uniform_random_weight(size=(hidden_status_dim, hidden_status_dim), bound=W_bound)
        # combine step1~2 U dot h, so U's dimension is (hidden_status_dim, 2)
        # connot combine step1~3, so split U_rh
        U = numpy.concatenate([ortho_weight(hidden_status_dim)]*int(encoder_hidden_dim/hidden_status_dim),
                              axis=0)
        # U = uniform_random_weight(height=2*hidden_status_dim, width=hidden_status_dim, bound=W_bound)
        va = numpy.zeros((hidden_status_dim,), dtype=globalFloatType())
        
        if tparams is not None: 
            tparams[self._p(prefix, 'W')] = theano.shared(W, name=self._p(prefix, 'W'))
            tparams[self._p(prefix, 'U')] = theano.shared(U, name=self._p(prefix, 'U'))
            tparams[self._p(prefix, 'va')] = theano.shared(va, name=self._p(prefix, 'va'))
        else:
            print ' tparams is None'
Esempio n. 8
0
    def __init__(self, word_embedding_dim, hidden_status_dim, tparams=None, prefix='GRU'):
        """
        Init the GRU parameter: init_params.
        Updation in GRU :
            step1. r(t) = f(W_r dot x(t) + U_r dot h(t-1) + C_r dot h_last).
            step2. z(t) = f(W_z dot x(t) + U_z dot h(t-1) + C_z dot h_last).
            step3. h_wave(t) = f(W dot x(t) + U dot (r(t) * h(t-1)) + C dot h_last).
            step4. h(t) = (1-z(t)) * h(t-1) + z(t) * h_wave(t).
        We can combine W and C into one tensor W
        """
        self.hidden_status_dim = hidden_status_dim
        self.params = tparams
        self.prefix = prefix
        W_bound = numpy.sqrt(6. / (hidden_status_dim + word_embedding_dim))

        # combine step1~3 W dot t, so W's dimension is (word_embedding_dim, hidden_status_dim, 3)
        W = uniform_random_weight(size=(word_embedding_dim, 3 * hidden_status_dim), bound=W_bound)
        # combine step1~2 U dot h, so U's dimension is (hidden_status_dim, 2)
        # connot combine step1~3, so split U_rh
        U = numpy.concatenate([ortho_weight(hidden_status_dim),
                               ortho_weight(hidden_status_dim)], axis=1)
        U_rh = ortho_weight(hidden_status_dim)
        
        if tparams is not None: 
            tparams[self._p(prefix, 'W')] = theano.shared(W, name=self._p(prefix, 'W'))
            tparams[self._p(prefix, 'U')] = theano.shared(U, name=self._p(prefix, 'U'))
            tparams[self._p(prefix, 'U_rh')] = theano.shared(U_rh, name=self._p(prefix, 'U_rh'))
        else:
            print ' tparams is None'
 def __init__(self, base_dim, refer_dim, tparams, prefix="maxout"):
     self.W_t = theano.shared(value=util.uniform_random_weight(
         (refer_dim, 2 * refer_dim), 0.1),
                              name=self._p(prefix, 'W_t'),
                              borrow=True)
     self.W_o = theano.shared(value=util.uniform_random_weight(
         (base_dim, refer_dim), 0.1),
                              name=self._p(prefix, 'W_o'),
                              borrow=True)
     # parameters of the model
     self.params = [self.W_t, self.W_o]
     if not tparams is None:
         tparams[self._p(prefix, 'W_t')] = self.W_t
         tparams[self._p(prefix, 'W_o')] = self.W_o
     else:
         print " tparams is None"
Esempio n. 10
0
 def __init__(self,
              word_embedding_dim,
              hidden_status_dim,
              encoder_hidden_dim,
              tparams,
              prefix='Decoder',
              node_type=GRUNode):
     """
     Init the Decoder parameter: init_params.
     """
     self.params = tparams
     W_bound = numpy.sqrt(6. / (hidden_status_dim))
     W = uniform_random_weight(size=(hidden_status_dim, hidden_status_dim),
                               bound=W_bound)
     # if tparams is not None:
     #     tparams[self._p(prefix, 'Ws')] = theano.shared(W, name=self._p(prefix, 'Ws'))
     self.hidden_status_dim = hidden_status_dim
     self.prefix = prefix
     self.node = node_type(word_embedding_dim=word_embedding_dim,
                           hidden_status_dim=hidden_status_dim,
                           tparams=tparams,
                           prefix=self._p(self.prefix, 'GRU'))
     self.attention_node = AttentionNode(
         word_embedding_dim=word_embedding_dim,
         hidden_status_dim=hidden_status_dim,
         encoder_hidden_dim=encoder_hidden_dim,
         tparams=tparams,
         prefix=self._p(self.prefix, 'Attention'))
Esempio n. 11
0
 def __init__(self, base_dim, refer_dim, tparams, prefix="maxout"):
     self.W_t = theano.shared(
         value=util.uniform_random_weight((refer_dim, 2 * refer_dim), 0.1),
         name=self._p(prefix, 'W_t'),
         borrow=True
     )
     self.W_o = theano.shared(
         value=util.uniform_random_weight((base_dim, refer_dim), 0.1),
         name=self._p(prefix, 'W_o'),
         borrow=True
     )
     # parameters of the model
     self.params = [self.W_t, self.W_o]
     if not tparams is None:
         tparams[self._p(prefix, 'W_t')] = self.W_t
         tparams[self._p(prefix, 'W_o')] = self.W_o
     else:
         print " tparams is None"
Esempio n. 12
0
 def __init__(self, n_words, word_embedding_dim, represent, isfirst, tparams, prefix):
     """
     Init the Memory parameter: init_params.
     """
     self.word_embedding_dim = word_embedding_dim
     self.params = tparams
     self.prefix = prefix
     EmbA = uniform_random_weight(size=(n_words, word_embedding_dim), bound=0.01)
     EmbB = uniform_random_weight(size=(n_words, word_embedding_dim), bound=0.01)
     EmbC = uniform_random_weight(size=(n_words, word_embedding_dim), bound=0.01)
     TemA = uniform_random_weight(size=(500, word_embedding_dim), bound=0.01)
     TemC = uniform_random_weight(size=(500, word_embedding_dim), bound=0.01)
     
     if tparams is not None: 
         tparams[self._p(prefix, 'EmbA')] = theano.shared(EmbA, name=self._p(prefix, 'EmbA'))
         if isfirst == True :
             tparams[self._p(prefix, 'EmbB')] = theano.shared(EmbB, name=self._p(prefix, 'EmbB'))
         tparams[self._p(prefix, 'EmbC')] = theano.shared(EmbC, name=self._p(prefix, 'EmbC'))
         if represent == 'PE' :
             tparams[self._p(prefix, 'TemA')] = theano.shared(TemA, name=self._p(prefix, 'TemA'))
             tparams[self._p(prefix, 'TemC')] = theano.shared(TemC, name=self._p(prefix, 'TemC'))
Esempio n. 13
0
    def __init__(self, base_dim, refer_dim, tparams, prefix="maxout"):
        self.W_t = theano.shared(value=util.uniform_random_weight(
            (refer_dim, 2 * refer_dim), 0.01),
                                 name=self._p(prefix, 'W_t'),
                                 borrow=True)
        self.W_o = theano.shared(value=util.uniform_random_weight(
            (base_dim, refer_dim), 0.01),
                                 name=self._p(prefix, 'W_o'),
                                 borrow=True)

        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared(value=numpy.zeros(
            (2 * refer_dim, ), dtype=config.globalFloatType()),
                               name=self._p(prefix, 'b'),
                               borrow=True)

        # parameters of the model
        self.params = [self.W_t, self.W_o, self.b]
        if not tparams is None:
            tparams[self._p(prefix, 'W_t')] = self.W_t
            tparams[self._p(prefix, 'W_o')] = self.W_o
            tparams[self._p(prefix, 'b')] = self.b
        else:
            print " tparams is None"
 def __init__(self, word_embedding_dim, hidden_status_dim, encoder_hidden_dim,
              tparams, prefix='Decoder', node_type=GRUNode):
     """
     Init the Decoder parameter: init_params.
     """
     self.params = tparams
     W_bound = numpy.sqrt(6. / (hidden_status_dim))
     W = uniform_random_weight(size=(hidden_status_dim, hidden_status_dim), bound=W_bound)
     # if tparams is not None: 
     #     tparams[self._p(prefix, 'Ws')] = theano.shared(W, name=self._p(prefix, 'Ws'))
     self.hidden_status_dim = hidden_status_dim
     self.prefix = prefix
     self.node = node_type(word_embedding_dim=word_embedding_dim,
                           hidden_status_dim=hidden_status_dim,
                           tparams=tparams, prefix=self._p(self.prefix, 'GRU'))
     self.attention_node = AttentionNode(word_embedding_dim=word_embedding_dim,
                                         hidden_status_dim=hidden_status_dim,
                                         encoder_hidden_dim = encoder_hidden_dim,
                                         tparams=tparams, prefix=self._p(self.prefix, 'Attention'))