def __init__(self, config, vocab):
        self._config = config
        self._vocab = vocab
        self._name = self._config.model.get('errche_decoder_name')
        # vocabulary size of the target sequence
        self._targ_voc = self._vocab.errche_vocab_size_targ
        # index of the END token in the vocabulary
        self._id_end = self._config.dataset.get('id_end')
        # embedding dim of the target sequence
        self._embeding_dim_targ = self._config.model.get('errche_embeding_dims_target')
        # dim of encoder
        self._rnn_encoder_dim = self._config.model.get('errche_rnn_encoder_dim')
        # dim of decoder
        self._rnn_decoder_dim = self._config.model.get('errche_rnn_decoder_dim')
        # dim of attention
        self._att_dim = self._config.model.get('att_dim')
        assert self._rnn_encoder_dim * 2 == self._rnn_decoder_dim, \
            "The BiRNN encoder output dim is twice the encoder dim and must equal the decoder dim"

        self._tiles = 1 if self._config.model.decoding == 'greedy' else self._config.model.beam_size

        self._embedding_table_traget = tf.get_variable(
            "targ_vocab_embeding", dtype=tf.float32, shape=[self._targ_voc, self._embeding_dim_targ],
            initializer=embedding_initializer())
        self._start_token = tf.squeeze(
            input=self._embedding_table_traget[0, :],
            name='targ_start_flage')
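
# --- Illustrative sketch (not part of the original source) ---
# A minimal, hypothetical example of how the target embedding table built above is
# typically consumed: target token ids are mapped to dense vectors with
# tf.nn.embedding_lookup before being fed to the attention decoder. The helper name
# `embed_target_ids` and the shapes mentioned below are assumptions for illustration only.

import tensorflow as tf

def embed_target_ids(embedding_table, target_ids):
    """Map int target ids of shape [batch, time] to embeddings [batch, time, embed_dim]."""
    return tf.nn.embedding_lookup(embedding_table, target_ids)
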
    def __init__(self, config, vocab, trainable):
        self._config = config
        self._vocab = vocab
        self._name = self._config.model.get('errche_encoder_name')
        # dim of the encoder; because of the BiRNN, the encoder output dim is 2*encoder_dim
        self._rnn_encoder_dim = self._config.model.get(
            'errche_rnn_encoder_dim')
        self._embeding_dim_source = self._config.model.get(
            'errche_embeding_dims_source')
        # vocabulary size of the source sequence
        self._source_voc = self._vocab.errche_vocab_size_source
        self.is_training = trainable

        self._embedding_table_source = tf.get_variable(
            "source_vocab_embeding",
            dtype=tf.float32,
            shape=[self._source_voc, self._embeding_dim_source],
            initializer=embedding_initializer())
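
# --- Illustrative sketch (not part of the original source) ---
# A minimal, hypothetical sketch of the encoder step the comment above refers to:
# source ids are embedded with the table built in __init__ and run through a
# bidirectional RNN, so the concatenated output has dim 2 * errche_rnn_encoder_dim
# (which is why the decoder dim must be twice the encoder dim). The function name
# `encode_source` and the choice of LSTM cells are assumptions for illustration only.

import tensorflow as tf

def encode_source(embedding_table, source_ids, encoder_dim):
    # [batch, time] int ids -> [batch, time, embed_dim] embeddings
    inputs = tf.nn.embedding_lookup(embedding_table, source_ids)
    cell_fw = tf.nn.rnn_cell.LSTMCell(encoder_dim)
    cell_bw = tf.nn.rnn_cell.LSTMCell(encoder_dim)
    (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        cell_fw, cell_bw, inputs, dtype=tf.float32)
    # Concatenate forward and backward outputs: [batch, time, 2 * encoder_dim]
    return tf.concat([out_fw, out_bw], axis=-1)
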
Example 3
    def __init__(self, config, vocab):
        self._config = config
        self._vocab = vocab
        self._name = self._config.model.get('decoder_name')
        self._vocabsize = self._vocab.vocab_size
        self._id_end = self._config.dataset.get('id_end')
        self._embeding_dim = self._config.model.get('embeding_dims')
        self._encoder_dim = self._config.model.get('rnn_encoder_dim')
        self._decoder_dim = self._config.model.get('rnn_decoder_dim')
        self._att_dim = self._config.model.get('att_dim')
        assert self._encoder_dim * 2 == self._decoder_dim, \
            "The BiLSTM encoder output dim is twice the encoder dim and must equal the decoder dim"

        self._tiles = 1 if self._config.model.decoding == 'greedy' else self._config.model.beam_size

        self._vocab_embeding = tf.get_variable(
            "vocab_embeding",
            dtype=tf.float32,
            shape=[self._vocabsize, self._embeding_dim],
            initializer=embedding_initializer())
        self._start_token = tf.squeeze(input=self._vocab_embeding[0, :],
                                       name='start_flage')
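
# --- Illustrative sketch (not part of the original source) ---
# A minimal, hypothetical sketch of what the `_tiles` value computed above is for:
# with beam-search decoding, the encoder outputs (the attention memory) are usually
# replicated beam_size times per batch entry, e.g. via tf.contrib.seq2seq.tile_batch,
# while greedy decoding keeps a multiplier of 1. The helper name below is an assumption.

import tensorflow as tf

def tile_memory_for_decoding(encoder_outputs, tiles):
    # encoder_outputs: [batch, time, dim]; returns [batch * tiles, time, dim].
    if tiles == 1:
        return encoder_outputs  # greedy decoding: no replication needed
    return tf.contrib.seq2seq.tile_batch(encoder_outputs, multiplier=tiles)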