Example #1
    def __init__(self, hparams=None):
        ModuleBase.__init__(self, hparams)

        self.dropout = self._hparams.dropout
        self.bias = self._hparams.bias
        self.act = self._hparams.act
        self.concat = self._hparams.concat
        self.vars = {}
        #self.logging = True

        if self._hparams.neigh_input_dim is None:
            neigh_input_dim = self._hparams.input_dim
        else:
            neigh_input_dim = self._hparams.neigh_input_dim

        if self._hparams.name is not None:
            name = '/' + self._hparams.name
        else:
            name = ''

        with tf.variable_scope(self.name + name + '_vars'):
            self.vars['weights'] = glorot(
                [neigh_input_dim, self._hparams.output_dim],
                name='neigh_weights')
            if self.bias:
                self.vars['bias'] = zeros([self._hparams.output_dim],
                                          name='bias')
        '''
        if self.logging:
            self._log_vars()
        '''

        self.input_dim = self._hparams.input_dim
        self.output_dim = self._hparams.output_dim
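
The constructor above reads only a handful of hyperparameters. A minimal sketch of an hparams dict covering exactly the keys it accesses; the values and the activation choice are illustrative assumptions, not the module's actual defaults:

    import tensorflow as tf

    # Hypothetical hparams for the aggregator constructor above.
    # Keys mirror the attributes the code reads; values are illustrative.
    aggregator_hparams = {
        'input_dim': 128,           # node feature dimension
        'neigh_input_dim': None,    # None -> falls back to input_dim
        'output_dim': 64,           # dimension of the aggregated output
        'dropout': 0.0,
        'bias': False,
        'act': tf.nn.relu,
        'concat': False,
        'name': 'aggregator',       # appended to the variable scope
    }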
Example #2
    def __init__(self, config_model, config_data):
        ModuleBase.__init__(self)
        self.config_model = config_model
        self.config_data = config_data

        with open(config_data.vocab_file, "rb") as f:
            id2w = pickle.load(f)
        self.id2w = id2w
        self.vocab_size = len(id2w)
        self.pad_token_id, self.bos_token_id = (0, 1)
        self.eos_token_id, self.unk_token_id = (2, 3)

        self.word_embedder = WordEmbedder(vocab_size=self.vocab_size,
                                          hparams=config_model.emb)
        self.pos_embedder = SinusoidsPositionEmbedder(
            position_size=config_data.max_decoding_length,
            hparams=config_model.position_embedder_hparams,
        )

        self.encoder = TransformerEncoder(hparams=config_model.encoder)
        self.decoder = TransformerDecoder(
            vocab_size=self.vocab_size,
            output_layer=self.word_embedder.embedding,
            hparams=config_model.decoder,
        )

        self.smoothed_loss_func = LabelSmoothingLoss(
            label_confidence=self.config_model.loss_label_confidence,
            tgt_vocab_size=self.vocab_size,
            ignore_index=0,
        )
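
Everything this constructor needs comes from two config objects. The sketch below lists only the attributes the code above reads; the SimpleNamespace stand-ins, the file path, and the values are illustrative assumptions:

    from types import SimpleNamespace

    # Illustrative stand-ins for the two config objects read above.
    config_data = SimpleNamespace(
        vocab_file='data/vocab.pickle',   # pickled id-to-word list
        max_decoding_length=256,          # position table size
    )
    config_model = SimpleNamespace(
        emb={'dim': 512},                        # WordEmbedder hparams
        position_embedder_hparams={'dim': 512},  # sinusoidal embedder hparams
        encoder=None,                            # TransformerEncoder hparams (None = defaults)
        decoder=None,                            # TransformerDecoder hparams (None = defaults)
        loss_label_confidence=0.9,               # label smoothing confidence
    )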
Example #3
    def __init__(self, embedding=None, vocab_size=None, hparams=None):
        ModuleBase.__init__(self, hparams)
        self._vocab_size = vocab_size
        self._embedding = None
        self.sampling_method = self._hparams.sampling_method
        with tf.variable_scope(self.variable_scope):
            if self._hparams.initializer:
                tf.get_variable_scope().set_initializer(
                    layers.get_initializer(self._hparams.initializer))
            if self._hparams.position_embedder.name == 'sinusoids':
                self.position_embedder = \
                    position_embedders.SinusoidsSegmentalPositionEmbedder(
                        self._hparams.position_embedder.hparams)

        if self._hparams.use_embedding:
            if embedding is None and vocab_size is None:
                raise ValueError("""If 'embedding' is not provided,
                    'vocab_size' must be specified.""")
            if isinstance(embedding, (tf.Tensor, tf.Variable)):
                self._embedding = embedding
            else:
                self._embedding = embedder_utils.get_embedding(
                    self._hparams.embedding,
                    embedding,
                    vocab_size,
                    variable_scope=self.variable_scope)
                self._embed_dim = shape_list(self._embedding)[-1]
                if self._hparams.zero_pad:
                    self._embedding = tf.concat(
                        (tf.zeros(shape=[1, self._embed_dim]),
                         self._embedding[1:, :]), 0)
            if self._vocab_size is None:
                self._vocab_size = self._embedding.get_shape().as_list()[0]
        self.output_layer = \
            self.build_output_layer(shape_list(self._embedding)[-1])
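
The `zero_pad` branch above replaces row 0 of the embedding table with zeros so that the padding id always maps to an all-zero vector. A standalone sketch of that operation with a small illustrative table:

    import tensorflow as tf

    # Illustrative 4-token, 3-dim table; zero_pad zeroes out row 0 (the PAD id).
    embedding = tf.constant([[0.1, 0.2, 0.3],
                             [0.4, 0.5, 0.6],
                             [0.7, 0.8, 0.9],
                             [1.0, 1.1, 1.2]])
    embed_dim = int(embedding.shape[-1])
    embedding = tf.concat((tf.zeros(shape=[1, embed_dim]),
                           embedding[1:, :]), 0)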
Example #4
    def __init__(self,
                 cell=None,
                 vocab_size=None,
                 output_layer=None,
                 cell_dropout_mode=None,
                 hparams=None):
        ModuleBase.__init__(self, hparams)

        self._helper = None
        self._initial_state = None

        # Make rnn cell
        with tf.variable_scope(self.variable_scope):
            if cell is not None:
                self._cell = cell
            else:
                self._cell = layers.get_rnn_cell(self._hparams.rnn_cell,
                                                 cell_dropout_mode)
        self._beam_search_cell = None

        # Make the output layer
        self._output_layer, self._vocab_size = _make_output_layer(
            output_layer, vocab_size, self._hparams.output_layer_bias,
            self.variable_scope)

        self.max_decoding_length = None
Example #5
    def __init__(self, train_data):

        ModuleBase.__init__(self)

        self.source_vocab_size = train_data.source_vocab.size
        self.target_vocab_size = train_data.target_vocab.size

        self.bos_token_id = train_data.target_vocab.bos_token_id
        self.eos_token_id = train_data.target_vocab.eos_token_id

        self.source_embedder = tx.modules.WordEmbedder(
            vocab_size=self.source_vocab_size, hparams=config_model.embedder)

        self.target_embedder = tx.modules.WordEmbedder(
            vocab_size=self.target_vocab_size, hparams=config_model.embedder)

        self.encoder = tx.modules.BidirectionalRNNEncoder(
            input_size=self.source_embedder.dim, hparams=config_model.encoder)

        self.decoder = tx.modules.AttentionRNNDecoder(
            encoder_output_size=self.encoder.cell_fw.hidden_size +
            self.encoder.cell_bw.hidden_size,
            input_size=self.target_embedder.dim + config_model.num_units,
            vocab_size=self.target_vocab_size,
            hparams=config_model.decoder)
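
Note that `config_model` is not an argument here; the constructor assumes it is available at module scope. A sketch of the attributes it reads from that config; the SimpleNamespace form and the values are illustrative assumptions:

    from types import SimpleNamespace

    # Illustrative module-level config covering only the attributes read above.
    config_model = SimpleNamespace(
        num_units=256,          # attention size added to the decoder input
        embedder={'dim': 256},  # WordEmbedder hparams (shared by source/target)
        encoder=None,           # BidirectionalRNNEncoder hparams (None = defaults)
        decoder=None,           # AttentionRNNDecoder hparams (None = defaults)
    )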
Example #6
    def __init__(self, vocab_size=None, output_layer=None, hparams=None):
        ModuleBase.__init__(self, hparams)

        with tf.variable_scope(self.variable_scope):
            if self._hparams.initializer:
                tf.get_variable_scope().set_initializer(
                    layers.get_initializer(self._hparams.initializer))

            # Make the output layer
            self._output_layer, self._vocab_size = _make_output_layer(
                output_layer, vocab_size, self._hparams.output_layer_bias,
                self.variable_scope)

            # Make attention and poswise networks
            self.multihead_attentions = {'self_att': [], 'encdec_att': []}
            self.poswise_networks = []
            for i in range(self._hparams.num_blocks):
                layer_name = 'layer_{}'.format(i)
                with tf.variable_scope(layer_name):
                    with tf.variable_scope("self_attention"):
                        multihead_attention = MultiheadAttentionEncoder(
                            self._hparams.multihead_attention)
                        self.multihead_attentions['self_att'].append(
                            multihead_attention)

                    if self._hparams.dim != \
                            multihead_attention.hparams.output_dim:
                        raise ValueError('The output dimension of '
                                         'MultiheadEncoder should be equal '
                                         'to the dim of TransformerDecoder')

                    with tf.variable_scope('encdec_attention'):
                        multihead_attention = MultiheadAttentionEncoder(
                            self._hparams.multihead_attention)
                        self.multihead_attentions['encdec_att'].append(
                            multihead_attention)

                    if self._hparams.dim != \
                            multihead_attention.hparams.output_dim:
                        raise ValueError('The output dimension of '
                                         'MultiheadEncoder should be equal '
                                         'to the dim of TransformerDecoder')

                    pw_net = FeedForwardNetwork(
                        hparams=self._hparams['poswise_feedforward'])
                    final_dim = pw_net.hparams.layers[-1]['kwargs']['units']
                    if self._hparams.dim != final_dim:
                        raise ValueError(
                            'The output dimension of '
                            '"poswise_feedforward" should be equal '
                            'to the "dim" of TransformerDecoder.')
                    self.poswise_networks.append(pw_net)

            # Built in _build()
            self.context = None
            self.context_sequence_length = None
            self.embedding = None
            self._helper = None
            self._cache = None
            self.max_decoding_length = None
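
All three ValueError checks in this constructor enforce the same constraint: both attention blocks' `output_dim` and the last position-wise layer's `units` must equal the decoder's `dim`. A hypothetical hparams sketch that satisfies those checks (values are illustrative):

    # Hypothetical, internally consistent hparams for the constructor above.
    decoder_hparams = {
        'dim': 512,
        'num_blocks': 6,
        'output_layer_bias': False,
        'initializer': None,
        'multihead_attention': {'num_heads': 8, 'output_dim': 512},  # == dim
        'poswise_feedforward': {
            'layers': [
                {'type': 'Dense', 'kwargs': {'units': 2048, 'activation': 'relu'}},
                {'type': 'Dense', 'kwargs': {'units': 512}},          # == dim
            ],
        },
    }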
Example #7
    def __init__(self,
                 network=None,
                 network_kwargs=None,
                 hparams=None):
        ModuleBase.__init__(self, hparams=hparams)

        with tf.variable_scope(self.variable_scope):
            self._build_network(network, network_kwargs)
Example #8
    def __init__(self, hparams=None):
        ModuleBase.__init__(self, hparams)

        self._layers = []
        self._layer_names = []
        self._layers_by_name = {}
        self._layer_outputs = []
        self._layer_outputs_by_name = {}
Example #9
    def __init__(self, embedding, hparams=None):
        ModuleBase.__init__(self, hparams)

        with tf.variable_scope(self.variable_scope):
            if self._hparams.initializer:
                tf.get_variable_scope().set_initializer(
                    layers.get_initializer(self._hparams.initializer))

            if self._hparams.position_embedder_type == 'sinusoids':
                self.position_embedder = SinusoidsPositionEmbedder(
                    self._hparams.position_embedder_hparams)
            else:
                self.position_embedder = PositionEmbedder(
                    position_size=self._hparams.position_size,
                    hparams=self._hparams.position_embedder_hparams)

            self._embedding = embedding
            self._vocab_size = self._embedding.get_shape().as_list()[0]

            self.output_layer = \
                self._build_output_layer(shape_list(self._embedding)[-1])

            self.multihead_attentions = {'self_att': [], 'encdec_att': []}
            self.poswise_networks = []
            for i in range(self._hparams.num_blocks):
                layer_name = 'layer_{}'.format(i)
                with tf.variable_scope(layer_name):
                    with tf.variable_scope("self_attention"):
                        multihead_attention = MultiheadAttentionEncoder(
                            self._hparams.multihead_attention)
                        self.multihead_attentions['self_att'].append(
                            multihead_attention)
                    # pylint: disable=protected-access
                    if self._hparams.dim != \
                        multihead_attention._hparams.output_dim:
                        raise ValueError('The output dimension of '
                                         'MultiheadEncoder should be equal '
                                         'to the dim of TransformerDecoder')

                    with tf.variable_scope('encdec_attention'):
                        multihead_attention = MultiheadAttentionEncoder(
                            self._hparams.multihead_attention)
                        self.multihead_attentions['encdec_att'].append(
                            multihead_attention)
                    if self._hparams.dim != \
                        multihead_attention._hparams.output_dim:
                        raise ValueError('The output dimension of '
                                         'MultiheadEncoder should be equal '
                                         'to the dim of TransformerDecoder')

                    poswise_network = FeedForwardNetwork(
                        hparams=self._hparams['poswise_feedforward'])
                    if self._hparams.dim != \
                        poswise_network._hparams.layers[-1]['kwargs']['units']:
                        raise ValueError('The output dimension of '
                                         'FeedForwardNetwork should be equal '
                                         'to the dim of TransformerDecoder')
                    self.poswise_networks.append(poswise_network)
Example #10
    def __init__(self,
                 hparams: Optional[Union[HParams, Dict[str, Any]]] = None):
        ModuleBase.__init__(self, hparams)

        self._layers = nn.ModuleList()
        self._layer_names: List[str] = []
        self._layers_by_name: Dict[str, nn.Module] = {}
        self._layer_outputs: List[torch.Tensor] = []
        self._layer_outputs_by_name: Dict[str, torch.Tensor] = {}
Example #11
    def __init__(self, num_embeds=None, init_value=None, hparams=None):
        ModuleBase.__init__(self, hparams)

        if num_embeds is not None or init_value is not None:
            self._embedding = Parameter(
                embedder_utils.get_embedding(num_embeds, init_value, hparams))

            self._num_embeds = self._embedding.shape[0]

            self._dim = self._embedding.shape[1:]
            self._dim_rank = len(self._dim)
            if self._dim_rank == 1:
                self._dim = self._dim[0]
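
When `init_value` is given, its shape fixes both the number of embeddings and the embedding dimension, and a rank-1 `_dim` is collapsed to a plain integer. A standalone sketch of that shape bookkeeping (the tensor here is illustrative):

    import torch
    from torch.nn import Parameter

    init_value = torch.randn(10000, 300)   # illustrative: 10000 tokens, 300-dim vectors
    embedding = Parameter(init_value)
    num_embeds = embedding.shape[0]         # -> 10000
    dim = embedding.shape[1:]               # -> torch.Size([300])
    if len(dim) == 1:
        dim = dim[0]                        # rank-1 dim collapsed to the int 300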
Example #12
    def __init__(self, embedding, hparams=None):
        ModuleBase.__init__(self, hparams)

        with tf.variable_scope(self.variable_scope):
            if self._hparams.initializer:
                tf.get_variable_scope().set_initializer(
                    layers.get_initializer(self._hparams.initializer))

            self.position_embedder = \
                SinusoidsPositionEmbedder(
                    self._hparams.position_embedder_hparams)

            self._embedding = embedding
            self._vocab_size = self._embedding.get_shape().as_list()[0]

        self.output_layer = \
            self._build_output_layer(shape_list(self._embedding)[-1])
Example #13
    def __init__(self,
                 pretrained_model_name=None,
                 cache_dir=None,
                 hparams=None):
        ModuleBase.__init__(self, hparams)

        if pretrained_model_name:
            self.pretrained_model = bert_utils.\
                load_pretrained_model(pretrained_model_name,
                                      cache_dir)
        elif self._hparams.pretrained_model_name is not None:
            self.pretrained_model = bert_utils.\
                load_pretrained_model(self._hparams.pretrained_model_name,
                                      cache_dir)
        else:
            self.pretrained_model = None

        if self.pretrained_model:
            self.pretrained_model_hparams = bert_utils. \
                transform_bert_to_texar_config(self.pretrained_model)
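
The checkpoint can be selected either through the `pretrained_model_name` argument or through hparams, with the explicit argument taking precedence; when neither is given, no checkpoint is loaded. A minimal hparams sketch, the model name being illustrative:

    # Hypothetical hparams selecting the checkpoint; overridden by an explicit
    # `pretrained_model_name` argument when both are given.
    bert_hparams = {
        'pretrained_model_name': 'bert-base-uncased',   # illustrative name
    }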
Example #14
    def __init__(self,
                 raw_memory_dim,
                 input_embed_fn=None,
                 output_embed_fn=None,
                 query_embed_fn=None,
                 hparams=None):
        ModuleBase.__init__(self, hparams)

        self._raw_memory_dim = raw_memory_dim

        self._n_hops = self._hparams.n_hops
        self._relu_dim = self._hparams.relu_dim
        self._memory_size = self._hparams.memory_size

        with tf.variable_scope(self.variable_scope):
            self._A, self._C, self._B, self._memory_dim = self._build_embed_fn(
                input_embed_fn, output_embed_fn, query_embed_fn)

            self.H = None
            if self.hparams.use_H:
                self.H = tf.get_variable(
                    name="H", shape=[self._memory_dim, self._memory_dim])
Example #15
    def __init__(self,
                 cell=None,
                 vocab_size=None,
                 output_layer=None,
                 cell_dropout_mode=None,
                 hparams=None):
        ModuleBase.__init__(self, hparams)

        self._helper = None
        self._initial_state = None

        # Make rnn cell
        with tf.variable_scope(self.variable_scope):
            if cell is not None:
                self._cell = cell
            else:
                self._cell = layers.get_rnn_cell(self._hparams.rnn_cell,
                                                 cell_dropout_mode)
        self._beam_search_cell = None

        # Make the output layer
        self._vocab_size = vocab_size
        self._output_layer = output_layer
        if output_layer is None:
            if self._vocab_size is None:
                raise ValueError(
                    "Either `output_layer` or `vocab_size` must be provided. "
                    "Set `output_layer=tf.identity` if no output layer is "
                    "wanted.")
            with tf.variable_scope(self.variable_scope):
                self._output_layer = tf.layers.Dense(units=self._vocab_size)
        elif output_layer is not tf.identity:
            if not isinstance(output_layer, tf.layers.Layer):
                raise ValueError(
                    "`output_layer` must be either `tf.identity` or "
                    "an instance of `tf.layers.Layer`.")
Example #16
    def __init__(self, output_size, hparams=None):
        ModuleBase.__init__(self, hparams)
        self._output_size = output_size
Example #17
    def __init__(self, rate=None, hparams=None):
        ModuleBase.__init__(self, hparams)
        self._rate = rate
Example #18
    def __init__(self, H=None, hparams=None):
        ModuleBase.__init__(self, hparams)

        self._H = H
Example #19
    def __init__(self, num_embeds=None, hparams=None):
        ModuleBase.__init__(self, hparams)

        self._num_embeds = num_embeds
Example #20
    def __init__(self, hparams=None):
        ModuleBase.__init__(self, hparams)