def _create_tf_bot_embed(self, b_in: "tf.Tensor") -> "tf.Tensor":
    """Create embedding bot vector.

    Runs the bot-action features through the configured feed-forward
    layers, then projects the result into the shared embedding space.
    """
    hidden = train_utils.create_tf_fnn(
        b_in,
        self.hidden_layers_sizes["bot"],
        self.droprate["bot"],
        self.C2,
        self._is_training,
        layer_name_suffix="bot",
    )
    bot_embed = train_utils.create_tf_embed(
        hidden,
        self.embed_dim,
        self.C2,
        self.similarity_type,
        layer_name_suffix="bot",
    )
    return bot_embed
def _create_tf_dial(self, a_in: "tf.Tensor") -> Tuple["tf.Tensor", "tf.Tensor"]:
    """Create dialogue level embedding and mask.

    Args:
        a_in: Dialogue turn features; padded turns are filled with `-1`
            so they can be recognized and masked out.

    Returns:
        A tuple `(dial_embed, mask)` where `dial_embed` is the dialogue
        embedding and `mask` zeroes out padded turns. If a max-history
        featurizer is used, both are sliced down to the last turn only.
    """
    # Mask different length sequences:
    # a padded turn is all `-1`, so its max is `-1` and sign(...+1) == 0.
    # FIX: compute the mask from the `a_in` argument instead of `self.a_in`,
    # so the mask always matches the tensor embedded below (previously the
    # two could diverge if a caller passed anything other than `self.a_in`).
    mask = tf.sign(tf.reduce_max(a_in, -1) + 1)

    a = train_utils.create_tf_fnn(
        a_in,
        self.hidden_layers_sizes["pre_dial"],
        self.droprate["dial"],
        self.C2,
        self._is_training,
        layer_name_suffix="pre_dial",
    )

    # Transformer encoder over the dialogue sequence; attention weights are
    # collected into this dict for later inspection.
    self.attention_weights = {}
    hparams = train_utils.create_t2t_hparams(
        self.num_transformer_layers,
        self.transformer_size,
        self.num_heads,
        self.droprate["dial"],
        self.pos_encoding,
        self.max_seq_length,
        self._is_training,
    )
    a = train_utils.create_t2t_transformer_encoder(
        a, mask, self.attention_weights, hparams, self.C2, self._is_training
    )

    if isinstance(self.featurizer, MaxHistoryTrackerFeaturizer):
        # pick last label if max history featurizer is used
        a = a[:, -1:, :]
        mask = mask[:, -1:]

    dial_embed = train_utils.create_tf_embed(
        a, self.embed_dim, self.C2, self.similarity_type, layer_name_suffix="dial"
    )

    return dial_embed, mask
def _create_tf_embed_fnn(
    self,
    x_in: "tf.Tensor",
    layer_sizes: List[int],
    fnn_name: Text,
    embed_name: Text,
) -> "tf.Tensor":
    """Create nn with hidden layers and name.

    Builds a feed-forward network with the given `layer_sizes` over
    `x_in`, then embeds its output into the shared embedding space.
    Layer variables are suffixed with `fnn_name` / `embed_name`.
    """
    # NOTE(review): `self.droprate` is passed whole here, while sibling
    # methods index it with a key — presumably a scalar on this class; confirm.
    hidden = train_utils.create_tf_fnn(
        x_in,
        layer_sizes,
        self.droprate,
        self.C2,
        self._is_training,
        layer_name_suffix=fnn_name,
    )
    return train_utils.create_tf_embed(
        hidden,
        self.embed_dim,
        self.C2,
        self.similarity_type,
        layer_name_suffix=embed_name,
    )