# Assumes a module-level `import logging` and a `layers` module providing the
# layer classes used below (imports are not shown in this snippet).
def __init__(self, config):
    self.vocab_size = int(config['vocabulary_size'])
    self.emb_size = int(config['embedding_dim'])
    self.rnn_hidden_size = int(config['rnn_hidden_size'])
    self.hidden_size = int(config['hidden_size'])
    self.left_name, self.seq_len1 = config['left_slots'][0]
    self.right_name, self.seq_len2 = config['right_slots'][0]
    self.task_mode = config['training_mode']
    # Shared pipeline: embedding lookup, LSTM encoder, last-time-step extraction.
    self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size, self.emb_size,
                                                   zero_pad=True, scale=False)
    self.rnn = layers.LSTMLayer(self.rnn_hidden_size)
    self.extract = layers.ExtractLastLayer()
    if self.task_mode == "pointwise":
        # Pointwise: an FC head over a 2 * rnn_hidden_size input, ending in n_class logits.
        self.n_class = int(config['n_class'])
        self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 2, self.hidden_size)
        self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
    elif self.task_mode == "pairwise":
        # Pairwise: a single FC projection per side, scored with cosine similarity.
        self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 1, self.hidden_size)
        self.cos_layer = layers.CosineLayer()
    else:
        logging.error("training mode not supported")
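# --- Illustrative only: a minimal example of the config dict this constructor
# expects, inferred from the keys read above. The concrete values and the slot
# names 'left_input' / 'right_input' are assumptions, not taken from the
# repository's real configuration files.
# example_config = {
#     'vocabulary_size': 50000,
#     'embedding_dim': 128,
#     'rnn_hidden_size': 128,
#     'hidden_size': 128,
#     'left_slots': [('left_input', 32)],    # (slot name, max sequence length)
#     'right_slots': [('right_input', 32)],
#     'training_mode': 'pointwise',          # or 'pairwise'
#     'n_class': 2,                          # only read in pointwise mode
# }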
def __init__(self, config):
    self.vocab_size = int(config['vocabulary_size'])
    self.emb_size = int(config['embedding_dim'])
    self.rnn_hidden_size = int(config['rnn_hidden_size'])
    self.hidden_size = int(config['hidden_size'])
    self.left_name, self.seq_len1 = config['left_slots'][0]
    self.right_name, self.seq_len2 = config['right_slots'][0]
    # Same embedding / LSTM / last-step pipeline as above, but with no
    # 'training_mode' branch: this variant always builds the n_class head.
    self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size, self.emb_size,
                                                   zero_pad=True, scale=False)
    self.rnn = layers.LSTMLayer(self.rnn_hidden_size)
    self.extract = layers.ExtractLastLayer()
    self.n_class = int(config['n_class'])
    self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 2, self.hidden_size)
    self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
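# --- Note (an inference from the layer dimensions above, not from forward-pass
# code, which is not shown here): each input sequence flows
# emb_size -> LSTM(rnn_hidden_size) -> last hidden state; fc1's input width of
# rnn_hidden_size * 2 suggests the left and right final states are combined
# (presumably concatenated) before the hidden_size -> n_class classification head.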