Example no. 1
0
 def predict(self, left, right):
     """
     Run the forward pass: embed both inputs, encode each with a shared
     LSTM, keep the last time step, then score the pair.
     """
     # Shared embedding lookup for both sides.
     embedding = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb")
     seq_left = embedding.ops(left)
     seq_right = embedding.ops(right)
     # Shared LSTM encoder; the final time step is the sequence vector.
     encoder = layers.DynamicLSTMLayer(self.lstm_dim, "lstm")
     last_step = layers.SequenceLastStepLayer()
     repr_left = last_step.ops(encoder.ops(seq_left))
     repr_right = last_step.ops(encoder.ops(seq_right))
     if self.task_mode == "pairwise":
         # Shared FC projection, then cosine similarity as the score.
         proj = layers.FCLayer(self.hidden_dim, None, "fc")
         vec_left = proj.ops(repr_left)
         vec_right = proj.ops(repr_right)
         score = layers.CosSimLayer().ops(vec_left, vec_right)
         return vec_left, score
     # Pointwise: concatenate both sides, project, classify via softmax.
     joined = layers.ConcatLayer(1).ops([repr_left, repr_right])
     hidden = layers.FCLayer(self.hidden_dim, None, "fc").ops(joined)
     score = layers.FCLayer(2, "softmax", "cos_sim").ops(hidden)
     return repr_left, score
Example no. 2
0
 def predict(self, left, right):
     """
     Run the forward pass: embed both inputs, encode each with a shared
     GRU, keep the last time step, then score the pair.
     """
     # Shared embedding lookup for both sides.
     embedding = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb")
     seq_left = embedding.ops(left)
     seq_right = embedding.ops(right)
     # Shared GRU encoder; the final time step is the sequence vector.
     encoder = layers.DynamicGRULayer(self.gru_dim, "gru")
     last_step = layers.SequenceLastStepLayer()
     repr_left = last_step.ops(encoder.ops(seq_left))
     repr_right = last_step.ops(encoder.ops(seq_right))
     if self.task_mode == "pairwise":
         # Shared ReLU projection, then cosine similarity as the score.
         proj = layers.FCLayer(self.hidden_dim, "relu", "relu")
         vec_left = proj.ops(repr_left)
         vec_right = proj.ops(repr_right)
         score = layers.CosSimLayer().ops(vec_left, vec_right)
         return vec_left, score
     # Pointwise: concatenate both sides, project, classify via softmax.
     joined = layers.ConcatLayer(1).ops([repr_left, repr_right])
     hidden = layers.FCLayer(self.hidden_dim, "relu", "relu").ops(joined)
     score = layers.FCLayer(2, "softmax", "cos_sim").ops(hidden)
     return repr_left, score
Example no. 3
0
 def predict(self, left, right):
     """
     Run the forward pass: embed both inputs, encode each with a shared
     convolution + pooling layer, then score the pair.
     """
     # Shared embedding lookup for both sides.
     embedding = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb")
     seq_left = embedding.ops(left)
     seq_right = embedding.ops(right)
     # Shared conv + pool encoder producing one vector per sequence.
     encoder = layers.SequenceConvPoolLayer(self.filter_size,
                                            self.num_filters, "conv")
     repr_left = encoder.ops(seq_left)
     repr_right = encoder.ops(seq_right)
     if self.task_mode == "pairwise":
         # Shared ReLU projection, then cosine similarity as the score.
         proj = layers.FCLayer(self.hidden_dim, "relu", "relu")
         vec_left = proj.ops(repr_left)
         vec_right = proj.ops(repr_right)
         score = layers.CosSimLayer().ops(vec_left, vec_right)
         return vec_left, score
     # Pointwise: concatenate both sides, project, classify via softmax.
     joined = layers.ConcatLayer(1).ops([repr_left, repr_right])
     hidden = layers.FCLayer(self.hidden_dim, "relu", "relu").ops(joined)
     score = layers.FCLayer(2, "softmax", "cos_sim").ops(hidden)
     return repr_left, score
Example no. 4
0
 def predict(self, left, right):
     """
     Forward network for the bag-of-words model.

     Both inputs go through a shared embedding, a sum pool over time and
     a softsign; the pair is then scored by cosine similarity (pairwise
     mode) or a softmax classifier (pointwise mode).

     Args:
         left: token-id sequence of the left text.
         right: token-id sequence of the right text.

     Returns:
         A (left representation, prediction) tuple.
     """
     # embedding layer (shared between both sides)
     emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb")
     left_emb = emb_layer.ops(left)
     right_emb = emb_layer.ops(right)
     # bag-of-words representation: sum-pool over time, then softsign
     pool_layer = layers.SequencePoolLayer("sum")
     left_pool = pool_layer.ops(left_emb)
     right_pool = pool_layer.ops(right_emb)
     softsign_layer = layers.SoftsignLayer()
     left_soft = softsign_layer.ops(left_pool)
     right_soft = softsign_layer.ops(right_pool)
     # matching layer
     if self.task_mode == "pairwise":
         # NOTE: removed leftover debug print() calls on tensor shapes.
         bow_layer = layers.FCLayer(self.bow_dim, "relu", "fc")
         left_bow = bow_layer.ops(left_soft)
         right_bow = bow_layer.ops(right_soft)
         cos_sim_layer = layers.CosSimLayer()
         pred = cos_sim_layer.ops(left_bow, right_bow)
         return left_bow, pred
     else:
         concat_layer = layers.ConcatLayer(1)
         concat = concat_layer.ops([left_soft, right_soft])
         bow_layer = layers.FCLayer(self.bow_dim, "relu", "fc")
         concat_fc = bow_layer.ops(concat)
         softmax_layer = layers.FCLayer(2, "softmax", "cos_sim")
         pred = softmax_layer.ops(concat_fc)
         return left_soft, pred
Example no. 5
0
 def __init__(self, conf_dict):
     """
     Build the BOW matching model from a configuration dict.
     """
     super(BOW, self).__init__()
     net_conf = conf_dict["net"]
     # sizing hyper-parameters
     self.dict_size = conf_dict["dict_size"]
     self.task_mode = conf_dict["task_mode"]
     self.seq_len = conf_dict["seq_len"]
     self.emb_dim = net_conf["emb_dim"]
     self.bow_dim = net_conf["bow_dim"]
     # layers
     self.emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb").ops()
     self.bow_layer = Linear(self.bow_dim, self.bow_dim)
     self.bow_layer_po = layers.FCLayer(self.bow_dim, None, "fc").ops()
     self.softmax_layer = layers.FCLayer(2, "softmax", "cos_sim").ops()
Example no. 6
0
 def __init__(self, conf_dict):
     """
     Build the LSTM matching model from a configuration dict.
     """
     super(LSTM, self).__init__()
     net_conf = conf_dict["net"]
     # sizing hyper-parameters
     self.dict_size = conf_dict["dict_size"]
     self.task_mode = conf_dict["task_mode"]
     self.seq_len = conf_dict["seq_len"]
     self.emb_dim = net_conf["emb_dim"]
     self.lstm_dim = net_conf["lstm_dim"]
     self.hidden_dim = net_conf["hidden_dim"]
     # layers
     self.emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb").ops()
     self.lstm_layer = layers.DynamicLSTMLayer(self.lstm_dim, "lstm").ops()
     self.fc_layer = layers.FCLayer(self.hidden_dim, None, "fc").ops()
     self.softmax_layer = layers.FCLayer(2, "softmax", "cos_sim").ops()
     # projection into the LSTM input space (4 gates, hence lstm_dim * 4)
     self.proj_layer = Linear(input_dim=self.hidden_dim, output_dim=self.lstm_dim * 4)
Example no. 7
0
 def __init__(self, conf_dict):
     """
     Build the CNN matching model from a configuration dict.
     """
     super(CNN, self).__init__()
     net_conf = conf_dict["net"]
     # sizing hyper-parameters
     self.dict_size = conf_dict["dict_size"]
     self.task_mode = conf_dict["task_mode"]
     self.seq_len = conf_dict["seq_len"]
     self.emb_dim = net_conf["emb_dim"]
     self.filter_size = net_conf["filter_size"]
     self.num_filters = net_conf["num_filters"]
     self.hidden_dim = net_conf["hidden_dim"]
     # single input channel feeds the convolution
     self.channels = 1

     # layers
     self.emb_layer = layers.EmbeddingLayer(self.dict_size, self.emb_dim, "emb").ops()
     self.cnn_layer = layers.SimpleConvPool(self.channels,
                                            self.num_filters,
                                            self.filter_size)
     self.fc_layer = layers.FCLayer(self.hidden_dim, None, "fc").ops()
     self.softmax_layer = layers.FCLayer(2, "softmax", "cos_sim").ops()
Example no. 8
0
    def __init__(self, config):
        """
        Initialize the MMDNN matching model from a config dict.

        Reads vocabulary, embedding, LSTM and convolution sizes from
        ``config`` and builds the embedding, forward/reverse LSTM input
        projections, match-matrix convolution, dynamic pooling and FC
        layers.

        Args:
            config: dict with 'dict_size', 'seq_len', 'task_mode',
                'match_mask' and a 'net' sub-dict of layer sizes.

        Raises:
            ValueError: if ``config['task_mode']`` is neither
                "pointwise" nor "pairwise".
        """
        super(MMDNN, self).__init__()

        self.vocab_size = int(config['dict_size'])
        self.emb_size = int(config['net']['embedding_dim'])
        self.lstm_dim = int(config['net']['lstm_dim'])
        self.kernel_size = int(config['net']['num_filters'])
        self.win_size1 = int(config['net']['window_size_left'])
        self.win_size2 = int(config['net']['window_size_right'])
        self.dpool_size1 = int(config['net']['dpool_size_left'])
        self.dpool_size2 = int(config['net']['dpool_size_right'])
        self.hidden_size = int(config['net']['hidden_size'])
        self.seq_len = int(config["seq_len"])
        # Both sides share one max length; config['max_len_left'] and
        # config['max_len_right'] are intentionally not read here.
        self.seq_len1 = self.seq_len
        self.seq_len2 = self.seq_len
        self.task_mode = config['task_mode']
        self.zero_pad = True
        self.scale = False

        # A non-zero 'match_mask' turns on masking of the match matrix.
        self.match_mask = int(config['match_mask']) != 0

        if self.task_mode == "pointwise":
            self.n_class = int(config['n_class'])
            self.out_size = self.n_class
        elif self.task_mode == "pairwise":
            self.out_size = 1
        else:
            # Fail fast: the original only logged here and continued,
            # which later crashed with a confusing AttributeError
            # because self.out_size was never set.
            logging.error("training mode not supported")
            raise ValueError(
                "training mode not supported: %s" % self.task_mode)

        # layers
        self.emb_layer = pd_layers.EmbeddingLayer(
            self.vocab_size,
            self.emb_size,
            name="word_embedding",
            padding_idx=(0 if self.zero_pad else None)).ops()
        # Input projections feed the LSTM's 4 gates, hence 4 * lstm_dim.
        self.fw_in_proj = Linear(input_dim=self.emb_size,
                                 output_dim=4 * self.lstm_dim,
                                 param_attr=fluid.ParamAttr(name="fw_fc.w"),
                                 bias_attr=False)
        self.lstm_layer = pd_layers.DynamicLSTMLayer(self.lstm_dim,
                                                     "lstm").ops()
        self.rv_in_proj = Linear(input_dim=self.emb_size,
                                 output_dim=4 * self.lstm_dim,
                                 param_attr=fluid.ParamAttr(name="rv_fc.w"),
                                 bias_attr=False)
        self.reverse_layer = pd_layers.DynamicLSTMLayer(self.lstm_dim,
                                                        is_reverse=True).ops()

        # Convolution over the seq_len1 x seq_len2 match matrix.
        # Padding was int(x / 2) on one axis and x // 2 on the other in
        # the original; they are identical for positive ints, so use the
        # floor-division form consistently.
        self.conv = Conv2D(num_channels=1,
                           num_filters=self.kernel_size,
                           stride=1,
                           padding=(self.seq_len1 // 2,
                                    self.seq_len2 // 2),
                           filter_size=(self.seq_len1, self.seq_len2),
                           bias_attr=fluid.ParamAttr(
                               initializer=fluid.initializer.Constant(0.1)))

        # Dynamic max-pooling down to a dpool_size1 x dpool_size2 grid;
        # kernel and stride are the same, so compute them once.
        pool_kernel = [self.seq_len1 // self.dpool_size1,
                       self.seq_len2 // self.dpool_size2]
        self.pool_layer = Pool2D(pool_size=pool_kernel,
                                 pool_stride=pool_kernel,
                                 pool_type="max")
        self.fc_layer = pd_layers.FCLayer(self.hidden_size, "tanh", "fc").ops()
        self.fc1_layer = pd_layers.FCLayer(self.out_size, "softmax",
                                           "fc1").ops()