def forward(self, input_data):
    q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, gt_col = input_data
    B = len(q_len)
    # Encode the question with BERT or a bidirectional LSTM.
    if self.use_bert:
        q_enc = self.q_bert(q_emb_var, q_len)
    else:
        q_enc, _ = run_lstm(self.q_lstm, q_emb_var, q_len)
    hs_enc, _ = run_lstm(self.hs_lstm, hs_emb_var, hs_len)
    col_enc, _ = col_tab_name_encode(col_emb_var, col_name_len, col_len, self.col_lstm)

    # Gather the target/predicted column's embedding.
    # col_emb: (B, hid_dim)
    col_emb = []
    for b in range(B):
        col_emb.append(col_enc[b, gt_col[b]])
    col_emb = torch.stack(col_emb)

    # Question and history encodings, attention-weighted by the column embedding.
    q_weighted = plain_conditional_weighted_num(self.q_att, q_enc, q_len, col_emb)
    hs_weighted = plain_conditional_weighted_num(self.hs_att, hs_enc, hs_len, col_emb)

    # hv_score: (B, 2)
    hv_score = self.hv_out(
        self.hv_out_q(q_weighted)
        + int(self.use_hs) * self.hv_out_hs(hs_weighted)
        + self.hv_out_c(col_emb))
    SIZE_CHECK(hv_score, [B, 2])
    return hv_score
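# The column-conditioned pooling used above is done by
# `plain_conditional_weighted_num`, a helper defined elsewhere in this repo.
# The sketch below is a minimal reconstruction of its assumed behavior, not the
# repo's exact implementation: `att` is taken to be an nn.Linear(hid_dim, hid_dim)
# scoring layer, `enc` a padded batch of token encodings (B, max_len, hid_dim),
# and `lengths` the true sequence lengths. SIZE_CHECK is likewise assumed to be
# a plain shape assertion, e.g. `assert list(tensor.size()) == size`.
def plain_conditional_weighted_num_sketch(att, enc, lengths, col_emb):
    # Score every token against the column embedding: (B, max_len).
    att_val = torch.bmm(col_emb.unsqueeze(1), att(enc).transpose(1, 2)).squeeze(1)
    # Mask out padding positions past each sequence's true length.
    for b, length in enumerate(lengths):
        if length < att_val.size(1):
            att_val[b, length:] = -100
    att_prob = torch.softmax(att_val, dim=1)
    # Probability-weighted sum of token encodings: (B, hid_dim).
    return (enc * att_prob.unsqueeze(2)).sum(dim=1)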
def forward(self, input_data):
    q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, gt_col = input_data
    B = len(q_len)
    if self.use_bert:
        q_enc = self.q_bert(q_emb_var, q_len)
    else:
        q_enc, _ = run_lstm(self.q_lstm, q_emb_var, q_len)
    hs_enc, _ = run_lstm(self.hs_lstm, hs_emb_var, hs_len)
    col_enc, _ = col_tab_name_encode(col_emb_var, col_name_len, col_len, self.col_lstm)

    # Gather the target/predicted column's embedding.
    # col_emb: (B, hid_dim)
    col_emb = []
    for b in range(B):
        col_emb.append(col_enc[b, gt_col[b]])
    col_emb = torch.stack(col_emb)

    # Question encoding weighted by attention against the column.
    q_weighted = plain_conditional_weighted_num(self.q_att, q_enc, q_len, col_emb)
    # Same as above, but for the SQL history encoding.
    hs_weighted = plain_conditional_weighted_num(self.hs_att, hs_enc, hs_len, col_emb)

    # dat_score: (B, 4)
    dat_score = self.dat_out(
        self.dat_out_q(q_weighted)
        + int(self.use_hs) * self.dat_out_hs(hs_weighted)
        + self.dat_out_c(col_emb))
    SIZE_CHECK(dat_score, [B, 4])
    return dat_score
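# `run_lstm`, used by every encoder above, is also a repo helper. The sketch
# below shows the standard pack/run/unpack pattern it presumably wraps;
# it assumes a batch_first LSTM and integer-indexable `lengths`, and is
# illustrative rather than the repo's actual code.
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

def run_lstm_sketch(lstm, inp, lengths, hidden=None):
    # Sort sequences by decreasing length for packing, then restore order.
    sort_idx = sorted(range(len(lengths)), key=lambda i: -lengths[i])
    inv_idx = [0] * len(sort_idx)
    for new_pos, old_pos in enumerate(sort_idx):
        inv_idx[old_pos] = new_pos
    packed = pack_padded_sequence(
        inp[sort_idx], [int(lengths[i]) for i in sort_idx], batch_first=True)
    out_packed, hidden = lstm(packed, hidden)
    out, _ = pad_packed_sequence(out_packed, batch_first=True)
    return out[inv_idx], hidden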
def forward(self, input_data):
    q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, gt_col = input_data
    B = len(q_len)
    if self.use_bert:
        q_enc = self.q_bert(q_emb_var, q_len)
    else:
        q_enc, _ = run_lstm(self.q_lstm, q_emb_var, q_len)
    hs_enc, _ = run_lstm(self.hs_lstm, hs_emb_var, hs_len)
    col_enc, _ = col_tab_name_encode(col_emb_var, col_name_len, col_len, self.col_lstm)

    # Gather the target/predicted column's embedding.
    # col_emb: (B, hid_dim)
    col_emb = []
    for b in range(B):
        col_emb.append(col_enc[b, gt_col[b]])
    col_emb = torch.stack(col_emb)

    # Predict the number of operators.
    q_weighted_num = plain_conditional_weighted_num(
        self.q_num_att, q_enc, q_len, col_emb)
    # Same as above, but for the SQL history encoding.
    hs_weighted_num = plain_conditional_weighted_num(
        self.hs_num_att, hs_enc, hs_len, col_emb)
    # op_num_score: (B, 2)
    op_num_score = self.op_num_out(
        self.op_num_out_q(q_weighted_num)
        + int(self.use_hs) * self.op_num_out_hs(hs_weighted_num)
        + self.op_num_out_c(col_emb))
    SIZE_CHECK(op_num_score, [B, 2])

    # Attend over question tokens conditioned on the selected column.
    q_weighted = plain_conditional_weighted_num(self.q_att, q_enc, q_len, col_emb)
    # Same as above, but for the SQL history encoding.
    hs_weighted = plain_conditional_weighted_num(self.hs_att, hs_enc, hs_len, col_emb)

    # Predict the operators.
    # op_score: (B, 11)
    op_score = self.op_out(
        self.op_out_q(q_weighted)
        + int(self.use_hs) * self.op_out_hs(hs_weighted)
        + self.op_out_c(col_emb))
    SIZE_CHECK(op_score, [B, 11])

    score = (op_num_score, op_score)
    return score
def forward(self, input_data):
    q_emb_var, q_len, hs_emb_var, hs_len, col_emb_var, col_len, col_name_len, gt_col = input_data
    B = len(q_len)
    if self.use_bert:
        q_enc = self.q_bert(q_emb_var, q_len)
    else:
        q_enc, _ = run_lstm(self.q_lstm, q_emb_var, q_len)
    hs_enc, _ = run_lstm(self.hs_lstm, hs_emb_var, hs_len)
    col_enc, _ = col_tab_name_encode(col_emb_var, col_name_len, col_len, self.col_lstm)

    # Gather the target/predicted column's embedding.
    # col_emb: (B, hid_dim)
    col_emb = []
    for b in range(B):
        col_emb.append(col_enc[b, gt_col[b]])
    col_emb = torch.stack(col_emb)

    # Predict the number of aggregators.
    q_weighted_num = plain_conditional_weighted_num(
        self.q_num_att, q_enc, q_len, col_emb)
    # Same as above, but for the SQL history encoding.
    hs_weighted_num = plain_conditional_weighted_num(
        self.hs_num_att, hs_enc, hs_len, col_emb)
    # agg_num_score: (B, 4)
    agg_num_score = self.agg_num_out(
        self.agg_num_out_q(q_weighted_num)
        + int(self.use_hs) * self.agg_num_out_hs(hs_weighted_num)
        + self.agg_num_out_c(col_emb))
    SIZE_CHECK(agg_num_score, [B, 4])

    # Predict the aggregators.
    q_weighted = plain_conditional_weighted_num(self.q_att, q_enc, q_len, col_emb)
    # Same as above, but for the SQL history encoding.
    hs_weighted = plain_conditional_weighted_num(self.hs_att, hs_enc, hs_len, col_emb)
    # agg_score: (B, 5)
    agg_score = self.agg_out(
        self.agg_out_q(q_weighted)
        + int(self.use_hs) * self.agg_out_hs(hs_weighted)
        + self.agg_out_c(col_emb))
    SIZE_CHECK(agg_score, [B, 5])

    score = (agg_num_score, agg_score)
    return score
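# Both the operator and aggregator predictors above return a
# (count_logits, label_logits) pair. A minimal, illustrative decoding loop;
# `predictor` and the variable names are hypothetical, and the score tensors
# are assumed to be raw logits as in the modules above.
agg_num_score, agg_score = predictor(input_data)
num_aggs = agg_num_score.argmax(dim=1)  # (B,) predicted aggregator count
for b in range(agg_score.size(0)):
    k = int(num_aggs[b])
    # Top-k aggregator indices for example b (empty when k == 0).
    chosen = agg_score[b].topk(k).indices.tolist()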