def forward(self, data, criterion, config, usegpu, acc_result=None):
    """NeuMF-style forward pass fusing GMF and MLP user/item branches.

    Args:
        data: batch dict with 'users', 'music' (item ids), 'label'.
        criterion: loss callable taking (logits, label).
        config: project config, threaded through to calc_accuracy.
        usegpu: unused here; kept for the project-wide forward signature.
        acc_result: running accuracy accumulator.

    Returns:
        dict with "loss", "accuracy", argmax "result", raw logits "x" and the
        updated "accuracy_result" accumulator.
    """
    user = data['users']
    item = data['music']
    label = data['label']

    # GMF branch: element-wise product of user/item embeddings.
    if not self.model == 'MLP':
        embed_user_GMF = self.embed_user_GMF(user)
        embed_item_GMF = self.embed_item_GMF(item)
        output_GMF = embed_user_GMF * embed_item_GMF
    # MLP branch: concatenated embeddings through the MLP tower.
    if not self.model == 'GMF':
        embed_user_MLP = self.embed_user_MLP(user)
        embed_item_MLP = self.embed_item_MLP(item)
        interaction = torch.cat((embed_user_MLP, embed_item_MLP), -1)
        output_MLP = self.MLP_layers(interaction)

    if self.model == 'GMF':
        concat = output_GMF
    elif self.model == 'MLP':
        concat = output_MLP
    else:
        concat = torch.cat((output_GMF, output_MLP), -1)

    self.output = self.predict_layer(concat)
    loss = criterion(self.output, label)
    # BUG FIX: the updated accumulator was previously bound to `accu_result`
    # while the stale `acc_result` argument was returned; bind and return the
    # same name, matching the other forward() implementations in this file.
    accu, acc_result = calc_accuracy(self.output, label, config, acc_result)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.max(self.output, dim=1)[1].cpu().numpy(),
        "x": self.output,
        "accuracy_result": acc_result
    }
def forward(self, data, criterion, config, usegpu, acc_result=None):
    """TextCNN-style forward: multi-width convolutions + max-over-time pooling.

    Args:
        data: batch dict with 'input' features and 'label'.
        criterion: loss callable taking (logits, labels, weights=...).
        config: project config, threaded through to calc_accuracy.
        usegpu: unused here; kept for the project-wide forward signature.
        acc_result: unused — calc_accuracy is always seeded with None here.

    Returns:
        dict with "loss", "accuracy", thresholded "result" (>= 0.5), raw
        outputs "x" and the fresh "accuracy_result".
    """
    x = data['input']
    labels = data['label']
    # Shape into (batch, 1 channel, seq_len, data_size) for the 2-D convs.
    x = x.view(x.shape[0], 1, -1, self.data_size)

    # FIX: removed the dead `gram` counter (initialized from self.min_gram and
    # incremented every iteration but never read).
    conv_out = []
    for conv in self.convs:
        y = self.relu(conv(x))
        # Max-over-time pooling, flattened to (batch, channels).
        y = torch.max(y, dim=2)[0].view(x.shape[0], -1)
        conv_out.append(y)
    conv_out = torch.cat(conv_out, dim=1)

    y = self.fc(conv_out)
    if self.multi:
        # Multi-label head: independent per-class sigmoid probabilities.
        y = self.sigmoid(y)

    loss = criterion(y, labels, weights=None)
    accu, acc_result = calc_accuracy(y, labels, config, None)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.ge(y, 0.5).cpu().numpy(),
        "x": y,
        "accuracy_result": acc_result
    }
def forward(self, data, criterion, config, usegpu, acc_result=None):
    """CNN + attention forward: conv features attended and pooled to logits.

    Args:
        data: batch dict with 'input' features and 'label'.
        criterion: loss callable taking (logits, labels, weights=...).
        config: project config, threaded through to calc_accuracy.
        usegpu: unused here; kept for the project-wide forward signature.
        acc_result: unused — calc_accuracy is always seeded with None here.

    Returns:
        dict with "loss", "accuracy", thresholded "result" (>= 0.5), raw
        outputs "x" and the fresh "accuracy_result".
    """
    x = data['input']
    labels = data['label']
    # Shape into (batch, 1 channel, seq_len, data_size) for the 2-D convs.
    x = x.view(x.shape[0], 1, -1, self.data_size)

    # FIX: removed the dead `gram` counter (initialized from self.min_gram and
    # incremented every iteration but never read).
    conv_out = []
    for conv in self.convs:
        conv_out.append(self.relu(conv(x)))
    # (batch, channels, positions, 1) -> (batch, positions, channels)
    conv_out = torch.cat(conv_out, dim=1).squeeze(3).permute(0, 2, 1)

    # Attention over positions.
    # FIX: use the functional softmax instead of constructing a throwaway
    # nn.Softmax module on every forward call (identical result).
    attn = torch.softmax(self.fc1(conv_out), dim=1).permute(0, 2, 1)
    attn_out = self.fc2(torch.bmm(attn, conv_out)).squeeze(2)
    y = self.sigmoid(attn_out)

    loss = criterion(y, labels, weights=None)
    accu, acc_result = calc_accuracy(y, labels, config, None)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.ge(y, 0.5).cpu().numpy(),
        "x": y,
        "accuracy_result": acc_result
    }
def forward(self, data, criterion, config, usegpu, acc_result=None):
    """Encode a job description, work history and skills, then classify.

    Expects 'description', 'skills', 'label' and 'work_history' in `data`;
    returns the project-standard dict of loss/accuracy/predictions/logits.
    """
    description = data['description']
    skills = data['skills']
    label = data['label']
    work_hist = data['work_history']

    desc_emb = self.embs(description)

    # Flatten (batch, work_num, len) -> (batch * work_num, len) so every work
    # record is encoded independently, then max-pool each encoded record.
    flat_work = work_hist.view(work_hist.shape[0] * self.work_num, work_hist.shape[2])
    work_enc, _ = self.work_encoder(self.embs(flat_work))
    work_enc, _ = torch.max(work_enc, dim=1)
    hidden_size = config.getint('model', 'hidden_size')
    work_enc = work_enc.view(desc_emb.shape[0], self.work_num, hidden_size)

    desc_enc, _ = self.desc_encoder(desc_emb)
    # Skills are encoded with access to the encoded description and work records.
    skill_enc = self.skill_encoder(skills, desc_enc, work_enc)
    desc_pooled = torch.max(desc_enc, dim=1)[0]

    logits = self.fc(torch.cat([desc_pooled, skill_enc], dim=1))
    loss = criterion(logits, label)
    accu, acc_result = calc_accuracy(logits, label, config, acc_result)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.max(logits, dim=1)[1].cpu().numpy(),
        "x": logits,
        "accuracy_result": acc_result
    }
def forward(self, data, criterion, config, usegpu, mode="train"):
    """BERT encoder -> flatten -> linear head; sigmoid when multi-label.

    Note that, unlike the sibling forward() methods, this one takes `mode`
    instead of an accuracy accumulator and seeds calc_accuracy's default.
    """
    tokens = data['input']
    labels = data['label']

    encoded, _ = self.bert(tokens, output_all_encoded_layers=False)
    # Flatten all token representations into one feature vector per example.
    encoded = encoded.view(encoded.size()[0], -1)

    logits = self.fc(encoded)
    if self.multi:
        # Multi-label head: independent per-class sigmoid probabilities.
        logits = self.sigmoid(logits)

    loss = criterion(logits, labels, weights=None)
    accu, acc_result = calc_accuracy(logits, labels, config)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.max(logits, dim=1)[1].cpu().numpy(),
        "x": logits,
        "accuracy_result": acc_result
    }
def forward(self, data, criterion, config, usegpu, acc_result=None):
    """xDeepFM forward: linear (first-order) + CIN + DNN branches, concatenated.

    Args:
        data: batch dict with 'users', 'music' and 'label'.
        criterion: loss callable taking (logits, label).
        config: project config, threaded through to calc_accuracy.
        usegpu: unused here; kept for the project-wide forward signature.
        acc_result: running accuracy accumulator.

    Returns:
        dict with "loss", "accuracy", argmax "result", raw logits "x" and the
        updated "accuracy_result" accumulator.
    """
    user = data['users']
    music = data['music']
    label = data['label']

    # Field embeddings: (batch, F*K) reshaped to (batch, field_num, field_size).
    self.emb = self.field_encoder(user, music)
    self.emb = self.emb.reshape((-1, self.field_num, self.field_size))

    # ----------- linear (first-order) part -----------
    self.y_first_order = torch.sum(self.emb, dim=2)
    self.y_first_order = self.dropout(self.y_first_order)

    # ----------- CIN part -----------
    y_cin = self.CIN(self.emb)

    # ----------- DNN part -----------
    self.y_deep = self.emb.reshape((-1, self.field_num * self.field_size))
    self.y_deep = self.dropout(self.y_deep)
    self.y_deep = self.mlp(self.y_deep)

    self.output = torch.cat([self.y_first_order, y_cin, self.y_deep], dim=1)
    self.output = self.final(self.output)

    loss = criterion(self.output, label)
    # BUG FIX: the updated accumulator was previously bound to `accu_result`
    # while the stale `acc_result` argument was returned; bind and return the
    # same name, matching the other forward() implementations in this file.
    accu, acc_result = calc_accuracy(self.output, label, config, acc_result)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.max(self.output, dim=1)[1].cpu().numpy(),
        "x": self.output,
        "accuracy_result": acc_result
    }
def forward(self, data, criterion, config, usegpu, acc_result=None):
    """Score a candidate item against a user's interaction history.

    Two heads are trained jointly: a similarity head built from the best
    matching history entry, and an output head over the interest/candidate
    features. Their softmax distributions are averaged for the prediction.
    """
    users = data['users']
    candidate = data['candidate']
    hist = data['history']
    labels = data['label']
    score = data['score']

    candidate, cand_weight = self.encoder(candidate, users)

    batch = labels.shape[0]
    hist_len = hist['id'].shape[1]
    # Flatten each history field to (batch * hist_len, -1) so the encoder can
    # process every history entry in one pass (mutates the dict in place,
    # exactly as before).
    for key in hist:
        hist[key] = hist[key].view(batch * hist_len, -1)
    hist_enc, hist_weight = self.encoder(hist, users, True)
    hist_enc = hist_enc.view(batch, hist_len, -1)

    interest, similarity = self.relation(hist_enc, candidate, score)
    # The strongest single-history-entry similarity drives the similarity head.
    similarity = torch.max(similarity, dim=1)[0].unsqueeze(1)
    sim_logits = torch.cat([1 - similarity, similarity], dim=1)
    out_logits = self.out(torch.cat([interest, candidate], dim=1))

    loss = criterion(out_logits, labels) + criterion(sim_logits, labels)
    # Average the two heads' probability distributions for the final prediction.
    combined = 0.5 * (torch.softmax(sim_logits, dim=1) + torch.softmax(out_logits, dim=1))

    accu, acc_result = calc_accuracy(combined, labels, config, acc_result)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.max(combined, dim=1)[1].cpu().numpy(),
        "x": combined,
        "accuracy_result": acc_result
    }
def forward(self, data, criterion, config, usegpu, acc_result=None):
    """Memory-augmented translation-style recommender forward pass.

    Attends over an external memory to build a relation vector, then uses
    the translation residual ||user + rel - music|| as the training signal:
    minimized for positive pairs, maximized for negatives.

    Args:
        data: batch dict with 'users', 'music' and 'label'.
        criterion: classification loss — currently intentionally disabled
            (see the commented-out term below).
        config: project config, threaded through to calc_accuracy.
        usegpu: unused here; kept for the project-wide forward signature.
        acc_result: running accuracy accumulator.
    """
    user = data['users']
    music = data['music']
    label = data['label']

    user = self.user_encoder(user)
    music = self.music_encoder(music)

    s = user * music
    out_result = self.out(s)

    # Attention over the memory slots produces a relation vector.
    s = s.matmul(torch.transpose(self.memory, 0, 1))
    s = torch.softmax(s, dim=1)
    rel = s.matmul(self.memory)

    # Translation residual norm per example.
    score = user + rel - music
    score = torch.norm(score, dim=1)
    # Map labels {0, 1} -> {-1, +1}: positives pull the distance down,
    # negatives push it up.
    mask = (2 * label - 1).float()
    loss = torch.mean(mask * score)  # + criterion(out_result, label)

    # BUG FIX: the updated accumulator was previously bound to `accu_result`
    # while the stale `acc_result` argument was returned; bind and return the
    # same name, matching the other forward() implementations in this file.
    accu, acc_result = calc_accuracy(out_result, label, config, acc_result)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.max(out_result, dim=1)[1].cpu().numpy(),
        "x": out_result,
        "accuracy_result": acc_result
    }
def forward(self, data, criterion, config, usegpu, acc_result=None):
    """LSTM classifier forward: sequence LSTM, max-pool over time, linear head."""
    inputs = data["input"]
    labels = data["label"]

    # NOTE(review): the reshape uses the configured batch size rather than
    # inputs.shape[0]; a partial final batch would fail here (init_hidden is
    # presumably config-sized too) — confirm the loader drops remainders.
    batch_size = config.getint("train", "batch_size")
    inputs = inputs.view(batch_size, -1, self.data_size)

    self.init_hidden(config, usegpu)
    self.hidden = self.transpose(self.hidden)
    lstm_out, self.hidden = self.lstm(inputs, self.hidden)
    # Max over the time dimension.
    pooled = torch.max(lstm_out, dim=1)[0]

    logits = self.fc(pooled)
    loss = criterion(logits, labels)
    accu, acc_result = calc_accuracy(logits, labels, config, acc_result)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.max(logits, dim=1)[1].cpu().numpy(),
        "x": logits,
        "accuracy_result": acc_result
    }
def forward(self, data, criterion, config, usegpu, acc_result=None):
    """DeepFM forward: FM first/second-order terms + DNN branch, concatenated.

    Args:
        data: batch dict with 'users', 'music' and 'label'.
        criterion: loss callable taking (logits, label).
        config: project config, threaded through to calc_accuracy.
        usegpu: unused here; kept for the project-wide forward signature.
        acc_result: running accuracy accumulator.

    Returns:
        dict with "loss", "accuracy", argmax "result", raw logits "x" and the
        updated "accuracy_result" accumulator.
    """
    user = data['users']
    music = data['music']
    label = data['label']

    # Field embeddings: (batch, F*K) reshaped to (batch, field_num, field_size).
    self.emb = self.field_encoder(user, music)
    self.emb = self.emb.reshape((-1, self.field_num, self.field_size))

    # ----------- FM part -----------
    # First-order term.
    self.y_first_order = torch.sum(self.emb, dim=2)
    self.y_first_order = self.dropout(self.y_first_order)
    # Second-order term via the (sum-square - square-sum) / 2 identity.
    summed = torch.sum(self.emb, dim=1)
    self.summed_square = torch.mul(summed, summed)  # (batch, K)
    self.squared_sum = torch.sum(torch.mul(self.emb, self.emb), dim=1)  # (batch, K)
    self.y_second_order = 0.5 * (self.summed_square - self.squared_sum)
    self.y_second_order = self.dropout(self.y_second_order)

    # ----------- DNN part -----------
    self.y_deep = self.emb.reshape((-1, self.field_num * self.field_size))
    self.y_deep = self.dropout(self.y_deep)
    self.y_deep = self.mlp(self.y_deep)

    self.output = torch.cat(
        [self.y_first_order, self.y_second_order, self.y_deep], dim=1)
    self.output = self.final(self.output)

    loss = criterion(self.output, label)
    # BUG FIX: the updated accumulator was previously bound to `accu_result`
    # while the stale `acc_result` argument was returned; bind and return the
    # same name, matching the other forward() implementations in this file.
    accu, acc_result = calc_accuracy(self.output, label, config, acc_result)
    return {
        "loss": loss,
        "accuracy": accu,
        "result": torch.max(self.output, dim=1)[1].cpu().numpy(),
        "x": self.output,
        "accuracy_result": acc_result
    }