import torch

# `device` is assumed to be defined once for the whole notebook.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def val_epoc(tst, model, criterion):
    """Run one validation pass (single text field) and return (total loss, correct count)."""
    model.eval()  # switch to evaluation mode (disables dropout/batch-norm updates)
    val_loss = 0
    acc = 0
    for text, offsets, clss in tst:
        text, offsets, clss = text.long().to(device), offsets.to(device), clss.to(device)
        with torch.no_grad():
            output = model(text, offsets)
            loss = criterion(output, clss)
        val_loss += loss.item()  # accumulate scalars in a separate variable from the batch-loss tensor
        acc += (output.argmax(1) == clss).sum().item()
    return val_loss, acc
def val_epoc(tst, model, criterion):
    """Two-field variant of the validation pass, for a model that takes
    (app, cid) text pairs; redefines the single-field version above."""
    model.eval()
    val_loss = 0
    acc = 0
    for text_ap, offsets_ap, text_cid, offsets_cid, clss in tst:
        text_ap, text_cid = text_ap.long().to(device), text_cid.long().to(device)
        offsets_ap, offsets_cid, clss = offsets_ap.to(device), offsets_cid.to(device), clss.to(device)
        with torch.no_grad():
            output = model(text_ap, offsets_ap, text_cid, offsets_cid)
            loss = criterion(output, clss)
        val_loss += loss.item()
        acc += (output.argmax(1) == clss).sum().item()
    return val_loss, acc
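The flat-token-plus-offsets batch format consumed by these loops matches the input convention of nn.EmbeddingBag. A minimal sketch of a collate_fn that could produce such five-field batches is shown below; the sample layout (ap_tokens, cid_tokens, label) and the name collate_pair_batch are assumptions for illustration, not part of the original code.

def collate_pair_batch(batch):
    # Hypothetical collate_fn: assumes each sample is (ap_tokens, cid_tokens, label)
    # with the two token fields given as 1-D LongTensors.
    labels = torch.tensor([label for _, _, label in batch])
    ap = [t for t, _, _ in batch]
    cid = [c for _, c, _ in batch]
    # EmbeddingBag-style offsets: the start index of each sample in the flat tensor
    offsets_ap = torch.tensor([0] + [len(t) for t in ap[:-1]]).cumsum(dim=0)
    offsets_cid = torch.tensor([0] + [len(c) for c in cid[:-1]]).cumsum(dim=0)
    return torch.cat(ap), offsets_ap, torch.cat(cid), offsets_cid, labels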
from tqdm import tqdm


def train_epoc(trn, model, criterion, optimizer, scheduler):
    """Train the model for one epoch and return (total loss, correct count)."""
    model.train()
    train_loss = 0
    train_acc = 0
    for text_ap, offsets_ap, text_cid, offsets_cid, clss in tqdm(trn):
        optimizer.zero_grad()
        text_ap, text_cid = text_ap.long().to(device), text_cid.long().to(device)
        offsets_ap, offsets_cid, clss = offsets_ap.to(device), offsets_cid.to(device), clss.to(device)
        output = model(text_ap, offsets_ap, text_cid, offsets_cid)
        loss = criterion(output, clss)
        train_loss += loss.item()
        loss.backward()
        optimizer.step()
        train_acc += (output.argmax(1) == clss).sum().item()
    # Adjust the learning rate once per epoch
    scheduler.step()
    return train_loss, train_acc
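For completeness, a driver loop that wires the two functions together might look like the following sketch. The model and the two DataLoaders (here train_loader and valid_loader, assumed to yield the five-field batches described above) are not defined in this section, and the hyperparameters are illustrative only.

# Sketch of a training driver. `model`, `train_loader`, and `valid_loader`
# are assumed to be defined elsewhere in the notebook.
N_EPOCHS = 5

criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=4.0)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9)

for epoch in range(N_EPOCHS):
    train_loss, train_acc = train_epoc(train_loader, model, criterion, optimizer, scheduler)
    valid_loss, valid_acc = val_epoc(valid_loader, model, criterion)

    # Report per-batch mean loss and per-sample accuracy.
    print(f"Epoch {epoch + 1}: "
          f"train loss {train_loss / len(train_loader):.4f}, "
          f"acc {train_acc / len(train_loader.dataset):.4f} | "
          f"valid loss {valid_loss / len(valid_loader):.4f}, "
          f"acc {valid_acc / len(valid_loader.dataset):.4f}")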