# Training script fragment: epoch-based loop with SGD (momentum + Nesterov) and an
# MSE loss over a two-input network (net.forward(img1, img2) — pairwise inputs,
# presumably a Siamese-style model; confirm against the model definition).
# NOTE(review): this chunk is TRUNCATED — the validation loop body continues past
# the end of the visible source, so the final `if Flags.cuda:` has no body here.
optimizer = torch.optim.SGD(net.parameters(), lr=Flags.lr, momentum=0.9, nesterov=True)
sensitivity_list = []  # NOTE(review): never appended to in the visible code — presumably filled in the truncated validation tail
loss_list = []         # per-epoch mean training loss, consumed by plot_loss
epoch_valid = 0        # NOTE(review): never used in the visible code
for epoch in range(Flags.nepochs):
    loss_val = 0       # running sum of per-batch losses for this epoch
    print("\n ...Train at epoch " + str(epoch))
    cont_iter = 0      # number of batches seen this epoch (denominator for the mean)
    for batch_id, (img1, img2, label) in tqdm(enumerate(trainLoader, 1)):
        net.train()
        # Variable(...) is the legacy pre-0.4 autograd wrapper — NOTE(review):
        # plain tensors work on modern torch; verify the torch version in use.
        if Flags.cuda:
            img1, img2, label = Variable(img1.cuda()), Variable(img2.cuda()), Variable(label.cuda())
        else:
            img1, img2, label = Variable(img1), Variable(img2), Variable(label)
        optimizer.zero_grad()
        output = net.forward(img1, img2)
        loss = loss_MSE(output, label)
        loss_val += loss.item()
        # NOTE(review): second zero_grad() in the same iteration — redundant here
        # (no backward has run since the first call); safe but should be removed.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        cont_iter += 1
    # Mean training loss for the epoch; cont_iter > 0 assumed (non-empty loader).
    loss_epoch = loss_val / cont_iter
    loss_list.append(loss_epoch)
    plot_loss(loss_list, save_path)
    # Periodic validation every Flags.valid_every epochs (includes epoch 0).
    if epoch % Flags.valid_every == 0:
        net.eval()
        # NOTE(review): no torch.no_grad() around validation — gradients are still
        # tracked; wrap the eval loop in no_grad to save memory (confirm intended).
        print("\n ...Valid")
        sensitivity_valid = []
        for _, (valid1, valid2, label_valid) in tqdm(enumerate(validLoader, 1)):
            # NOTE(review): source truncated here — the cuda/cpu branch body and the
            # rest of the validation loop are outside this chunk.
            if Flags.cuda:
# Training script fragment: iteration-based loop with Adam and a BCE loss over a
# two-input network (net.forward(img1, img2) — pairwise inputs, presumably a
# Siamese verification model; confirm against the model definition).
# NOTE(review): this chunk is TRUNCATED — the validation loop ends right after the
# forward pass; error accounting on r/e/list_err continues past the visible source.
# NOTE(review): unconditional net.cuda() here, but the loops still branch on
# Flags.cuda — on a CPU-only run this line would fail; verify against Flags.cuda.
net.cuda()
optimizer = torch.optim.Adam(net.parameters(), lr=Flags.lr)
optimizer.zero_grad()
loss_val = 0       # running sum of per-batch losses (never reset in visible code)
valid_list = []    # NOTE(review): never appended to in the visible code
for batch_id, (img1, img2, label) in tqdm(enumerate(trainLoader, 1)):
    # NOTE(review): message says "epoch" but prints the batch index — misleading log.
    print("\n ...Train at epoch " + str(batch_id))
    net.train()
    # Hard stop after Flags.max_iter batches (iteration-budget training).
    if batch_id > Flags.max_iter:
        break
    # Variable(...) is the legacy pre-0.4 autograd wrapper — NOTE(review):
    # plain tensors work on modern torch; verify the torch version in use.
    if Flags.cuda:
        img1, img2, label = Variable(img1.cuda()), Variable(img2.cuda()), Variable(label.cuda())
    else:
        img1, img2, label = Variable(img1), Variable(img2), Variable(label)
    optimizer.zero_grad()
    output = net.forward(img1, img2)
    loss = loss_BCE(output, label)
    loss_val += loss.item()
    loss.backward()
    optimizer.step()
    # Periodic validation every Flags.valid_every training iterations.
    if batch_id % Flags.valid_every == 0:
        net.eval()
        list_err = []  # presumably per-pair errors; filled in the truncated tail
        print("\n ...Valid")
        r, e = 0, 0    # presumably right/error counters; updated in the truncated tail
        # NOTE(review): no torch.no_grad() around validation — gradients are still
        # tracked; wrap the eval loop in no_grad to save memory (confirm intended).
        for _, (valid1, valid2, label_valid) in tqdm(enumerate(validLoader, 1)):
            # NOTE(review): asymmetric branches — cuda path passes raw tensors,
            # cpu path wraps in Variable; likely an oversight, confirm.
            if Flags.cuda:
                test1, test2 = valid1.cuda(), valid2.cuda()
            else:
                test1, test2 = Variable(valid1), Variable(valid2)
            # Detach to numpy on CPU; .data access is legacy-style (pre-0.4 torch).
            # NOTE(review): source truncated here — the comparison against
            # label_valid and the accuracy bookkeeping are outside this chunk.
            output = net.forward(test1, test2).data.cpu().numpy()