def test_model(train_i, model_path, chose, isvalidate, istrain=False):
    '''
    Example arguments:
        root = '/home/wangke/ultrasound_data163/'
        img_labs = ['process_pic_163/', 'process_lab_163/']
        model_path = 'D205_Res34_unet_res1_0.pkl'
        chose = 'D205'
    '''
    batchsize = 1
    i = train_i
    if chose == 'D205':
        root = '/home/wangke/ultrasound_data163/'
        img_labs = ['process_pic_163/', 'process_lab_163/']
        # txt_test = 'D163N5fold'+str(train_i+1)+'_test.csv'
        txt_test = 'all_data.csv'
        # net_path = 'models/saved/D205/'+model_path
        excel_path = 'models/pre_out/D205/' + model_path[:-4] + '.xlsx'
        save_picpath = 'models/pre_out/D205/pre_pic/'
    if chose == 'D23':
        root = '/home/wangke/ultrasound_data2/'
        img_labs = ['fuse_data_pic/', 'fuse_data_lab/']
        txt_test = ('FuseDatafold' + str(train_i + 1) + '_test.csv'
                    if isvalidate else 'fuse_data.csv')
        if istrain:
            txt_test = 'FuseDatafold' + str(train_i + 1) + '_train.csv'
        # Suffix now matches the split being evaluated (the original had the
        # 'train'/'vali' labels swapped).
        excel_path = ('models/pre_out/D2/' + model_path[:-4] + 'train.xlsx'
                      if istrain else
                      'models/pre_out/D2/' + model_path[:-4] + 'vali.xlsx')
        save_picpath = 'models/pre_out/D2/pre_pic/'
    if chose == 'D4':
        root = '/home/wangke/ultrasound_data4/'
        img_labs = ['data_pic/', 'data_lab/']
        # img_labs = ['process_pic/', 'process_lab/']
        # txt_test = 'N5fold'+str(train_i+1)+'_test.csv'
        txt_test = 'all_data2.csv'
        # net_path = 'models/saved/D3/'+model_path
        excel_path = 'models/pre_out/D4/' + model_path[:-4] + 'test.xlsx'
        save_picpath = 'models/pre_out/D4/pre_pic/'

    # The first two characters of the model prefix select the save folder,
    # e.g. 'D205_...' -> 'models/saved/D2/'.
    net_path = 'models/saved/' + model_path.split('_')[0][:2] + '/' + model_path
    # net_path = 'models/saved/D2/'+model_path
    net = torch.load(net_path)
    net.eval()

    file_list = pd.read_csv(root + txt_test, sep=',', usecols=[1]).values.tolist()
    file_list = [f[0] for f in file_list]
    trans_tensor = transforms.ToTensor()

    dice_all, miou_all, sen_all, ppv_all = [], [], [], []
    spe_all, f1s_all, acc_all = [], [], []
    outputs = []
    for file_i in file_list:
        img = cv2.imread(root + img_labs[0] + file_i, cv2.IMREAD_GRAYSCALE)
        # img3 = cv2.equalizeHist(img).astype('float32')
        img = img.astype('float32')
        img /= 255.
        # Second input channel: Gaussian remapping of intensities around 0.5,
        # scaled by the image's own standard deviation.
        img2 = np.exp(-((img - 0.5) * (img - 0.5)) / (2 * np.std(img) * np.std(img)))
        # img = (img-0.26)/0.14
        img = np.array([img, img2]).transpose(1, 2, 0)
        img_tensor = trans_tensor(img).unsqueeze(0)
        img_tensor = V(img_tensor.cuda())

        lab = cv2.imread(root + img_labs[1] + file_i, cv2.IMREAD_GRAYSCALE)
        lab = lab.astype('float32')
        lab /= 255.
        lab_tensor = trans_tensor(lab).unsqueeze(0)
        # lab_tensor = V(lab_tensor.cuda())

        with torch.no_grad():
            out_final2, out_res, out_final = net(img_tensor)
        out = out_final.cpu().numpy()
        order = file_i[:-4] + model_path.split('_')[-1][:-4]
        # print(out.shape)
        out[out > 0.5] = 1
        out[out <= 0.5] = 0
        plt.imsave(save_picpath + order + '.png', out[0, 0, :, :], cmap='gray')

        # Compute the metrics on the binarized prediction.
        # loss = bce_loss(pre_map, F.interpolate(lab_tensor, size=[128, 128], mode='nearest')) + dice_loss(lab_tensor, pre_map)
        out_final[out_final > 0.5] = 1
        out_final[out_final <= 0.5] = 0
        pred = out_final.cpu()
        dice_score = float(dice_coeff(lab_tensor, pred, False))
        sen_score = float(sensitive(lab_tensor, pred))
        ppv_score = float(precision(lab_tensor, pred))
        acc_score = float(accuracy(lab_tensor, pred))
        spe_score = float(specificity(lab_tensor, pred))
        miou_score = float(m_iou(lab_tensor, pred, False))
        ff1_score = float(f1_score(lab_tensor, pred))

        outputs.append((file_i, dice_score, miou_score, sen_score, ppv_score,
                        acc_score, ff1_score, spe_score, 'fold_' + str(i + 1)))
        dice_all.append(dice_score)
        miou_all.append(miou_score)
        sen_all.append(sen_score)
        ppv_all.append(ppv_score)
        acc_all.append(acc_score)
        f1s_all.append(ff1_score)
        spe_all.append(spe_score)

    dice_aver = sum(dice_all) / len(dice_all)
    miou_aver = sum(miou_all) / len(miou_all)
    sen_aver = sum(sen_all) / len(sen_all)
    ppv_aver = sum(ppv_all) / len(ppv_all)
    acc_aver = sum(acc_all) / len(acc_all)
    f1_aver = sum(f1s_all) / len(f1s_all)
    spe_aver = sum(spe_all) / len(spe_all)
    print('dice_score', dice_aver, 'miou_score', miou_aver, 'sen_aver', sen_aver,
          'ppv_aver', ppv_aver, 'acc_aver', acc_aver, 'f1_aver', f1_aver,
          'spe_aver', spe_aver)

    df = pd.DataFrame(outputs, columns=['order', 'dice', 'miou', 'sen', 'ppv',
                                        'acc', 'f1', 'spe', 'fold'])
    df.to_excel(excel_path, index=False)
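
# --- Note: dice_coeff, sensitive, precision, specificity, accuracy, f1_score
# and m_iou are imported from elsewhere in this repo. Below is a minimal sketch
# of what they are assumed to compute, matching the call signatures used above
# (ground-truth tensor first, prediction second; the boolean flag that toggles
# per-batch averaging is ignored in this simplified version). This is an
# illustration, not the repo's actual implementation.

def _confusion(y_true, y_pred, threshold=0.5):
    # Binarize both masks and count TP/FP/FN/TN over all pixels.
    t = (y_true > threshold).float().view(-1)
    p = (y_pred > threshold).float().view(-1)
    tp = (t * p).sum()
    fp = ((1 - t) * p).sum()
    fn = (t * (1 - p)).sum()
    tn = ((1 - t) * (1 - p)).sum()
    return tp, fp, fn, tn


def dice_coeff(y_true, y_pred, batch=True, eps=1e-6):
    tp, fp, fn, tn = _confusion(y_true, y_pred)
    return (2 * tp + eps) / (2 * tp + fp + fn + eps)


def sensitive(y_true, y_pred, eps=1e-6):    # recall / true positive rate
    tp, fp, fn, tn = _confusion(y_true, y_pred)
    return (tp + eps) / (tp + fn + eps)


def precision(y_true, y_pred, eps=1e-6):    # positive predictive value
    tp, fp, fn, tn = _confusion(y_true, y_pred)
    return (tp + eps) / (tp + fp + eps)


def specificity(y_true, y_pred, eps=1e-6):  # true negative rate
    tp, fp, fn, tn = _confusion(y_true, y_pred)
    return (tn + eps) / (tn + fp + eps)


def accuracy(y_true, y_pred, eps=1e-6):
    tp, fp, fn, tn = _confusion(y_true, y_pred)
    return (tp + tn + eps) / (tp + fp + fn + tn + eps)


def f1_score(y_true, y_pred):
    p = precision(y_true, y_pred)
    r = sensitive(y_true, y_pred)
    return 2 * p * r / (p + r)


def m_iou(y_true, y_pred, batch=True, eps=1e-6):
    # Mean of foreground IoU and background IoU.
    tp, fp, fn, tn = _confusion(y_true, y_pred)
    iou_fg = (tp + eps) / (tp + fp + fn + eps)
    iou_bg = (tn + eps) / (tn + fn + fp + eps)
    return (iou_fg + iou_bg) / 2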
def train(train_i=0):
    NAME = 'D2F5_fold' + str(train_i + 1) + '_FPN.th'
    print(NAME)
    batchsize = 4
    txt_train = 'N5fold' + str(train_i + 1) + '_train.csv'
    txt_test = 'N5fold' + str(train_i + 1) + '_test.csv'
    dataset_train = MyDataset(root='/home/wangke/ultrasound_data2/',
                              txt_path=txt_train,
                              transform=transforms.ToTensor(),
                              target_transform=transforms.ToTensor())
    dataset_test = MyDataset(root='/home/wangke/ultrasound_data2/',
                             txt_path=txt_test,
                             transform=transforms.ToTensor(),
                             target_transform=transforms.ToTensor())
    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batchsize,
                                               shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batchsize,
                                              shuffle=False, num_workers=2,
                                              drop_last=True)
    mylog = open('logs/' + NAME + '.log', 'w')
    # model = FPN_Net(1, 1)
    # summary(model)
    solver = MyFrame(FPN_Net, dice_bce_loss, 2e-4)  # was misspelled 'slover'
    total_epoch = 100
    no_optim = 0
    train_epoch_best_loss = 10000
    best_test_score = 0
    for epoch in range(1, total_epoch + 1):
        data_loader_iter = iter(train_loader)
        data_loader_test = iter(test_loader)
        train_epoch_loss = 0
        index = 0
        tic = time()
        train_score = 0
        for img, mask in data_loader_iter:
            solver.set_input(img, mask)
            train_loss, pred = solver.optimize()
            train_score += dice_coeff(mask, pred.cpu().data, False)
            train_epoch_loss += train_loss
            index += 1
        test_sen = 0
        test_ppv = 0
        test_score = 0
        test_acc = 0
        test_spe = 0
        test_f1s = 0
        for img, mask in data_loader_test:
            solver.set_input(img, mask)
            pre_mask, _ = solver.test_batch()
            test_score += dice_coeff(mask, pre_mask, False)
            test_sen += sensitive(mask, pre_mask)
            test_ppv += precision(mask, pre_mask)
            test_acc += accuracy(mask, pre_mask)
            test_spe += specificity(mask, pre_mask)
            test_f1s += f1_score(mask, pre_mask)
        test_sen /= len(data_loader_test)
        test_ppv /= len(data_loader_test)
        test_score /= len(data_loader_test)
        test_acc /= len(data_loader_test)
        test_spe /= len(data_loader_test)
        test_f1s /= len(data_loader_test)
        if test_score > best_test_score:
            print('1. the dice score up to ', test_score, 'from ', best_test_score,
                  'saving the model', file=mylog, flush=True)
            print('1. the dice score up to ', test_score, 'from ', best_test_score,
                  'saving the model')
            best_test_score = test_score
            solver.save('./weights/' + NAME + '.th')
        train_epoch_loss = train_epoch_loss / len(data_loader_iter)
        train_score = train_score / len(data_loader_iter)
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', train_epoch_loss.cpu().data.numpy(),
              'train_score:', train_score, file=mylog, flush=True)
        print('test_dice_score: ', test_score, 'test_sen: ', test_sen.numpy(),
              'test_ppv: ', test_ppv.numpy(), 'test_acc: ', test_acc.numpy(),
              'test_spe: ', test_spe.numpy(), 'test_f1s: ', test_f1s.numpy(),
              'best_score is ', best_test_score, file=mylog, flush=True)
        print('********')
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', train_epoch_loss.cpu().data.numpy(),
              'train_score:', train_score)
        print('test_dice_score: ', test_score, 'test_sen: ', test_sen.numpy(),
              'test_ppv: ', test_ppv.numpy(), 'test_acc: ', test_acc.numpy(),
              'test_spe: ', test_spe.numpy(), 'test_f1s: ', test_f1s.numpy(),
              'best_score is ', best_test_score)
        if train_epoch_loss >= train_epoch_best_loss:
            no_optim += 1
        else:
            no_optim = 0
            train_epoch_best_loss = train_epoch_loss
    print('Finish!', file=mylog, flush=True)
    print('Finish!')
    mylog.close()
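
# --- Note: dice_bce_loss is defined elsewhere in the repo. Below is a minimal
# sketch of the usual BCE + soft-Dice combination, matching the
# loss(y_true, y_pred) argument order used by MyFrame and the loops above;
# an assumption for illustration, not the repo's actual implementation.
import torch.nn as nn


class dice_bce_loss(nn.Module):
    def __init__(self, batch=True, eps=1e-6):
        super(dice_bce_loss, self).__init__()
        self.batch = batch
        self.eps = eps
        self.bce = nn.BCELoss()

    def soft_dice(self, y_true, y_pred):
        # Soft (non-thresholded) Dice over the whole batch.
        inter = (y_true * y_pred).sum()
        union = y_true.sum() + y_pred.sum()
        return (2 * inter + self.eps) / (union + self.eps)

    def forward(self, y_true, y_pred):
        # Sum of pixel-wise BCE and the soft-Dice deficit.
        return self.bce(y_pred, y_true) + (1 - self.soft_dice(y_true, y_pred))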
def train_model(train_i, data_i, threshold, order, test_data1, test_data2):
    # data_i selects the training/validation data; test_data1/test_data2 select
    # the test sets. For pure testing the whole dataset is used as the test set
    # (use all_data).
    batchsize = 16
    i = train_i
    # net = _Res34_unet().cuda()
    net = _Reback_v2().cuda()
    # net = _nonlocal_unet().cuda()
    netname = 'res'
    if data_i == -1:
        epoch_num = 100
        txt_train = 'FuseDatafold' + str(train_i + 1) + '_train.csv'
        NAME = 'D2/D23_Res34_unet_' + netname + str(i + 1) + '_' + str(order)
        # NAME = 'D2/D23_Res34_unet_'+netname+str(i+1)+'_'+str(order)+'1'
        NAME2 = 'D2/inter_pic/'
        print(NAME)
        dataset_train = MyDataset(
            root='/home/wangke/ultrasound_data2/',
            txt_path=txt_train,
            lab_pics=['fuse_data_pic/', 'fuse_data_lab/', 'inter_pic/'],
            istrain=True,
            transform=transforms.ToTensor(),
            target_transform=transforms.ToTensor(),
            pre_num=0)
        txt_validate = 'FuseDatafold' + str(train_i + 1) + '_test.csv'
        dataset_validate = MyDataset(
            root='/home/wangke/ultrasound_data2/',
            txt_path=txt_validate,
            lab_pics=['fuse_data_pic/', 'fuse_data_lab/', 'inter_pic/'],
            transform=transforms.ToTensor(),
            target_transform=transforms.ToTensor(),
            pre_num=0)
    if test_data1 == 205:
        txt_test = 'all_data.csv'
        dataset_test1 = MyDataset(
            root='/home/wangke/ultrasound_data163/',
            txt_path=txt_test,
            lab_pics=['process_pic_163/', 'process_lab_163/'],
            transform=transforms.ToTensor(),
            target_transform=transforms.ToTensor(),
            pre_num=0)
    if test_data2 == 205:
        txt_test = 'all_data.csv'
        dataset_test2 = MyDataset(
            root='/home/wangke/ultrasound_data163/',
            txt_path=txt_test,
            lab_pics=['process_pic_163/', 'process_lab_163/'],
            transform=transforms.ToTensor(),
            target_transform=transforms.ToTensor(),
            pre_num=0)
    if test_data1 == 4:
        txt_test = 'all_data2.csv'
        dataset_test1 = MyDataset(root='/home/wangke/ultrasound_data4/',
                                  txt_path=txt_test,
                                  lab_pics=['data_pic/', 'data_lab/'],
                                  transform=transforms.ToTensor(),
                                  target_transform=transforms.ToTensor(),
                                  pre_num=0)
    if test_data2 == 4:
        txt_test = 'all_data2.csv'
        dataset_test2 = MyDataset(root='/home/wangke/ultrasound_data4/',
                                  txt_path=txt_test,
                                  lab_pics=['data_pic/', 'data_lab/'],
                                  transform=transforms.ToTensor(),
                                  target_transform=transforms.ToTensor(),
                                  pre_num=0)

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=batchsize,
                                               shuffle=True,
                                               num_workers=4,
                                               pin_memory=True)
    validate_loader = torch.utils.data.DataLoader(dataset_validate,
                                                  batch_size=batchsize,
                                                  shuffle=False,
                                                  num_workers=4,
                                                  pin_memory=True)
    test1_loader = torch.utils.data.DataLoader(dataset_test1,
                                               batch_size=batchsize,
                                               shuffle=False,
                                               num_workers=4,
                                               pin_memory=True)
    test2_loader = torch.utils.data.DataLoader(dataset_test2,
                                               batch_size=batchsize,
                                               shuffle=False,
                                               num_workers=4,
                                               pin_memory=True)
    num_no = 0
    max_numno = 5
    mylog = open('models/saved/' + NAME + '.log', 'w')
    total_epoch = epoch_num
    optimizer = torch.optim.Adam(params=net.parameters(), lr=1e-4,
                                 amsgrad=True, eps=1e-8)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
    best_validate_score = 0
    best_validate_loss = 999
    # loss_loss = LovaszHingeLoss()
    # loss_loss = dice_bce_loss2(batch=False)
    loss_loss = iou_loss2(batch=False)
    bce_loss = nn.BCELoss(reduction='mean')  # size_average is deprecated
    save_mid = 0
    # ssim_loss = SSIM(window_size=11, size_average=True)
    train_loss, test_loss2, test2_loss2, validate_loss2 = [], [], [], []
    train_dice, test_dice, test2_dice, validate_dice = [], [], [], []
    train_miou, test_miou, test2_miou, validate_miou = [], [], [], []
    train_sc = 0

    for epoch in range(1, total_epoch + 1):
        total_loss = 0
        loss_final_all = 0
        data_loader_iter = iter(train_loader)
        data_loader_validate = iter(validate_loader)
        data_loader_test = iter(test1_loader)
        data_loader_test2 = iter(test2_loader)
        tic = time()
        train_score = 0
        train_miou_b = 0
        isneedres = False
        net.train()
        for img, mask, id_img in data_loader_iter:
            # print(id_img)
            img = V(img.cuda(), requires_grad=False)
            mask_v = V(mask.cuda(), requires_grad=False)
            # res_v = V(res.cuda(), requires_grad=False)
            optimizer.zero_grad()
            # out_final, out_res = net(img, isneedres)
            # out_final, out_res = net(img)
            out_final2, out_res, out_final = net(img)
            # loss_all = dice_loss(mask_v, out_final)
            loss_all = loss_loss(mask_v, out_final)
            loss_step = loss_all
            loss_step += loss_loss(mask_v, out_final2)
            # print(train_sc-threshold)
            # if train_sc>threshold:
            # Residual supervision: the residual branch is trained to predict
            # the absolute difference between ground truth and prediction.
            res_lab = torch.abs(torch.add(mask_v, torch.neg(out_final)))
            res_lab = res_lab.detach()
            # if save_mid==1:
            #     for i, id_i in enumerate(id_img):
            #         cv2.imwrite('models/saved/'+NAME2+id_i, res_lab[i,0,:,:].cpu().data.numpy()*255)
            #         # plt.imsave('models/saved/'+NAME2+id_i+'.png', res_lab[i,0,:,:].cpu().data.numpy(), cmap='gray')
            loss_res = bce_loss(
                F.interpolate(out_res, size=(256, 256), mode='bilinear'),
                res_lab)
            # if loss_res<0.01: isneedres=True
            loss_step += loss_res
            # loss_all, loss_final = compute_loss(out_loss, out_final, mask_v)
            loss_step.backward()
            optimizer.step()
            total_loss += loss_all
            # loss_final_all += loss_final
            train_score += dice_coeff(mask, out_final.cpu().data, False)
            train_miou_b += m_iou(mask, out_final.cpu().data, False)
        total_loss = total_loss.cpu().data.numpy() / len(data_loader_iter)
        train_score = train_score / len(data_loader_iter)
        train_miou_b = train_miou_b / len(data_loader_iter)

        test_score = 0
        test_loss = 0
        test_miou_b = 0
        test2_score = 0
        test2_loss = 0
        test2_miou_b = 0
        test_final_loss = 0
        validate_score = 0
        validate_loss = 0
        validate_miou_b = 0
        net.eval()
        with torch.no_grad():
            for img, mask, id_img in data_loader_test:
                img = V(img.cuda(), requires_grad=False)
                mask_v = V(mask.cuda(), requires_grad=False)
                # out_final, out_res = net(img, isneedres)
                # out_final, out_res = net(img)
                out_final2, out_res, out_final = net(img)
                loss_all = loss_loss(mask_v, out_final)
                # loss_all += loss_loss(mask_v, out_1)
                test_loss += loss_all
                test_score += dice_coeff(mask, out_final.cpu().data, False)
                test_miou_b += m_iou(mask, out_final.cpu().data, False)
            for img, mask, id_img in data_loader_test2:
                img = V(img.cuda(), requires_grad=False)
                mask_v = V(mask.cuda(), requires_grad=False)
                out_final2, out_res, out_final = net(img)
                # loss_all += loss_loss(mask_v, out_1)
                loss_all = loss_loss(mask_v, out_final)
                test2_loss += loss_all
                test2_score += dice_coeff(mask, out_final.cpu().data, False)
                test2_miou_b += m_iou(mask, out_final.cpu().data, False)
            for img, mask, id_img in data_loader_validate:
                img = V(img.cuda(), requires_grad=False)
                mask_v = V(mask.cuda(), requires_grad=False)
                # res_v = V(res.cuda(), requires_grad=False)
                out_final2, out_res, out_final = net(img)
                loss_all = loss_loss(mask_v, out_final)
                # if train_sc>threshold:
                #     res_lab = torch.abs(torch.add(mask_v, torch.neg(out_final)))
                #     loss_res = bce_loss(out_res, res_lab)
                #     loss_all += loss_res
                validate_loss += loss_all
                validate_score += dice_coeff(mask, out_final.cpu().data, False)
                validate_miou_b += m_iou(mask, out_final.cpu().data, False)

        validate_score = validate_score / len(data_loader_validate)
        validate_loss = validate_loss.cpu().data.numpy() / len(data_loader_validate)
        validate_miou_b = validate_miou_b / len(data_loader_validate)
        test_score = test_score / len(data_loader_test)
        test_loss = test_loss.cpu().data.numpy() / len(data_loader_test)
        test_miou_b = test_miou_b / len(data_loader_test)
        test2_score = test2_score / len(data_loader_test2)
        test2_loss = test2_loss.cpu().data.numpy() / len(data_loader_test2)
        test2_miou_b = test2_miou_b / len(data_loader_test2)

        train_sc = validate_score
        if train_sc > threshold:
            # if save_mid==2: save_mid=-1
            save_mid += 1
        # print(train_sc, save_mid)

        train_loss.append(total_loss)
        test_loss2.append(test_loss)
        test2_loss2.append(test2_loss)
        validate_loss2.append(validate_loss)
        train_dice.append(train_score)
        test_dice.append(test_score)
        test2_dice.append(test2_score)
        validate_dice.append(validate_score)
        train_miou.append(train_miou_b)
        test_miou.append(test_miou_b)
        test2_miou.append(test2_miou_b)
        validate_miou.append(validate_miou_b)
        scheduler.step()

        # if validate_score>best_test_score:
        #     best_validate_score = validate_score
        #     torch.save(net, 'models/saved/'+NAME+'.pkl')
        #     print('saved, ', best_validate_score, file=mylog, flush=True)
        #     print('saved, ', best_validate_score)
        if validate_loss < best_validate_loss:
            best_validate_loss = validate_loss
            torch.save(net, 'models/saved/' + NAME + '.pkl')
            print('saved, ', best_validate_loss, file=mylog, flush=True)
            print('saved, ', best_validate_loss)
            num_no = 0
        else:
            num_no += 1
            if num_no >= max_numno:
                num_no = 0
                net = torch.load('models/saved/' + NAME + '.pkl')
                print('loaded, ', best_validate_loss, file=mylog, flush=True)
                print('loaded, ', best_validate_loss)

        print('********', file=mylog, flush=True)
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', total_loss, 'train_score:', train_score,
              'train_miou:', train_miou_b,
              'validate_loss:', validate_loss, 'validate_score:', validate_score,
              'validate_miou:', validate_miou_b,
              'test1_loss:', test_loss, 'test1_score:', test_score,
              'test1_miou:', test_miou_b,
              'test2_loss:', test2_loss, 'test2_score:', test2_score,
              'test2_miou:', test2_miou_b, file=mylog, flush=True)
        print('********')
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', total_loss, 'train_score:', train_score,
              'train_miou:', train_miou_b,
              'validate_loss:', validate_loss, 'validate_score:', validate_score,
              'validate_miou:', validate_miou_b,
              'test1_loss:', test_loss, 'test1_score:', test_score,
              'test1_miou:', test_miou_b,
              'test2_loss:', test2_loss, 'test2_score:', test2_score,
              'test2_miou:', test2_miou_b)

    # Plot the loss curves.
    plt.figure()
    epochs = range(total_epoch)
    plt.plot(epochs, train_loss, 'b', label='train_loss')
    plt.plot(epochs, validate_loss2, 'c', label='validate_loss')
    plt.plot(epochs, test_loss2, 'r', label='test1_loss')
    plt.plot(epochs, test2_loss2, 'g', label='test2_loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(loc="upper right")
    plt.savefig('models/saved/' + NAME + '_loss.png')

    # Plot the dice curves.
    plt.figure()
    plt.plot(epochs, train_dice, 'b', label='train_dice')
    plt.plot(epochs, validate_dice, 'c', label='validate_dice')
    plt.plot(epochs, test_dice, 'r', label='test1_dice')
    plt.plot(epochs, test2_dice, 'g', label='test2_dice')
    plt.xlabel('epoch')
    plt.ylabel('dice')
    plt.legend(loc="lower right")
    plt.savefig('models/saved/' + NAME + '_dice.png')
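
# --- A hypothetical five-fold driver, shown for illustration only: the
# threshold and order values are placeholders, and the .pkl name is derived
# from how train_model composes NAME above ('D23_Res34_unet_res<fold>_<order>').
if __name__ == '__main__':
    for fold in range(5):
        train_model(train_i=fold, data_i=-1, threshold=0.8, order=0,
                    test_data1=205, test_data2=4)
        test_model(fold, 'D23_Res34_unet_res' + str(fold + 1) + '_0.pkl',
                   'D23', isvalidate=True)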
def fpn_Net_Train(train_i=0):
    NAME = 'fold' + str(train_i + 1) + '3fpn-Net'
    model = FPN_Net(1, 1).cuda()
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    # model = FPN_Net(1, 1)
    # print(model)
    folds = data2()
    batch_size = 4
    (x_train, y_train), (x_test, y_test) = load_numpy(folds, train_i)
    dataset = torch.utils.data.TensorDataset(x_train, y_train)
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=4)
    mylog = open('logs/' + NAME + '.log', 'w')
    tic = time()
    no_optim = 0
    lr = 2e-4
    total_epoch = 300
    train_epoch_best_loss = 10000
    best_test_score = 0
    decay_factor = 1.5
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)
    loss_com = dice_bce_loss()
    for epoch in range(1, total_epoch + 1):
        data_loader_iter = iter(data_loader)
        train_epoch_loss = 0
        train_score = 0
        for img, mask in data_loader_iter:
            img = Variable(img.cuda())   # volatile is deprecated
            mask = Variable(mask.cuda())
            optimizer.zero_grad()
            pre = model.forward(img)
            loss = loss_com(mask, pre)
            loss.backward()
            optimizer.step()
            train_epoch_loss += loss
            train_score_b = dice_coeff(mask, pre, False)
            train_score += train_score_b * batch_size
        train_score /= x_train.size(0)
        train_epoch_loss /= len(data_loader_iter)

        with torch.no_grad():  # evaluate the whole test split in one pass
            test_img = Variable(x_test.cuda())
            test_mask = Variable(y_test.cuda())
            pre_test = model.forward(test_img)
            loss_test = loss_com(test_mask, pre_test)
            test_score = dice_coeff(test_mask, pre_test, False)

        if test_score > best_test_score:
            print('1. the dice score up to ', test_score, 'from ', best_test_score,
                  'saving the model', file=mylog, flush=True)
            print('1. the dice score up to ', test_score, 'from ', best_test_score,
                  'saving the model')
            best_test_score = test_score
            # There is no solver wrapper in this function (the original called
            # solver.save), so save the model weights directly.
            torch.save(model.state_dict(), './weights/' + NAME + '.th')
        print('********', file=mylog, flush=True)
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', train_epoch_loss, 'train_score:', train_score,
              file=mylog, flush=True)
        print('test_loss:', loss_test, 'test_dice_score: ', test_score,
              'best_score is ', best_test_score, file=mylog, flush=True)
        print('********')
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', train_epoch_loss, 'train_score:', train_score)
        print('test_loss:', loss_test, 'test_dice_score: ', test_score,
              'best_score is ', best_test_score)
        if train_epoch_loss >= train_epoch_best_loss:
            no_optim += 1
        else:
            no_optim = 0
            train_epoch_best_loss = train_epoch_loss
        if no_optim > Constants.NUM_UPDATE_LR:
            # The original checked solver.old_lr, which does not exist here;
            # the local lr is used instead.
            if lr < 5e-7:
                break
            if lr > 5e-5:
                model.load_state_dict(torch.load('./weights/' + NAME + '.th'))
                lr /= decay_factor
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
    print('Finish!', file=mylog, flush=True)
    print('Finish!')
    mylog.close()
def train(train_i=0):
    NAME = 'fold' + str(train_i + 1) + '_RDAUNet.th'  # was str(i + 1); i is undefined here
    # solver = MyFrame(FSP_Net, dice_bce_loss, 2e-4)
    solver = MyFrame(RDAUNet, dice_bce_loss, 2e-4)
    batchsize = 4
    txt_train = 'fold' + str(train_i + 1) + '_train.csv'
    txt_test = 'fold' + str(train_i + 1) + '_test.csv'
    dataset_train = MyDataset(txt_path=txt_train,
                              transform=transforms.ToTensor(),
                              target_transform=transforms.ToTensor())
    dataset_test = MyDataset(txt_path=txt_test,
                             transform=transforms.ToTensor(),
                             target_transform=transforms.ToTensor())
    # The loaders now reference dataset_train/dataset_test (both originally
    # pointed at an undefined 'dataset').
    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batchsize,
                                               shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batchsize,
                                              shuffle=False, num_workers=2)
    mylog = open('logs/' + NAME + '.log', 'w')
    total_epoch = 100
    no_optim = 0
    train_epoch_best_loss = 10000
    best_test_score = 0
    for epoch in range(1, total_epoch + 1):
        data_loader_iter = iter(train_loader)
        data_loader_test = iter(test_loader)
        train_epoch_loss = 0
        index = 0
        tic = time()
        train_score = 0
        for img, mask in data_loader_iter:
            solver.set_input(img, mask)
            train_loss, pred = solver.optimize()
            train_score += dice_coeff(mask, pred, False)
            train_epoch_loss += train_loss
            index += 1
        test_sen = 0
        test_ppv = 0
        test_score = 0
        for img, mask in data_loader_test:
            solver.set_input(img, mask)
            pre_mask, _ = solver.test_batch()
            # Metrics are computed against the batch mask (the original used
            # y_test, which is undefined in this function).
            test_score += dice_coeff(mask, pre_mask, False)
            test_sen += sensitive(mask, pre_mask)
            test_ppv += positivepv(mask, pre_mask)
        test_sen /= len(data_loader_test)
        test_ppv /= len(data_loader_test)
        test_score /= len(data_loader_test)
        if test_score > best_test_score:
            print('1. the dice score up to ', test_score, 'from ', best_test_score,
                  'saving the model', file=mylog, flush=True)
            print('1. the dice score up to ', test_score, 'from ', best_test_score,
                  'saving the model')
            best_test_score = test_score
            solver.save('./weights/' + NAME + '.th')
        train_epoch_loss = train_epoch_loss / len(data_loader_iter)
        train_score = train_score / len(data_loader_iter)
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', train_epoch_loss.cpu().data.numpy(),
              'train_score:', train_score, file=mylog, flush=True)
        print('test_dice_score: ', test_score, 'test_sen: ', test_sen,
              'test_ppv: ', test_ppv, 'best_score is ', best_test_score,
              file=mylog, flush=True)
        if train_epoch_loss >= train_epoch_best_loss:
            no_optim += 1
        else:
            no_optim = 0
            train_epoch_best_loss = train_epoch_loss
    print('Finish!', file=mylog, flush=True)
    print('Finish!')
    mylog.close()
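
# --- Note: MyFrame is the trainer wrapper imported from elsewhere in the repo.
# Below is a minimal sketch of the interface these loops rely on
# (set_input/optimize/test_batch/save/load/update_lr and the old_lr attribute);
# the constructor details are assumptions for illustration only.
import torch


class MyFrame:
    def __init__(self, net_cls, loss_cls, lr):
        self.net = net_cls().cuda()   # real nets may need constructor arguments
        self.loss = loss_cls()
        self.old_lr = lr
        self.optimizer = torch.optim.Adam(self.net.parameters(), lr=lr)

    def set_input(self, img, mask):
        self.img, self.mask = img.cuda(), mask.cuda()

    def optimize(self):
        # One gradient step; returns the loss and the prediction.
        self.optimizer.zero_grad()
        pred = self.net(self.img)
        loss = self.loss(self.mask, pred)
        loss.backward()
        self.optimizer.step()
        return loss.detach(), pred.detach().cpu()

    def test_batch(self):
        # Forward pass only; the second return slot mirrors the `pre_mask, _`
        # unpacking used by the callers above.
        with torch.no_grad():
            pred = self.net(self.img)
        return pred.cpu(), None

    def save(self, path):
        torch.save(self.net.state_dict(), path)

    def load(self, path):
        self.net.load_state_dict(torch.load(path))

    def update_lr(self, rate, factor=False, mylog=None):
        new_lr = self.old_lr / rate if factor else rate
        for group in self.optimizer.param_groups:
            group['lr'] = new_lr
        if mylog is not None:
            print('update learning rate: %f -> %f' % (self.old_lr, new_lr),
                  file=mylog, flush=True)
        self.old_lr = new_lr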
def CE_Net_Train(train_i=0):
    NAME = 'fold' + str(train_i + 1) + '_6CE-Net' + Constants.ROOT.split('/')[-1]  # was str(i + 1); i is undefined
    solver = MyFrame(CE_Net_, dice_bce_loss, 2e-4)
    batchsize = torch.cuda.device_count() * Constants.BATCHSIZE_PER_CARD  # 4
    # For different 2D medical image segmentation tasks, please specify the
    # dataset which you use; for example, specify "dataset = 'DRIVE'" for
    # retinal vessel detection.
    txt_train = 'fold' + str(train_i + 1) + '_train.csv'
    txt_test = 'fold' + str(train_i + 1) + '_test.csv'
    dataset_train = MyDataset(txt_path=txt_train,
                              transform=transforms.ToTensor(),
                              target_transform=transforms.ToTensor())
    dataset_test = MyDataset(txt_path=txt_test,
                             transform=transforms.ToTensor(),
                             target_transform=transforms.ToTensor())
    # The loaders now reference dataset_train/dataset_test and use the correct
    # batch_size keyword (originally 'dataset' and 'batchsize=').
    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batchsize,
                                               shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batchsize,
                                              shuffle=False, num_workers=2)

    # start the logging files
    mylog = open('logs/' + NAME + '.log', 'w')
    no_optim = 0
    total_epoch = Constants.TOTAL_EPOCH                   # 300
    train_epoch_best_loss = Constants.INITAL_EPOCH_LOSS   # 10000
    best_test_score = 0
    for epoch in range(1, total_epoch + 1):
        data_loader_iter = iter(train_loader)
        data_loader_test = iter(test_loader)
        train_epoch_loss = 0
        index = 0
        tic = time()
        # train
        for img, mask in data_loader_iter:
            solver.set_input(img, mask)
            train_loss, pred = solver.optimize()
            train_epoch_loss += train_loss
            index = index + 1
        # test
        test_sen = 0
        test_ppv = 0
        test_score = 0
        for img, mask in data_loader_test:
            solver.set_input(img, mask)
            pre_mask, _ = solver.test_batch()
            # Metrics are computed against the batch mask (the original used
            # y_test, which is undefined in this function).
            test_score += dice_coeff(mask, pre_mask, False)
            test_sen += sensitive(mask, pre_mask)
            test_ppv += positivepv(mask, pre_mask)
        # Normalize before comparing and printing (the original compared the
        # raw sums against best_test_score).
        test_score /= len(data_loader_test)
        test_sen /= len(data_loader_test)
        test_ppv /= len(data_loader_test)
        print(test_sen, test_ppv, test_score)
        # Show the original images, prediction and ground truth on visdom:
        # show_image = (img + 1.6) / 3.2 * 255.
        # viz.img(name='images', img_=show_image[0, :, :, :])
        # viz.img(name='labels', img_=mask[0, :, :, :])
        # viz.img(name='prediction', img_=pred[0, :, :, :])
        if test_score > best_test_score:
            print('1. the dice score up to ', test_score, 'from ', best_test_score,
                  'saving the model')
            best_test_score = test_score
            solver.save('./weights/' + NAME + '.th')
        train_epoch_loss = train_epoch_loss / len(data_loader_iter)
        print('********', file=mylog, flush=True)
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', train_epoch_loss.cpu().data.numpy(),
              file=mylog, flush=True)
        print('test_dice_score: ', test_score, 'test_sen: ', test_sen,
              'test_ppv: ', test_ppv, 'best_score is ', best_test_score,
              file=mylog, flush=True)
        print('********')
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', train_epoch_loss.cpu().data.numpy())
        print('test_dice_score: ', test_score, 'test_sen: ', test_sen,
              'test_ppv: ', test_ppv, 'best_score is ', best_test_score)
        if train_epoch_loss >= train_epoch_best_loss:
            no_optim += 1
        else:
            no_optim = 0
            train_epoch_best_loss = train_epoch_loss
        # if no_optim > Constants.NUM_EARLY_STOP:
        #     print('early stop at %d epoch' % epoch, file=mylog, flush=True)
        #     print('early stop at %d epoch' % epoch)
        #     break
        if no_optim > Constants.NUM_UPDATE_LR:
            if solver.old_lr < 5e-7:
                break
            if solver.old_lr > 5e-4:
                solver.load('./weights/' + NAME + '.th')
                solver.update_lr(1.5, factor=True, mylog=mylog)
    print('Finish!', file=mylog, flush=True)
    print('Finish!')
    mylog.close()
def train_model(train_i):
    batchsize = 4
    i = train_i
    # NAME = 'F51_fold'+str(i+1)+'_UNet'
    # net = UNet(usegaussian=False).cuda()
    # NAME = 'GaPF5_fold'+str(i+1)+'_UNet'
    # net = UNet(usegaussian=True).cuda()
    # NAME = 'EMF5_NOpretrain_fold'+str(i+1)+'_FSPNet'
    # NAME = 'EMF5_fold'+str(i+1)+'_FSPNet'
    NAME = 'F5_fold' + str(i + 1) + '_FSPNet'
    net = FPN_Net(is_ema=False).cuda()
    # net.apply(weights_init)
    print(NAME)
    txt_train = 'D163N5fold' + str(train_i + 1) + '_train.csv'
    txt_test = 'D163N5fold' + str(train_i + 1) + '_test.csv'
    dataset_train = MyDataset(root='/home/wangke/ultrasound_data163/',
                              txt_path=txt_train,
                              transform=transforms.ToTensor(),
                              target_transform=transforms.ToTensor())
    dataset_test = MyDataset(root='/home/wangke/ultrasound_data163/',
                             txt_path=txt_test,
                             transform=transforms.ToTensor(),
                             target_transform=transforms.ToTensor())
    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=batchsize,
                                               shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=batchsize,
                                              shuffle=False, num_workers=2)
    mylog = open('models/saved/' + NAME + '.log', 'w')
    total_epoch = 300
    optimizer = torch.optim.Adam(params=net.parameters(), lr=1e-3)
    # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
    best_test_score = 0
    dice_loss = dice_bce_loss()
    # Was range(1, total_epoch), which skipped the last epoch.
    for epoch in range(1, total_epoch + 1):
        total_loss = 0
        data_loader_iter = iter(train_loader)
        data_loader_test = iter(test_loader)
        tic = time()
        train_score = 0
        net.train()
        for img, mask in data_loader_iter:
            img = V(img.cuda())   # volatile is deprecated
            mask_v = V(mask.cuda())
            optimizer.zero_grad()
            output = net(img)
            loss = dice_loss(mask_v, output)
            loss.backward()
            optimizer.step()
            total_loss += loss
            train_score += dice_coeff(mask, output.cpu().data, False)
        test_score = 0
        test_loss = 0
        net.eval()
        with torch.no_grad():
            for img, mask in data_loader_test:
                # print(img.shape)
                img = V(img.cuda())
                # mask_v = V(mask.cuda())
                output = net(img)
                # test_loss += dice_loss(mask_v, output)
                # print(dice_coeff(mask, output.cpu().data, False))
                test_score += dice_coeff(mask, output.cpu().data, False)
        total_loss = total_loss / len(data_loader_iter)
        train_score = train_score / len(data_loader_iter)
        test_score = test_score / len(data_loader_test)
        # test_loss = test_loss/len(data_loader_test)
        # scheduler.step()
        if test_score > best_test_score:
            best_test_score = test_score
            torch.save(net, 'models/saved/' + NAME + '.pkl')
            print('saved, ', best_test_score, file=mylog, flush=True)
            print('saved, ', best_test_score)
        print('********', file=mylog, flush=True)
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', total_loss.cpu().data.numpy(),
              'train_score:', train_score, 'test_score:', test_score,
              'best_score is ', best_test_score, file=mylog, flush=True)
        print('********')
        print('epoch:', epoch, ' time:', int(time() - tic),
              'train_loss:', total_loss.cpu().data.numpy(),
              'train_score:', train_score, 'test_score:', test_score,
              'best_score is ', best_test_score)
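
# --- Note: MyDataset is defined elsewhere in the repo. Below is a minimal
# sketch assuming the CSV lists file names in its second column (as test_model
# reads them with usecols=[1]) and that images/labels sit in the two folders
# given by lab_pics; the istrain/pre_num options used by the residual-training
# variant above are omitted. An illustration, not the repo's actual class.
import cv2
import pandas as pd
import torch.utils.data as data


class MyDataset(data.Dataset):
    def __init__(self, root, txt_path, lab_pics=('data_pic/', 'data_lab/'),
                 transform=None, target_transform=None):
        names = pd.read_csv(root + txt_path, sep=',', usecols=[1]).values.tolist()
        self.names = [n[0] for n in names]
        self.root = root
        self.lab_pics = lab_pics
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        return len(self.names)

    def __getitem__(self, idx):
        # Load the grayscale image/label pair and scale both to [0, 1].
        name = self.names[idx]
        img = cv2.imread(self.root + self.lab_pics[0] + name, cv2.IMREAD_GRAYSCALE)
        lab = cv2.imread(self.root + self.lab_pics[1] + name, cv2.IMREAD_GRAYSCALE)
        img = img.astype('float32') / 255.
        lab = lab.astype('float32') / 255.
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            lab = self.target_transform(lab)
        return img, lab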
def Net_Train(train_i=0):
    # NAME = 'fold'+str(train_i+1)+'_25ATT-UNet'
    NAME = 'fold' + str(train_i + 1) + '_25NEST-UNet'
    mylog = open('logs/' + NAME + '.log', 'w')
    print(NAME)
    print(NAME, file=mylog, flush=True)
    # model = AttU_Net(img_ch=1, output_ch=1).cuda()
    model = NestedUNet(in_ch=1, out_ch=1).cuda()
    # print(model)
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    # model = FPN_Net(1, 1)
    # print(model)
    folds = data2()
    test_data = folds[train_i]
    batch_size1 = 4
    batch_size = 2
    (x_train, y_train), (x_test, y_test) = load_numpy(folds, train_i)
    dataset = torch.utils.data.TensorDataset(x_train, y_train)
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size1,
                                              shuffle=True,
                                              num_workers=4)
    # data_test = torch.utils.data.TensorDataset(x_test, y_test)
    # loader_test = torch.utils.data.DataLoader(data_test, batch_size=batch_size,
    #                                           shuffle=True, num_workers=4)
    no_optim = 0
    lr = 2e-4
    total_epoch = 250
    train_epoch_best_loss = 10000
    best_test_score = 0
    decay_factor = 1.5
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)
    for epoch in range(1, total_epoch + 1):
        tic = time()
        data_loader_iter = iter(data_loader)
        # data_test_iter = iter(loader_test)
        train_epoch_loss = 0
        train_score = 0
        test_epoch_loss = 0
        test_score = 0
        test_sen = 0
        test_ppv = 0
        for img, mask in data_loader_iter:
            img = Variable(img.cuda())   # volatile is deprecated
            mask = Variable(mask.cuda())
            optimizer.zero_grad()
            pre = model.forward(img)
            loss = calc_loss(pre, mask)
            loss.backward()
            optimizer.step()
            train_epoch_loss += loss.data
            train_score_b = dice_coeff(mask, pre, False)
            train_score += train_score_b.data * batch_size1
        train_score /= x_train.size(0)
        train_score = train_score.cpu().data.numpy()
        train_epoch_loss /= len(data_loader_iter)
        train_epoch_loss = train_epoch_loss.cpu().data.numpy()
        # print('epoch:', epoch, ' time:', int(time() - tic), 'train_loss:', train_epoch_loss, 'train_score:', train_score)

        with torch.no_grad():
            # for img, mask in data_test_iter:
            img = Variable(x_test.cuda())
            mask = Variable(y_test.cuda())
            pre = model.forward(img)
            test_epoch_loss = calc_loss(pre, mask)
            # test_epoch_loss += loss.data
            test_score = dice_coeff(mask, pre, False)
            # test_score += test_score_b.data*batch_size
            pre[pre > 0.5] = 1
            pre[pre <= 0.5] = 0
            test_sen = sensitive(mask, pre)
            # test_sen += test_sen_b.data*batch_size
            test_ppv = positivepv(mask, pre)
            # test_ppv += test_ppv_b.data*batch_size
        # test_score /= x_test.size(0)
        test_score = test_score.cpu().data.numpy()
        # test_sen /= x_test.size(0)
        test_sen = test_sen.cpu().data.numpy()
        # test_ppv /= x_test.size(0)
        test_ppv = test_ppv.cpu().data.numpy()
        # test_epoch_loss /= len(data_test_iter)
        test_epoch_loss = test_epoch_loss.cpu().data.numpy()

        print('********', file=mylog, flush=True)
        print('epoch:', epoch, train_i, ' time:', int(time() - tic),
              'train_loss:', train_epoch_loss, 'train_score:', train_score,
              end=' ', file=mylog, flush=True)
        print('test_loss:', test_epoch_loss, 'test_dice_score: ', test_score,
              'test_sen: ', test_sen, 'test_ppv: ', test_ppv,
              'best_score is ', best_test_score, file=mylog, flush=True)
        print('********')
        print('epoch:', epoch, train_i, ' time:', int(time() - tic),
              'train_loss:', train_epoch_loss, 'train_score:', train_score, end=' ')
        print('test_loss:', test_epoch_loss, 'test_dice_score: ', test_score,
              'test_sen: ', test_sen, 'test_ppv: ', test_ppv,
              'best_score is ', best_test_score)

        if test_score > best_test_score:
            print('1. the dice score up to ', test_score, 'from ', best_test_score,
                  'saving the model', file=mylog, flush=True)
            print('1. the dice score up to ', test_score, 'from ', best_test_score,
                  'saving the model')
            best_test_score = test_score
            torch.save(model, './weights/' + NAME + '.pkl')
            if best_test_score > 0.75:
                # Save qualitative predictions for every test case.
                with torch.no_grad():
                    for test in test_data:
                        img = test[0].reshape(1, 1, 256, 256).astype('float32')
                        img = torch.from_numpy(img)
                        img = Variable(img.cuda())
                        pre = model.forward(img).cpu().data.numpy()
                        plt.imsave('../../model/fold/png_constract/' + test[-1] + '_fold_z5_nest.png',
                                   pre[0, 0, :, :], cmap='gray')
        if train_epoch_loss >= train_epoch_best_loss:
            no_optim += 1
        else:
            no_optim = 0
            train_epoch_best_loss = train_epoch_loss
        if no_optim > 10:
            if lr < 1e-7:
                break
            if lr > 1e-5:
                # model.load_state_dict(torch.load('./weights/' + NAME + '.th'))
                lr /= decay_factor
                print('update learning rate: %f -> %f' % (lr * decay_factor, lr),
                      file=mylog, flush=True)
                print('update learning rate: %f -> %f' % (lr * decay_factor, lr))
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr
    print('Finish!', file=mylog, flush=True)
    print('Finish!')
    mylog.close()
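
# --- Note: calc_loss is imported from elsewhere; unlike dice_bce_loss above it
# is called as calc_loss(pred, target). Below is a minimal sketch of the usual
# weighted BCE + soft-Dice form, assumed here for illustration only.
import torch.nn.functional as F


def calc_loss(pred, target, bce_weight=0.5, eps=1e-6):
    # Pixel-wise binary cross-entropy on the sigmoid outputs.
    bce = F.binary_cross_entropy(pred, target)
    # Soft Dice over the whole batch.
    inter = (pred * target).sum()
    dice = (2 * inter + eps) / (pred.sum() + target.sum() + eps)
    # Weighted sum: bce_weight on BCE, the remainder on the Dice deficit.
    return bce_weight * bce + (1 - bce_weight) * (1 - dice)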