Example #1
def train(epoch, iter_start=0):
    global global_step, kt

    netHg.train()
    pbar = tqdm.tqdm(train_loader,
                     desc='Epoch %02d' % epoch,
                     dynamic_ncols=True)
    pbar_info = tqdm.tqdm(bar_format='{bar}{postfix}')
    for it, sample in enumerate(pbar, start=iter_start):
        global_step += 1
        if FLAGS.debug:
            image, masks, keypoints, heatmaps, img_ids = sample
        else:
            image, masks, keypoints, heatmaps = sample
        image = Variable(image)
        masks = Variable(masks)
        keypoints = Variable(keypoints)
        heatmaps = Variable(heatmaps)
        if FLAGS.cuda:
            # `async=` was renamed to `non_blocking=`; `async` is a reserved
            # word in Python 3.7+ and no longer parses
            image = image.cuda(non_blocking=FLAGS.pinMem)
            masks = masks.cuda(non_blocking=FLAGS.pinMem)
            keypoints = keypoints.cuda(non_blocking=FLAGS.pinMem)
            heatmaps = heatmaps.cuda(non_blocking=FLAGS.pinMem)

        outputs = netHg(image)
        push_loss, pull_loss, detection_loss = calc_loss(
            outputs, keypoints, heatmaps, masks)

        image_s = nn.functional.avg_pool2d(image, 4)
        inp_real = torch.cat([image_s, heatmaps], dim=1)
        d_real = netD(inp_real)
        loss_d_real = criterion_D(d_real, heatmaps)

        pred_heatmaps = outputs[:, -1, :17].squeeze(
            dim=1
        )  # Note: assign the dimension explicitly to avoid an unexpected freeze
        inp_fake = torch.cat([image_s, pred_heatmaps], dim=1)
        d_fake = netD(inp_fake)
        loss_d_fake = criterion_D(d_fake, pred_heatmaps)

        loss_hg = 0
        toprint = ''
        sum_dict = {}
        for loss, weight, name in zip(
            [push_loss, pull_loss, detection_loss], [1e-3, 1e-3, 1],
            ['push_loss', 'pull_loss', 'detection_loss']):
            loss_temp = torch.mean(loss)
            sum_dict[name] = getValue(loss_temp)
            loss_temp *= weight
            loss_hg += loss_temp
            toprint += '{:.8f} '.format(getValue(loss_temp))

        loss_d = loss_d_real - kt * loss_d_fake
        loss_hg = loss_hg + FLAGS.lambda_G * loss_d_fake

        optimD.zero_grad()
        loss_d.backward(retain_graph=True)
        optimD.step()
        optimHg.zero_grad()
        loss_hg.backward()
        optimHg.step()

        # update kt
        loss_d_real_ = getValue(loss_d_real)
        loss_d_fake_ = getValue(loss_d_fake)
        balance = FLAGS.gamma * loss_d_real_ - loss_d_fake_
        kt = kt + FLAGS.kt_lr * balance
        kt = min(1, max(0, kt))
        measure = loss_d_real_ + abs(balance)

        # Summary
        sumWriter.add_scalar('loss_hg', loss_hg, global_step)
        for key, value in sum_dict.items():
            sumWriter.add_scalar(key, value, global_step)
        sumWriter.add_scalar('loss_d', loss_d, global_step)
        toprint += ', loss_d: {:.8f}'.format(getValue(loss_d))
        sumWriter.add_scalar('loss_d_real', loss_d_real, global_step)
        sumWriter.add_scalar('loss_d_fake', loss_d_fake, global_step)
        sumWriter.add_scalar('measure', measure, global_step)
        sumWriter.add_scalar('kt', kt, global_step)

        pbar_info.set_postfix_str(toprint)
        pbar_info.update()

        del outputs, push_loss, pull_loss, detection_loss, loss_hg, \
            d_real, d_fake, loss_d_real, loss_d_fake, loss_d

    pbar.close()
    pbar_info.close()
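In the loop above, `kt` follows the BEGAN-style equilibrium rule: it is nudged by kt_lr * (gamma * L_real - L_fake) and clipped to [0, 1], and `measure` is the usual BEGAN convergence measure. A minimal standalone sketch of the same update, assuming the losses are already plain floats:

def update_kt(kt, loss_d_real, loss_d_fake, gamma, kt_lr):
    # push kt so that loss_d_fake tracks gamma * loss_d_real (BEGAN balance)
    balance = gamma * loss_d_real - loss_d_fake
    kt = min(1.0, max(0.0, kt + kt_lr * balance))
    convergence = loss_d_real + abs(balance)  # BEGAN convergence measure
    return kt, convergence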
Example #2
    for x, y in train_loader:
        x, y = x.to(device), y.to(device)

        #If you want to inspect the input images with their augmentations
        #(to check the data flowing into the net), uncomment:
        #input_images(x, y, i, n_iter, k)

        # grid_img = torchvision.utils.make_grid(x)
        #writer1.add_image('images', grid_img, 0)

        # grid_lab = torchvision.utils.make_grid(y)

        opt.zero_grad()

        y_pred = model_test(x)
        lossT = calc_loss(y_pred, y)  # Dice_loss Used

        train_loss += lossT.item() * x.size(0)
        lossT.backward()
        #  plot_grad_flow(model_test.named_parameters(), n_iter)
        opt.step()
        x_size = lossT.item() * x.size(0)
        scheduler(opt, c, i)
        c = c + 1
        k = 2

    #    for name, param in model_test.named_parameters():
    #        name = name.replace('.', '/')
    #        writer1.add_histogram(name, param.data.cpu().numpy(), i + 1)
    #        writer1.add_histogram(name + '/grad', param.grad.data.cpu().numpy(), i + 1)
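Since `train_loss` accumulates `lossT.item() * x.size(0)` (the mean batch loss scaled back up by batch size), the per-sample epoch loss is recovered by dividing by the dataset size after the loop. A one-line sketch, assuming `train_loader` wraps a standard Dataset:

    epoch_loss = train_loss / len(train_loader.dataset)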
Example #3
import torch
import torch.nn as nn
import torchvision.utils as vutils
from torch.utils.data import DataLoader
from losses import calc_loss, dice_loss, threshold_predictions_v, threshold_predictions_p
# U_Net and CaptchaData are presumably defined in this repo's local modules

model = U_Net(3, 1)
model.load_state_dict(torch.load("unet.pt"))
model = model.cuda()
TrainData = CaptchaData("D:\\UnetData\\img\\")
# The original passed 4 positionally, which lands on `shuffle`; keyword
# arguments make the likely intent (4 worker processes) explicit
dataload = DataLoader(TrainData, batch_size=38, shuffle=True, num_workers=4, drop_last=False)
optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)  # define the optimizer
criterion = nn.BCEWithLogitsLoss()
for epoch in range(3000):
    for img, label in dataload:
        img = img.cuda()
        label = label.cuda()
        pred = model(img)
        optimizer.zero_grad()
        loss = calc_loss(pred, label.view(-1, 1, 256, 256))
        loss.backward()
        optimizer.step()

    if epoch % 10 == 0:
        print(loss.item())
        pred = pred * 255
        vutils.save_image(img.data, "img.jpg")
        vutils.save_image(pred.data, "Result.jpg")
        vutils.save_image(label.view(-1, 1, 256, 256).data, "Label.jpg")
        torch.save(model.state_dict(), "unet.pt")
        x = torch.rand(1, 3, 256, 256, device="cuda")
        # use the public export API instead of the private torch.onnx._export
        torch.onnx.export(model, x, "unet.onnx", export_params=True)
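`torch.onnx.export` traces the model with the dummy input `x`, so the exported graph is fixed to that input shape unless dynamic axes are declared. A hedged variant that allows a variable batch dimension (the `input`/`mask` names are illustrative, not from the source):

torch.onnx.export(
    model, x, "unet.onnx", export_params=True,
    input_names=["input"], output_names=["mask"],
    dynamic_axes={"input": {0: "batch"}, "mask": {0: "batch"}},
)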

Example #4
 def loss_function(self, y_pre, y, **kwargs) -> dict:
     lossT = calc_loss(y_pre, y)
     return {'loss': lossT}
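Returning the loss in a dict keeps the interface extensible: extra terms can be added later without changing the training loop. A minimal usage sketch, where `module` and `opt` are hypothetical stand-ins for the owning model and its optimizer:

losses = module.loss_function(y_pred, y)
opt.zero_grad()
losses['loss'].backward()
opt.step()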
Example #5
 valid_loss = 0.0
 train_loss = 0.0
 crossen_loss = 0.0
 sum_precies = 0.0  # running precision ("precies" follows the repo's spelling)
 sum_recall = 0.0
 since = time.time()
 scheduler.step(i)
 lr = scheduler.get_lr()
 #######################################################
 # Training Data
 #######################################################
 model_test.train()
 k = 1
 for x, y in train_loader_1:
     x, y = x.to(device), y.to(device)
     opt.zero_grad()
     y_pred = model_test(x)
     lossT = calc_loss(y_pred, y)  # Dice_loss Used
     train_loss += lossT.item()
     CE_loss = F.binary_cross_entropy_with_logits(y_pred, y)
     crossen_loss += CE_loss.item()
     y_0_1 = torch.sigmoid(y_pred)  # F.sigmoid is deprecated; torch.sigmoid is equivalent
     precies, recall = precies_recall(y_0_1, y)
     # print(lossT.item(),CE_loss.item(),precies,recall)
     sum_precies += precies
     sum_recall += recall
     # print(lossT.item())
     lossT.backward()
     #  plot_grad_flow(model_test.named_parameters(), n_iter)
     opt.step()
     # y_pred=torch.tensor(y_pred,dtype=torch.long)
     # y=torch.tensor(y,dtype=torch.long)
     # y=y.to(device)
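For reference, a minimal sketch of what a binary-mask precision/recall helper like `precies_recall` typically computes, assuming a 0.5 threshold on the sigmoid output (the repo's actual implementation may differ):

def precision_recall(prob, target, thresh=0.5, eps=1e-8):
    pred = (prob > thresh).float()
    tp = (pred * target).sum()
    precision = tp / (pred.sum() + eps)
    recall = tp / (target.sum() + eps)
    return precision.item(), recall.item()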
Example #6
        ### single loss implementation
        #y_pred = model_test(x)
        #lossT = calc_loss(y_pred, y)     # Dice_loss Used

        ### supervision training implementation
        pred5, pred4, pred3, pred = model_test(x)
        y3 = F.interpolate(y, scale_factor=0.5)
        #print("inside train")

        y4 = F.interpolate(y, scale_factor=0.25)
        y5 = F.interpolate(y, scale_factor=0.125)
        #print(y3.shape)
        #print(y4.shape)
        #print(y5.shape)
        lossMain = calc_loss(pred, y)
        loss3 = calc_loss(pred3, y3)
        loss4 = calc_loss(pred4, y4)
        loss5 = calc_loss(pred5, y5)
        ### the weight parameters need to be fine-tuned
        ### consider applying a weight-decay strategy for them
        lossT = 0.35 * lossMain + 0.25 * loss3 + 0.25 * loss4 + 0.15 * loss5

        train_loss += lossT.item() * x.size(0)
        lossT.backward()
        #  plot_grad_flow(model_test.named_parameters(), n_iter)
        opt.step()
        x_size = lossT.item() * x.size(0)
        scheduler(opt, c, i)
        c = c + 1
        #k = 2
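The comments note that calc_loss is a Dice loss; a common pattern for segmentation is a weighted Dice + BCE combination. The sketch below shows that general pattern under stated assumptions; it is not necessarily this repo's exact calc_loss:

import torch
import torch.nn.functional as F

def dice_bce_loss(logits, target, smooth=1.0, bce_weight=0.5):
    prob = torch.sigmoid(logits)
    inter = (prob * target).sum()
    dice = 1 - (2 * inter + smooth) / (prob.sum() + target.sum() + smooth)
    bce = F.binary_cross_entropy_with_logits(logits, target)
    return bce_weight * bce + (1 - bce_weight) * dice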
Example #7
def Net_Train(train_i=0):
    # NAME = 'fold'+str(train_i+1)+'_25ATT-UNet'
    NAME = 'fold'+str(train_i+1)+'_25NEST-UNet'
    mylog = open('logs/' + NAME + '.log', 'w')
    print(NAME)
    print(NAME, file=mylog, flush=True)
    # model = AttU_Net(img_ch=1, output_ch=1).cuda()
    model = NestedUNet(in_ch=1, out_ch=1).cuda()
    # print(model)
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    # model = FPN_Net(1, 1)
    # print(model)
    folds = data2()
    test_data=folds[train_i]
    batch_size1 = 4
    batch_size = 2
    (x_train, y_train), (x_test, y_test) = load_numpy(folds, train_i)
    dataset = torch.utils.data.TensorDataset(x_train, y_train)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size1,
        shuffle=True,
        num_workers=4)

    # data_test = torch.utils.data.TensorDataset(x_test, y_test)
    # loader_test = torch.utils.data.DataLoader(
    #     data_test,
    #     batch_size=batch_size,
    #     shuffle=True,
    #     num_workers=4)
  
    no_optim = 0
    lr = 2e-4
    total_epoch = 250
    train_epoch_best_loss = 10000
    best_test_score = 0
    decay_factor = 1.5
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)

    for epoch in range(1, total_epoch + 1):
        tic = time()
        data_loader_iter = iter(data_loader)
        # data_test_iter = iter(loader_test)
        train_epoch_loss = 0
        train_score = 0
        test_epoch_loss = 0
        test_score = 0
        test_sen = 0
        test_ppv = 0

        for img, mask in data_loader_iter:
            # Variable(..., volatile=False) is deprecated; plain tensors suffice
            img = img.cuda()
            mask = mask.cuda()
            optimizer.zero_grad()
            pre = model(img)
            loss = calc_loss(pre, mask)
            loss.backward()
            optimizer.step()
            train_epoch_loss += loss.item()
            train_score_b = dice_coeff(mask, pre, False)
            train_score += train_score_b.item() * batch_size1
        train_score /= x_train.size(0)
        train_epoch_loss /= len(data_loader)
        # print('epoch:', epoch, '    time:', int(time() - tic), 'train_loss:', train_epoch_loss, 'train_score:', train_score)
        
        # evaluate on the held-out fold; no_grad() replaces the old volatile=True
        with torch.no_grad():
            img = x_test.cuda()
            mask = y_test.cuda()
            pre = model(img)
            test_epoch_loss = calc_loss(pre, mask).item()
            test_score = dice_coeff(mask, pre, False).item()
            # binarize the prediction before computing sensitivity / PPV
            pre[pre > 0.5] = 1
            pre[pre <= 0.5] = 0
            test_sen = sensitive(mask, pre).item()
            test_ppv = positivepv(mask, pre).item()

        print('********', file=mylog, flush=True)
        print('epoch:', epoch, train_i, ' time:', int(time() - tic), 'train_loss:', train_epoch_loss, 'train_score:', train_score, end='  ', file=mylog, flush=True)
        print('test_loss:', test_epoch_loss, 'test_dice_score: ', test_score, 'test_sen: ', test_sen, 'test_ppv: ', test_ppv, 'best_score is ', best_test_score, file=mylog, flush=True)

        print('********')
        print('epoch:', epoch, train_i, ' time:', int(time() - tic), 'train_loss:', train_epoch_loss, 'train_score:', train_score,  end='  ')
        print('test_loss:', test_epoch_loss, 'test_dice_score: ', test_score, 'test_sen: ', test_sen, 'test_ppv: ', test_ppv, 'best_score is ', best_test_score)

        if test_score > best_test_score:
            print('1. the dice score up to ', test_score, 'from ', best_test_score, 'saving the model', file=mylog, flush=True)
            print('1. the dice score up to ', test_score, 'from ', best_test_score, 'saving the model')
            best_test_score = test_score
            torch.save(model, './weights/' + NAME + '.pkl')
            if best_test_score > 0.75:
                with torch.no_grad():
                    for test in test_data:
                        img = test[0].reshape(1, 1, 256, 256).astype('float32')
                        img = torch.from_numpy(img).cuda()
                        pre = model(img).cpu().numpy()
                        plt.imsave('../../model/fold/png_constract/' + test[-1] + '_fold_z5_nest.png', pre[0, 0, :, :], cmap='gray')


        if train_epoch_loss >= train_epoch_best_loss:
            no_optim += 1
        else:
            no_optim = 0
            train_epoch_best_loss = train_epoch_loss

        if no_optim > 10:
            if lr < 1e-7:
                break
            if lr > 1e-5:
                # model.load_state_dict(torch.load('./weights/' + NAME + '.th'))
                lr /= decay_factor
                print('update learning rate: %f -> %f' % (lr * decay_factor, lr), file=mylog, flush=True)
                print('update learning rate: %f -> %f' % (lr * decay_factor, lr))
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr

    print('Finish!', file=mylog, flush=True)
    print('Finish!')
    mylog.close()
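dice_coeff, sensitive, and positivepv are repo-local metrics. For orientation, a minimal Dice coefficient sketch consistent with how dice_coeff(mask, pre, False) is called above; the third flag is repo-specific and left as an unused placeholder here:

def dice_coeff_sketch(target, pred, _flag=False, smooth=1.0):
    # returns a tensor so that .item() works as in the loop above
    inter = (target * pred).sum()
    return (2 * inter + smooth) / (target.sum() + pred.sum() + smooth)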