Example no. 1
# Imports assumed for this excerpt; VGG16_FCN32s, VGG16_FCN8s, UNet and the
# _run_train / _run_eval helpers come from the surrounding project (not shown).
import torch
import torch.nn as nn
import torch.optim as optim

def train(args, train_dataloader, valid_dataloader):

    if str(args.model).lower() == 'fcn32s':
        model = VGG16_FCN32s(n_classes=7)
    elif str(args.model).lower() == 'fcn8s':
        model = VGG16_FCN8s(n_classes=7)
    else:
        model = UNet(n_channels=3, n_classes=7)
    #model = nn.DataParallel(model, device_ids=['cuda:0','cuda:1'])
    model.to(args.device)

    # loss: weighted cross-entropy over the 7 classes
    # 0.79, 0.14, 1.0, 0.73, 2.74, 1.04, 132, 0
    weight = torch.tensor([0.79, 0.14, 1.0, 0.73, 2.74, 1.04, 1.0])
    criterion = nn.CrossEntropyLoss(weight).to(args.device)

    # optimizer: SGD with momentum, plus a milestone-based learning-rate decay
    optimizer = optim.SGD(model.parameters(),
                          lr=1e-3,
                          momentum=0.9,
                          weight_decay=5e-4)
    if str(args.model).lower() == 'fcn32s':
        milestones = [1, 10, 20, 50]
    elif str(args.model).lower() == 'fcn8s':
        milestones = [1, 10, 20, 60]
    else:
        milestones = [25, 50, 80]
    train_scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=milestones, gamma=0.2)  # learning-rate decay

    best_iou = 0
    for epoch in range(args.epochs):
        print(f"\tEpoch {epoch}")

        loss, acc, iou = _run_train(args, train_dataloader, model, criterion,
                                    optimizer)
        print("\t train loss:{:.5f}, acc:{:.3f}, iou:{:.2f}".format(
            loss, acc, iou))
        train_scheduler.step()  # advance the milestone LR schedule once per epoch

        loss, acc, iou = _run_eval(args, valid_dataloader, model, criterion)
        print("\t valid loss:{:.5f}, acc:{:.3f}, iou:{:.2f}".format(
            loss, acc, iou))

        if epoch in milestones:
            torch.save(model.state_dict(),
                       f"./result/{epoch}_{args.model}.pth")
            print('\t [Info] save weights')
        if epoch > milestones[1] and iou > best_iou:
            best_iou = iou
            torch.save(model.state_dict(), f"./result/best_{args.model}.pth")
            print('\t [Info] save weights')
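
A minimal sketch of how this entry point could be driven, assuming args only needs the fields the function reads (model, device, epochs) and that the dataloaders yield (image, label) batches for the 7-class task; the argparse flags and dummy data below are illustrative assumptions, not taken from the original project.

import argparse
import os
import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical driver: every flag name below is inferred from the attributes
# the train() snippet accesses.
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='unet')   # 'fcn32s', 'fcn8s', or anything else for UNet
parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
parser.add_argument('--epochs', type=int, default=2)
args = parser.parse_args([])

# Tiny random 7-class segmentation batches, just enough to exercise the loop.
images = torch.rand(8, 3, 64, 64)
labels = torch.randint(0, 7, (8, 64, 64))
train_dataloader = DataLoader(TensorDataset(images, labels), batch_size=4)
valid_dataloader = DataLoader(TensorDataset(images, labels), batch_size=4)

os.makedirs('./result', exist_ok=True)  # train() saves checkpoints here
train(args, train_dataloader, valid_dataloader)
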
Example no. 2
def get_segment_model_and_criterion(device):
    """
    Create U-NET and changes fully connected layer.
    Parameters
    ----------
    device: torch.device
        Define CPU or GPU will be used for training
    Returns
    -------
    model architecture and criterion in tuple
    """
    model = UNet(n_channels=3, n_classes=1)
    model = model.to(device)
    criterion = dice
    return model, criterion
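
The dice criterion returned here is defined elsewhere in the project and is not part of this excerpt. A common soft Dice loss with a compatible call signature would look roughly like the sketch below; the name is taken from the snippet, but the body is an assumption.

import torch

def dice(pred, target, eps=1e-6):
    # Assumed soft Dice loss: pred holds raw logits of shape (N, 1, H, W),
    # target is a binary mask of the same shape.
    pred = torch.sigmoid(pred)
    intersection = (pred * target).sum(dim=(1, 2, 3))
    union = pred.sum(dim=(1, 2, 3)) + target.sum(dim=(1, 2, 3))
    return 1.0 - ((2.0 * intersection + eps) / (union + eps)).mean()
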
Example no. 3

    # Create train dataset (the opening lines of this snippet are reconstructed
    # to mirror the validation loader below; the original lines are not shown)
    train_data = dset.ImageFolder(root=args.train_dir,
                                  transform=default_transform)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=64,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    # Create validation dataset

    valid_data = dset.ImageFolder(root=args.valid_dir,
                                  transform=default_transform)
    valid_loader = torch.utils.data.DataLoader(valid_data,
                                               batch_size=64,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)

    model = UNet(n_channels=3, n_classes=3, bilinear=True)
    model.to(args.device)
    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=args.lr,
                                 weight_decay=0)

    state_dict = {'itr': 0}

    if args.resume:
        print('Loading weights & resuming from iteration {}'.format(
            args.checkpoint))
        model, optimizer, logger = load_UNET_checkpoint(
            model, optimizer, '256', args)
        state_dict['itr'] = args.checkpoint

    for epoch in range(args.num_epochs):
        train_256(epoch, state_dict, model, optimizer, train_loader,
                  valid_loader, args)  # trailing arguments are assumed; the excerpt cuts off mid-call
Example no. 4
    default_transform = transforms.Compose([
                            transforms.CenterCrop(args.image_size),
                            transforms.Resize(args.image_size),
                            transforms.ToTensor()
                        ])

    # Create train dataset
    train_dataset = dset.ImageFolder(root=args.train_dir, transform=default_transform)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
                                               shuffle=False, num_workers=args.workers,
                                               pin_memory=True, drop_last=True)

    # Create validation dataset
    valid_dataset = dset.ImageFolder(root=args.valid_dir, transform=default_transform)

    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size,
                                               shuffle=False, num_workers=args.workers,
                                               pin_memory=True, drop_last=True)

    model_128 = UNet(n_channels=3, n_classes=3, bilinear=True)
    model_128.to(args.device)
    model_128 = load_UNET_weights(model_128, '128', args)
    
    model_256 = UNet(n_channels=3, n_classes=3, bilinear=True)
    model_256.to(args.device)
    model_256 = load_UNET_weights(model_256, '256', args)

    eval_unet_128_256(model_128, model_256, train_loader, 'train', args)
    eval_unet_128_256(model_128, model_256, valid_loader, 'valid', args)
Example no. 5

        # (excerpt from the training loop of train_net; train_loader, optimizer,
        # criterion and best_loss are defined earlier in that function)
        net.train()
        # iterate over the training set one batch at a time
        for image, label in train_loader:
            optimizer.zero_grad()
            # move the batch to the target device
            image = image.to(device=device, dtype=torch.float32)
            label = label.to(device=device, dtype=torch.float32)
            # forward pass with the current network parameters
            pred = net(image)
            # compute the loss
            loss = criterion(pred, label)
            print('Loss/train', loss.item())
            # keep the weights that achieve the lowest training loss so far
            if loss.item() < best_loss:
                best_loss = loss.item()
                torch.save(net.state_dict(), 'best_model.pth')
            # backpropagate and update the parameters
            loss.backward()
            optimizer.step()


if __name__ == "__main__":
    # select the device: use CUDA if available, otherwise fall back to the CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # build the network: single-channel input images, one output class
    net = UNet(n_channels=1, n_classes=1)
    # move the network to the chosen device
    net.to(device=device)
    # point to the training data and start training
    data_path = "E:/AI_data/ISBI/data/train/"
    train_net(net, device, data_path)
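
After training, best_model.pth holds the weights with the lowest training loss seen so far. A minimal inference sketch, assuming the same single-channel U-Net and a sigmoid threshold on the raw logits (the project's own predict script is not shown in this excerpt):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = UNet(n_channels=1, n_classes=1)
net.load_state_dict(torch.load('best_model.pth', map_location=device))
net.to(device=device)
net.eval()

# Placeholder input: a single-channel image tensor shaped (batch, channels, H, W).
image = torch.rand(1, 1, 512, 512, device=device)
with torch.no_grad():
    pred = net(image)
    mask = (torch.sigmoid(pred) > 0.5).float()  # binarize the prediction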