# Example #1 (score: 0) — scraped snippet marker, kept as a comment
from torch.autograd import Variable
from unet import Unet
import dataset
from utils import *

# Command-line flags: --fp16 switches the model and optimizer into
# mixed-precision mode (fp16 model weights + fp32 master copies).
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--fp16', action='store_true',
                    help='Run model fp16 mode.')
args = parser.parse_args()

# NOTE(review): dataset.MarsDataset and Unet/network_to_half come from
# project-local modules imported above — confirm their signatures there.
train_dataset = dataset.MarsDataset()
val_dataset = dataset.MarsDataset(val=True)

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
model = Unet(1, 1)
model.cuda()
lr = 0.01
momentum = 0.9

if args.fp16:
    assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
    model = network_to_half(model)
    # Keep detached fp32 master copies of the fp16 parameters; the optimizer
    # updates these and they are what gradients flow into.
    param_copy = [param.clone().type(torch.cuda.FloatTensor).detach() for param in model.parameters()]
    for param in param_copy:
        param.requires_grad = True
else:
    # Fix: the original had ` else:` indented one space at module level
    # (a SyntaxError) with a 2-space body; in fp32 mode the optimizer
    # simply works on the model's own parameters.
    param_copy = list(model.parameters())

optimizer = torch.optim.SGD(param_copy, lr, momentum=momentum)

if args.fp16:
    # Clear any stale gradients on the fp16 model before training starts.
    model.zero_grad()
from unet import Unet
from dataset_brain import Dataset_brain_4
from loss import dice_loss
from loss import dice_score
# Evaluation setup: enable the cudnn autotuner and fix the RNG seed so
# runs are repeatable.
torch.backends.cudnn.benchmark = True
torch.manual_seed(10)

file_path = './brats18_dataset/npy_test/test_t2.npy'
model_path = './weight/unet_4.pth'

# NOTE(review): despite the names, this loads the *test* split — the
# "train_" prefix is historical.
train_data = Dataset_brain_4(file_path)
batch_size = 64
train_loader = data.DataLoader(dataset=train_data,
                               batch_size=batch_size,
                               num_workers=4)

# Restore the trained 4-channel U-Net and put it in inference mode.
unet = Unet(4)
unet.load_state_dict(torch.load(model_path))
unet.cuda()
unet.eval()

batch_score = 0
num_batch = 0
with torch.no_grad():
    for i, (img, label) in enumerate(train_loader):
        # Forward pass on the GPU, then bring the prediction back to host.
        seg = unet(img.float().cuda()).cpu()
        # Binarize the prediction: >= 0.5 maps to 1.0, everything else to 0.0.
        seg = (seg >= 0.5).float()
        # Accumulate the per-batch Dice score as a plain numpy value.
        batch_score += dice_score(seg, label.float()).data.numpy()
        # Running count of evaluated samples (batch dimension).
        num_batch += img.size(0)
        del seg, img, label
# Example #3 (score: 0) — scraped snippet marker, kept as a comment
# Data loaders: training and validation batches of 6 with seeded workers
# (shuffle only for training); the test set is evaluated one item at a time.
# NOTE(review): training_set/test_set/validation_set and worker_init_fn are
# defined elsewhere in the file — confirm they exist before this point.
loadtr = data.DataLoader(training_set,
                         batch_size=6,
                         shuffle=True,
                         num_workers=10,
                         worker_init_fn=worker_init_fn)
loadtest = data.DataLoader(test_set, batch_size=1, num_workers=10)
loadval = data.DataLoader(validation_set,
                          batch_size=6,
                          num_workers=10,
                          worker_init_fn=worker_init_fn)
# In[6]:

#model = Unet(skipDim, quantization_channels, residualDim,device)
model = Unet()
#model = nn.DataParallel(model)
model = model.cuda()
criterion = nn.MSELoss()
# in wavenet paper, they said crossentropyloss is far better than MSELoss
optimizer = optim.Adam(model.parameters(),
                       lr=1e-4,
                       weight_decay=1e-6,
                       betas=(0.5, 0.999))
# use adam to train

# Best-loss tracker per epoch slot, seeded high (100) so any real loss
# value immediately improves on it.
maxloss = np.zeros(50) + 100
# In[7]:
iteration = 0
start_epoch = 0
# Optionally resume from a checkpoint file; continueTrain and resumefile
# are presumably set earlier in the file — verify against the full script.
if continueTrain:  # if continueTrain, the program will find the checkpoints
    if os.path.isfile(resumefile):
        print("=> loading checkpoint '{}'".format(resumefile))
# Example #4 (score: 0) — scraped snippet marker, kept as a comment
    return options


if __name__ == '__main__':
    # Entry point: build a U-Net for 3-channel 480x360 input with 12
    # output classes, optionally resume saved weights, then train.
    args = get_args()

    model = Unet(input_channels=3,
                 input_width=480,
                 input_height=360,
                 n_classes=12)

    # Resume from a checkpoint when --load was given.
    if args.load:
        state = torch.load(args.load)
        model.load_state_dict(state)
        print('Model loaded from {}'.format(args.load))

    if args.gpu:
        model.cuda()

    try:
        train_net(net=model,
                  epochs=args.epochs,
                  batch_size=args.batchsize,
                  lr=args.lr,
                  gpu=args.gpu,
                  img_scale=args.scale)
    except KeyboardInterrupt:
        # Snapshot the weights so an interrupted run can be resumed
        # later via --load.
        torch.save(model.state_dict(), 'INTERRUPTED.pth')
        print('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            # Force the process down even if SystemExit is intercepted.
            os._exit(0)