def main():
    """Entry point: train and validate the segmentation model.

    Parses CLI args, builds datasets/loaders, constructs the model, losses
    and optimizer, optionally resumes from the best saved checkpoint, then
    runs the train/val loop, checkpointing after every epoch.

    Side effects: creates 'Trainid_<id>/Checkpoint/', writes TensorBoard
    event files under 'runs/', and saves one checkpoint per epoch named by
    its validation loss.
    """
    args = parser.parse_args()
    save_path = 'Trainid_' + args.id
    checkpoint_dir = os.path.join(save_path, 'Checkpoint')
    writer = SummaryWriter(log_dir='runs/' + args.tag + str(time.time()))
    if not os.path.isdir(save_path):
        # makedirs creates save_path and the nested Checkpoint dir in one call
        # (the original issued two separate mkdir calls).
        os.makedirs(checkpoint_dir)

    train_dataset_path = 'data/train'
    val_dataset_path = 'data/valid'
    train_transform = transforms.Compose([ToTensor()])
    val_transform = transforms.Compose([ToTensor()])
    train_dataset = TrainDataset(path=train_dataset_path,
                                 transform=train_transform)
    val_dataset = TrainDataset(path=val_dataset_path,
                               transform=val_transform)
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size,
                                  shuffle=True, pin_memory=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size,
                                shuffle=False, pin_memory=True,
                                num_workers=4)

    # len(DataLoader) is the number of BATCHES, not images — the original
    # message mislabeled it.
    size_train = len(train_dataloader)
    size_val = len(val_dataloader)
    print('Number of Training Batches: {}'.format(size_train))
    print('Number of Validation Batches: {}'.format(size_val))

    start_epoch = 0
    model = Res(n_ch=4, n_classes=9)
    # Class index 8 gets weight 0, so it never contributes to the CE loss
    # (presumably a background/ignore class — confirm against the dataset).
    # BUGFIX: do NOT call .cuda() here unconditionally — it crashed CPU-only
    # runs. criterion1.cuda() below moves the registered weight buffer.
    class_weights = torch.Tensor([1, 1, 1, 1, 1, 1, 1, 1, 0])
    criterion = DiceLoss()
    criterion1 = torch.nn.CrossEntropyLoss(weight=class_weights)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    if args.gpu:
        model = model.cuda()
        criterion = criterion.cuda()
        criterion1 = criterion1.cuda()  # also moves class_weights to the GPU

    if args.resume is not None:
        # Checkpoints are named '<val_loss>.pth.tar'; stripping the 8-char
        # '.pth.tar' suffix and sorting numerically, entry [0] is the
        # checkpoint with the LOWEST validation loss.
        weight_path = sorted(os.listdir(checkpoint_dir),
                             key=lambda x: float(x[:-8]))[0]
        checkpoint = torch.load(os.path.join(checkpoint_dir, weight_path))
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # BUGFIX: report the epoch actually loaded (best-val checkpoint),
        # not args.resume — the two need not agree.
        print('Loaded Checkpoint of Epoch: {}'.format(start_epoch))

    for epoch in range(start_epoch, int(args.epoch) + start_epoch):
        adjust_learning_rate(optimizer, epoch)
        train(model, train_dataloader, criterion, criterion1, optimizer,
              epoch, writer, size_train)
        print('')
        val_loss = val(model, val_dataloader, criterion, criterion1, epoch,
                       writer, size_val)
        print('')
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            },
            filename=os.path.join(checkpoint_dir,
                                  str(val_loss) + '.pth.tar'))
    writer.export_scalars_to_json(os.path.join(save_path, 'log.json'))
    # BUGFIX: flush pending events and release the event-file handle.
    writer.close()
size, sampling_mode='center_val', deterministic=True), batch_size=1, pin_memory=True) optimizer = optim.Adam(net.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay) scheduler = CosineAnnealingLR(optimizer, T_max=args.restart * len(train_loader)) if is_cuda: net = net.cuda() bce_crit = bce_crit.cuda() dice_crit = dice_crit.cuda() def train(train_loader, epoch): net.train(True) reporter = Report() epoch_bce_loss = 0 epoch_dice_loss = 0 epoch_loss = 0 for inputs, labels in train_loader: optimizer.zero_grad() if is_cuda: inputs = inputs.cuda() labels = labels.cuda() outputs = sigmoid(net(inputs)) reporter.feed(outputs, labels)