def main(): """Create the model and start the training.""" h, w = map(int, args.input_size.split(',')) input_size = (h, w) h, w = map(int, args.input_size_target.split(',')) input_size_target = (h, w) cudnn.enabled = True from pytorchgo.utils.pytorch_utils import set_gpu set_gpu(args.gpu) # Create network if args.model == 'DeepLab': logger.info("adopting Deeplabv2 base model..") model = Res_Deeplab(num_classes=args.num_classes, multi_scale=False) if args.restore_from[:4] == 'http': saved_state_dict = model_zoo.load_url(args.restore_from) else: saved_state_dict = torch.load(args.restore_from) new_params = model.state_dict().copy() for i in saved_state_dict: # Scale.layer5.conv2d_list.3.weight i_parts = i.split('.') # print i_parts if not args.num_classes == 19 or not i_parts[1] == 'layer5': new_params['.'.join(i_parts[1:])] = saved_state_dict[i] # print i_parts model.load_state_dict(new_params) optimizer = optim.SGD(model.optim_parameters(args), lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) elif args.model == "FCN8S": logger.info("adopting FCN8S base model..") from pytorchgo.model.MyFCN8s import MyFCN8s model = MyFCN8s(n_class=NUM_CLASSES) vgg16 = torchfcn.models.VGG16(pretrained=True) model.copy_params_from_vgg16(vgg16) optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay) else: raise ValueError model.train() model.cuda() cudnn.benchmark = True # init D model_D1 = FCDiscriminator(num_classes=args.num_classes) model_D2 = FCDiscriminator(num_classes=args.num_classes) model_D1.train() model_D1.cuda() model_D2.train() model_D2.cuda() if SOURCE_DATA == "GTA5": trainloader = data.DataLoader(GTA5DataSet( args.data_dir, args.data_list, max_iters=args.num_steps * args.iter_size * args.batch_size, crop_size=input_size, scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) trainloader_iter = enumerate(trainloader) elif SOURCE_DATA == "SYNTHIA": trainloader = data.DataLoader(SynthiaDataSet( args.data_dir, args.data_list, LABEL_LIST_PATH, max_iters=args.num_steps * args.iter_size * args.batch_size, crop_size=input_size, scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) trainloader_iter = enumerate(trainloader) else: raise ValueError targetloader = data.DataLoader(cityscapesDataSet( max_iters=args.num_steps * args.iter_size * args.batch_size, crop_size=input_size_target, scale=False, mirror=args.random_mirror, mean=IMG_MEAN, set=args.set), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) targetloader_iter = enumerate(targetloader) # implement model.optim_parameters(args) to handle different models' lr setting optimizer.zero_grad() optimizer_D1 = optim.Adam(model_D1.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99)) optimizer_D1.zero_grad() optimizer_D2 = optim.Adam(model_D2.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99)) optimizer_D2.zero_grad() bce_loss = torch.nn.BCEWithLogitsLoss() interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear') interp_target = nn.Upsample(size=(input_size_target[1], input_size_target[0]), mode='bilinear') # labels for adversarial training source_label = 0 target_label = 1 best_mIoU = 0 model_summary([model, model_D1, model_D2]) optimizer_summary([optimizer, optimizer_D1, optimizer_D2]) for i_iter 
in tqdm(range(args.num_steps_stop), total=args.num_steps_stop, desc="training"): loss_seg_value1 = 0 loss_adv_target_value1 = 0 loss_D_value1 = 0 loss_seg_value2 = 0 loss_adv_target_value2 = 0 loss_D_value2 = 0 optimizer.zero_grad() lr = adjust_learning_rate(optimizer, i_iter) optimizer_D1.zero_grad() optimizer_D2.zero_grad() lr_D1 = adjust_learning_rate_D(optimizer_D1, i_iter) lr_D2 = adjust_learning_rate_D(optimizer_D2, i_iter) for sub_i in range(args.iter_size): ######################### train G # don't accumulate grads in D for param in model_D1.parameters(): param.requires_grad = False for param in model_D2.parameters(): param.requires_grad = False # train with source _, batch = trainloader_iter.next() images, labels, _, _ = batch images = Variable(images).cuda() pred2 = model(images) pred2 = interp(pred2) loss_seg2 = loss_calc(pred2, labels) loss = loss_seg2 # proper normalization loss = loss / args.iter_size loss.backward() loss_seg_value2 += loss_seg2.data.cpu().numpy()[0] / args.iter_size # train with target _, batch = targetloader_iter.next() images, _, _, _ = batch images = Variable(images).cuda() pred_target2 = model(images) pred_target2 = interp_target(pred_target2) D_out2 = model_D2(F.softmax(pred_target2)) loss_adv_target2 = bce_loss( D_out2, Variable( torch.FloatTensor( D_out2.data.size()).fill_(source_label)).cuda()) loss = args.lambda_adv_target2 * loss_adv_target2 loss = loss / args.iter_size loss.backward() loss_adv_target_value2 += loss_adv_target2.data.cpu().numpy( )[0] / args.iter_size ################################## train D # bring back requires_grad for param in model_D1.parameters(): param.requires_grad = True for param in model_D2.parameters(): param.requires_grad = True # train with source pred2 = pred2.detach() D_out2 = model_D2(F.softmax(pred2)) loss_D2 = bce_loss( D_out2, Variable( torch.FloatTensor( D_out2.data.size()).fill_(source_label)).cuda()) loss_D2 = loss_D2 / args.iter_size / 2 loss_D2.backward() loss_D_value2 += loss_D2.data.cpu().numpy()[0] # train with target pred_target2 = pred_target2.detach() D_out2 = model_D2(F.softmax(pred_target2)) loss_D2 = bce_loss( D_out2, Variable( torch.FloatTensor( D_out2.data.size()).fill_(target_label)).cuda()) loss_D2 = loss_D2 / args.iter_size / 2 loss_D2.backward() loss_D_value2 += loss_D2.data.cpu().numpy()[0] optimizer.step() optimizer_D1.step() optimizer_D2.step() if i_iter % 100 == 0: logger.info( 'iter = {}/{},loss_seg1 = {:.3f} loss_seg2 = {:.3f} loss_adv1 = {:.3f}, loss_adv2 = {:.3f} loss_D1 = {:.3f} loss_D2 = {:.3f}, lr={:.7f}, lr_D={:.7f}, best miou16= {:.5f}' .format(i_iter, args.num_steps_stop, loss_seg_value1, loss_seg_value2, loss_adv_target_value1, loss_adv_target_value2, loss_D_value1, loss_D_value2, lr, lr_D1, best_mIoU)) if i_iter % args.save_pred_every == 0 and i_iter != 0: logger.info("saving snapshot.....") cur_miou16 = proceed_test(model, input_size) is_best = True if best_mIoU < cur_miou16 else False if is_best: best_mIoU = cur_miou16 torch.save( { 'iteration': i_iter, 'optim_state_dict': optimizer.state_dict(), 'optim_D1_state_dict': optimizer_D1.state_dict(), 'optim_D2_state_dict': optimizer_D2.state_dict(), 'model_state_dict': model.state_dict(), 'model_D1_state_dict': model_D1.state_dict(), 'model_D2_state_dict': model_D2.state_dict(), 'best_mean_iu': cur_miou16, }, osp.join(logger.get_logger_dir(), 'checkpoint.pth.tar')) if is_best: import shutil shutil.copy( osp.join(logger.get_logger_dir(), 'checkpoint.pth.tar'), osp.join(logger.get_logger_dir(), 'model_best.pth.tar')) if i_iter >= 
args.num_steps_stop - 1: break
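

######################### helper sketches (assumptions, not the repo's code)
# The loop above calls adjust_learning_rate, adjust_learning_rate_D and
# loss_calc, which are defined elsewhere in the repo. The minimal sketches
# below follow the usual AdaptSegNet-style recipe (poly LR decay, per-pixel
# cross entropy with ignore label 255); the exact decay power, the 10x lr on
# the second param group, and args.power are assumptions.
def lr_poly(base_lr, it, max_iter, power):
    # polynomial decay: base_lr * (1 - it / max_iter) ** power
    return base_lr * ((1 - float(it) / max_iter) ** power)


def adjust_learning_rate(optimizer, i_iter):
    lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    if len(optimizer.param_groups) > 1:
        # classifier params are commonly trained at 10x the base lr
        optimizer.param_groups[1]['lr'] = lr * 10
    return lr


def adjust_learning_rate_D(optimizer, i_iter):
    lr = lr_poly(args.learning_rate_D, i_iter, args.num_steps, args.power)
    optimizer.param_groups[0]['lr'] = lr
    return lr


def loss_calc(pred, label):
    # per-pixel cross entropy; 255 is the usual ignore index for
    # GTA5/Cityscapes label maps (assumption)
    label = Variable(label.long()).cuda()
    return torch.nn.CrossEntropyLoss(ignore_index=255)(pred, label)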
                    type=float, help='Gamma update for SGD')
parser.add_argument('--visdom', default=False, type=str2bool,
                    help='Use visdom for loss visualization')
parser.add_argument('--gpu', default=0, type=int, help='gpu')
args = parser.parse_args()

if args.cuda and torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')

from pytorchgo.utils.pytorch_utils import set_gpu
set_gpu(args.gpu)

logger.info(args)

start_iter = 0

ssd_net = build_ssd('train', args.dim, num_classes)
net = ssd_net
"""
if args.cuda:
    net = torch.nn.DataParallel(ssd_net)
    cudnn.benchmark = True
"""

if args.resume:
    logger.info('Resuming training, loading {}...'.format(args.resume))
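

# str2bool is used by the boolean flags above but is not defined in this
# snippet; a minimal sketch of the usual helper (the repo's own definition
# may differ):
def str2bool(v):
    # argparse hands the flag value over as a string such as "true" or "0"
    return str(v).lower() in ("yes", "true", "t", "1")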
def train(args):
    logger.auto_set_dir()

    from pytorchgo.utils.pytorch_utils import set_gpu
    set_gpu(args.gpu)

    # Setup Dataloader
    from pytorchgo.augmentation.segmentation import SubtractMeans, PIL2NP, RGB2BGR, PIL_Scale, Value255to0, ToLabel
    from torchvision.transforms import Compose, ToTensor

    img_transform = Compose([  # notice the order of the transforms!
        PIL_Scale(train_img_shape, Image.BILINEAR),
        PIL2NP(),
        RGB2BGR(),
        SubtractMeans(),
        ToTensor(),
    ])
    label_transform = Compose([
        PIL_Scale(train_img_shape, Image.NEAREST),
        PIL2NP(),
        Value255to0(),
        ToLabel(),
    ])
    val_img_transform = Compose([
        PIL_Scale(train_img_shape, Image.BILINEAR),
        PIL2NP(),
        RGB2BGR(),
        SubtractMeans(),
        ToTensor(),
    ])
    val_label_transform = Compose([
        PIL_Scale(train_img_shape, Image.NEAREST),
        PIL2NP(),
        ToLabel(),
        # note: unlike training, no Value255to0() here; train and validation
        # labels are handled differently, which is subtle and easy to get wrong.
    ])

    from pytorchgo.dataloader.pascal_voc_loader import pascalVOCLoader as common_voc_loader
    train_loader = common_voc_loader(split="train_aug", epoch_scale=1,
                                     img_transform=img_transform,
                                     label_transform=label_transform)
    validation_loader = common_voc_loader(split='val',
                                          img_transform=val_img_transform,
                                          label_transform=val_label_transform)
    n_classes = train_loader.n_classes

    trainloader = data.DataLoader(train_loader, batch_size=args.batch_size,
                                  num_workers=8, shuffle=True)
    valloader = data.DataLoader(validation_loader, batch_size=args.batch_size,
                                num_workers=8)

    # Setup Metrics
    running_metrics = runningScore(n_classes)

    # Setup Model
    from pytorchgo.model.deeplabv1 import VGG16_LargeFoV
    from pytorchgo.model.deeplab_resnet import Res_Deeplab
    model = Res_Deeplab(NoLabels=n_classes, pretrained=True, output_all=False)

    from pytorchgo.utils.pytorch_utils import model_summary, optimizer_summary
    model_summary(model)

    def get_validation_miou(model):
        model.eval()
        for i_val, (images_val, labels_val) in tqdm(enumerate(valloader),
                                                    total=len(valloader),
                                                    desc="validation"):
            if i_val > 5 and is_debug == 1:
                break
            if i_val > 200 and is_debug == 2:
                break

            output = model(Variable(images_val, volatile=True).cuda())
            pred = output.data.max(1)[1].cpu().numpy()
            gt = labels_val.numpy()
            running_metrics.update(gt, pred)

        score, class_iou = running_metrics.get_scores()
        for k, v in score.items():
            logger.info("{}: {}".format(k, v))
        running_metrics.reset()
        return score['Mean IoU : \t']

    model.cuda()

    # Check if the model ships its own optimizer; otherwise fall back to SGD
    if hasattr(model, 'optimizer'):
        optimizer = model.optimizer
    else:
        logger.warn("model has no customized optimizer, using the default SGD setting!")
        optimizer = torch.optim.SGD(model.optimizer_params(args.l_rate),
                                    lr=args.l_rate, momentum=0.99,
                                    weight_decay=5e-4)
    optimizer_summary(optimizer)

    if args.resume is not None:
        if os.path.isfile(args.resume):
            logger.info("Loading model and optimizer from checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['model_state'])
            optimizer.load_state_dict(checkpoint['optimizer_state'])
            logger.info("Loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger.info("No checkpoint found at '{}'".format(args.resume))

    best_iou = 0
    logger.info('start!!')
    for epoch in tqdm(range(args.n_epoch), total=args.n_epoch):
        model.train()
        for i, (images, labels) in tqdm(enumerate(trainloader), total=len(trainloader),
                                        desc="training epoch {}/{}".format(epoch, args.n_epoch)):
            if i > 10 and is_debug == 1:
                break
            if i > 200 and is_debug == 2:
                break

            cur_iter = i + epoch * len(trainloader)
            cur_lr = adjust_learning_rate(optimizer, args.l_rate, cur_iter,
                                          args.n_epoch * len(trainloader), power=0.9)

            images = Variable(images.cuda())
            labels = Variable(labels.cuda())

            optimizer.zero_grad()
            outputs = model(images)  # use fusion score
            loss = CrossEntropyLoss2d_Seg(input=outputs, target=labels,
                                          class_num=n_classes)
            loss.backward()
            optimizer.step()

            if (i + 1) % 100 == 0:
                logger.info("Epoch [%d/%d] Loss: %.4f, lr: %.7f, best mIoU: %.7f" %
                            (epoch + 1, args.n_epoch, loss.data[0], cur_lr, best_iou))

        cur_miou = get_validation_miou(model)
        if cur_miou >= best_iou:
            best_iou = cur_miou
            state = {
                'epoch': epoch + 1,
                'mIoU': best_iou,
                'model_state': model.state_dict(),
                'optimizer_state': optimizer.state_dict(),
            }
            torch.save(state, os.path.join(logger.get_logger_dir(), "best_model.pth"))
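

# adjust_learning_rate is called above as (optimizer, base_lr, cur_iter,
# max_iter, power=0.9) but defined elsewhere in the repo; a poly-decay sketch
# matching that call signature (applying a uniform lr to every param group is
# an assumption):
def adjust_learning_rate(optimizer, base_lr, cur_iter, max_iter, power=0.9):
    lr = base_lr * ((1 - float(cur_iter) / max_iter) ** power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr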
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use cuda to train model')
parser.add_argument('--voc_root', default=Sim_ROOT,
                    help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
                    help='Cleanup and remove results files following eval')
args = parser.parse_args()

from pytorchgo.utils.pytorch_utils import set_gpu
set_gpu(2)

if not os.path.exists(args.save_folder):
    os.mkdir(args.save_folder)

if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: It looks like you have a CUDA device, but aren't using "
              "CUDA. Run with --cuda for optimal eval speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')