def main():
    """Train a ROAD segmentation model for SYNTHIA -> CityScapes adaptation.

    Parses CLI options, builds the source/target/validation data loaders,
    constructs the segmentation network (VGG16-FCN or DeepLab-v2), a frozen
    reference copy of it, and a domain classifier, then runs MyTrainer_ROAD.

    Fixes vs. the previous revision:
    - the two bare ``raise`` statements (which would raise
      ``RuntimeError: No active exception to re-raise``) now raise
      ``ValueError`` with an explanatory message;
    - the unreachable SGD-optimizer construction after the
      "SGD is not prepared well.." guard has been removed;
    - the deeplabv2 path now freezes ``model_fix`` parameters, matching the
      vgg16 path (``model_fix`` is meant to stay fixed during training).
    """
    logger.auto_set_dir()

    global args
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot',
                        default='/home/hutao/lab/pytorchgo/example/ROAD/data',
                        help='Path to source dataset')
    parser.add_argument('--batchSize', type=int, default=1,
                        help='input batch size')
    parser.add_argument('--max_epoch', type=int, default=max_epoch,
                        help='Number of training iterations')
    parser.add_argument('--optimizer', type=str, default='Adam',
                        help='Optimizer to use | SGD, Adam')
    parser.add_argument('--lr', type=float, default=base_lr,
                        help='learning rate')
    parser.add_argument('--momentum', type=float, default=0.99,
                        help='Momentum for SGD')
    parser.add_argument('--beta1', type=float, default=0.9,
                        help='beta1 for adam. default=0.5')
    parser.add_argument('--weight_decay', type=float, default=0.0005,
                        help='Weight decay')
    parser.add_argument('--model', type=str, default='vgg16')
    parser.add_argument('--gpu', type=int, default=1)
    args = parser.parse_args()
    print(args)

    gpu = args.gpu
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if cuda:
        logger.info("random seed 1337")
        torch.cuda.manual_seed(1337)

    # Defining data loaders
    kwargs = {
        'num_workers': 4,
        'pin_memory': True,
        'drop_last': True
    } if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SYNTHIA('SYNTHIA', args.dataroot, split='train',
                                  transform=True, image_size=image_size),
        batch_size=args.batchSize, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.CityScapes('cityscapes', args.dataroot, split='val',
                                     transform=True, image_size=image_size),
        batch_size=1, shuffle=False)
    target_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.CityScapes('cityscapes', args.dataroot,
                                     split='train', transform=True,
                                     image_size=image_size),
        batch_size=args.batchSize, shuffle=True)

    if cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')

    # Defining models: `model` is trained; `model_fix` is a frozen copy of the
    # pretrained weights used as a fixed reference by the trainer.
    if args.model == "vgg16":
        model = origin_model = torchfcn.models.Seg_model(n_class=class_num)
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
        model_fix = torchfcn.models.Seg_model(n_class=class_num)
        model_fix.copy_params_from_vgg16(vgg16)
        for param in model_fix.parameters():
            param.requires_grad = False
    elif args.model == "deeplabv2":  # TODO may have problem!
        model = origin_model = torchfcn.models.Res_Deeplab(
            num_classes=class_num, image_size=image_size)
        saved_state_dict = model_zoo.load_url(Deeplabv2_restore_from)
        new_params = model.state_dict().copy()
        for i in saved_state_dict:
            # Checkpoint keys look like "Scale.layer5.conv2d_list.3.weight";
            # strip the leading "Scale." prefix. Skip the classifier head
            # (layer5) unless the class count matches the pretrained 19.
            i_parts = i.split('.')
            if class_num != 19 or i_parts[1] != 'layer5':
                new_params['.'.join(i_parts[1:])] = saved_state_dict[i]
        model.load_state_dict(new_params)
        model_fix = torchfcn.models.Res_Deeplab(num_classes=class_num,
                                                image_size=image_size)
        model_fix.load_state_dict(new_params)
        # Freeze the reference model, consistent with the vgg16 branch above.
        for param in model_fix.parameters():
            param.requires_grad = False
    else:
        raise ValueError("only support vgg16, deeplabv2!")

    netD = torchfcn.models.Domain_classifer(reverse=True)
    netD.apply(weights_init)
    model_summary([model, netD])

    if cuda:
        model = model.cuda()
        netD = netD.cuda()

    # Defining optimizer
    if args.optimizer == 'SGD':
        # SGD support was never finished; fail fast instead of silently
        # training with an untested configuration.
        raise ValueError("SGD is not prepared well..")
    elif args.optimizer == 'Adam':
        if args.model == "vgg16":
            optim = torch.optim.Adam(
                [
                    {'params': get_parameters(model, bias=False),
                     'weight_decay': args.weight_decay},
                    # Bias parameters use a doubled learning rate.
                    {'params': get_parameters(model, bias=True),
                     'lr': args.lr * 2,
                     'weight_decay': args.weight_decay},
                ],
                lr=args.lr, betas=(args.beta1, 0.999))
        elif args.model == "deeplabv2":
            optim = torch.optim.Adam(origin_model.optim_parameters(args.lr),
                                     lr=args.lr, betas=(args.beta1, 0.999),
                                     weight_decay=args.weight_decay)
        else:
            raise ValueError("only support vgg16, deeplabv2!")
    else:
        raise ValueError('Invalid optmizer argument. Has to be SGD or Adam')

    optimD = torch.optim.Adam(netD.parameters(), lr=dis_lr,
                              weight_decay=args.weight_decay,
                              betas=(0.7, 0.999))
    optimizer_summary([optim, optimD])

    trainer = MyTrainer_ROAD(cuda=cuda,
                             model=model,
                             model_fix=model_fix,
                             netD=netD,
                             optimizer=optim,
                             optimizerD=optimD,
                             train_loader=train_loader,
                             target_loader=target_loader,
                             val_loader=val_loader,
                             batch_size=args.batchSize,
                             image_size=image_size,
                             loss_print_interval=LOSS_PRINT_INTERVAL)
    trainer.epoch = 0
    trainer.iteration = 0
    trainer.train()
def main():
    """Train the VGG16-based ROAD segmentation model (SYNTHIA -> CityScapes).

    Reads CLI options, builds the three data loaders (source train, target
    train, target val), sets up a trainable segmentation net plus a frozen
    reference copy and a domain classifier, and launches MyTrainer_ROAD.
    """
    logger.auto_set_dir()

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot',
                        default='/home/hutao/lab/pytorchgo/example/ROAD/data',
                        help='Path to source dataset')
    parser.add_argument('--batchSize', type=int, default=1,
                        help='input batch size')
    parser.add_argument('--num_iters', type=int, default=100000,
                        help='Number of training iterations')
    parser.add_argument('--optimizer', type=str, default='Adam',
                        help='Optimizer to use | SGD, Adam')
    parser.add_argument('--lr', type=float, default=1.0e-5,
                        help='learning rate')
    parser.add_argument('--momentum', type=float, default=0.99,
                        help='Momentum for SGD')
    parser.add_argument('--beta1', type=float, default=0.9,
                        help='beta1 for adam. default=0.5')
    parser.add_argument('--weight_decay', type=float, default=0.0005,
                        help='Weight decay')
    parser.add_argument(
        '--interval_validate', type=int, default=3000,
        help='Period for validation. Model is validated every interval_validate iterations')
    parser.add_argument('--gpu', type=int, default=2)
    args = parser.parse_args()
    print(args)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    use_cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if use_cuda:
        logger.info("random seed 1337")
        torch.cuda.manual_seed(1337)

    # Data loaders: extra worker/pinning options only make sense on GPU.
    loader_opts = ({'num_workers': 4, 'pin_memory': True, 'drop_last': True}
                   if use_cuda else {})
    source_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SYNTHIA('SYNTHIA', args.dataroot, split='train',
                                  transform=True, image_size=image_size),
        batch_size=args.batchSize, shuffle=True, **loader_opts)
    validation_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.CityScapes('cityscapes', args.dataroot, split='val',
                                     transform=True, image_size=image_size),
        batch_size=1, shuffle=False)
    city_train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.CityScapes('cityscapes', args.dataroot,
                                     split='train', transform=True,
                                     image_size=image_size),
        batch_size=args.batchSize, shuffle=True)

    # Models: `seg_net` is trained, `frozen_net` is a fixed reference copy.
    start_epoch = 0
    start_iteration = 0
    if use_cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    seg_net = torchfcn.models.Seg_model(n_class=class_num)
    frozen_net = torchfcn.models.Seg_model(n_class=class_num)
    for p in frozen_net.parameters():
        p.requires_grad = False
    domain_net = torchfcn.models.Domain_classifer(reverse=True)
    domain_net.apply(weights_init)
    model_summary(frozen_net)
    model_summary(domain_net)

    # Both segmentation nets start from ImageNet-pretrained VGG16 weights.
    pretrained_vgg = torchfcn.models.VGG16(pretrained=True)
    seg_net.copy_params_from_vgg16(pretrained_vgg)

    if use_cuda:
        seg_net = seg_net.cuda()
        domain_net = domain_net.cuda()

    # Optimizer for the segmentation net; biases get a doubled learning rate.
    if args.optimizer == 'SGD':
        seg_optim = torch.optim.SGD(
            [
                {'params': get_parameters(seg_net, bias=False)},
                {'params': get_parameters(seg_net, bias=True),
                 'lr': args.lr * 2,
                 'weight_decay': args.weight_decay},
            ],
            lr=args.lr, momentum=args.momentum,
            weight_decay=args.weight_decay)
    elif args.optimizer == 'Adam':
        seg_optim = torch.optim.Adam(
            [
                {'params': get_parameters(seg_net, bias=False)},
                {'params': get_parameters(seg_net, bias=True),
                 'lr': args.lr * 2},
            ],
            lr=args.lr, betas=(args.beta1, 0.999))
    else:
        raise ValueError('Invalid optmizer argument. Has to be SGD or Adam')

    disc_optim = torch.optim.Adam(domain_net.parameters(), lr=0.0001,
                                  betas=(0.7, 0.999))

    trainer = MyTrainer_ROAD(cuda=use_cuda,
                             model=seg_net,
                             model_fix=frozen_net,
                             netD=domain_net,
                             optimizer=seg_optim,
                             optimizerD=disc_optim,
                             train_loader=source_loader,
                             target_loader=city_train_loader,
                             val_loader=validation_loader,
                             max_iter=args.num_iters,
                             batch_size=args.batchSize,
                             interval_validate=args.interval_validate,
                             image_size=image_size)
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
def main():
    """Run LSD or source-only training on SYNTHIA, with CityScapes as target.

    Builds data loaders and the FCN8s model (plus generator/discriminator for
    the LSD method), optionally resumes from a checkpoint, and starts the
    appropriate trainer.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', required=True,
                        help='Path to source dataset')
    parser.add_argument('--batchSize', type=int, default=1,
                        help='input batch size')
    parser.add_argument('--num_iters', type=int, default=100000,
                        help='Number of training iterations')
    parser.add_argument('--optimizer', type=str, default='Adam',
                        help='Optimizer to use | SGD, Adam')
    parser.add_argument('--lr', type=float, default=1.0e-5,
                        help='learning rate')
    parser.add_argument('--momentum', type=float, default=0.99,
                        help='Momentum for SGD')
    parser.add_argument('--beta1', type=float, default=0.9,
                        help='beta1 for adam. default=0.5')
    parser.add_argument('--weight_decay', type=float, default=0.0005,
                        help='Weight decay')
    parser.add_argument(
        '--interval_validate', type=int, default=500,
        help='Period for validation. Model is validated every interval_validate iterations')
    parser.add_argument(
        '--resume', default='',
        help="path to the current checkpoint for resuming training. Do not specify if model has to be trained from scratch")
    parser.add_argument('--logdir', default='logs',
                        help="Path to the directory to store log files")
    parser.add_argument('--method', default='LSD',
                        help="Method to use for training | LSD, sourceonly")
    parser.add_argument('--l1_weight', type=float, default=1,
                        help='L1 weight')
    parser.add_argument('--adv_weight', type=float, default=0.1,
                        help='Adv_weight')
    parser.add_argument('--c_weight', type=float, default=0.1,
                        help='C_weight')
    parser.add_argument('--gpu', type=int, required=True)
    args = parser.parse_args()
    print(args)

    out = get_log_dir(args.logdir, args.method, args.lr, args.optimizer)
    resume = args.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # Data loaders. Note: validation is on SYNTHIA here, while the unlabeled
    # target stream comes from CityScapes.
    image_size = [640, 320]
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SYNTHIA('SYNTHIA', args.dataroot, split='train',
                                  transform=True, image_size=image_size),
        batch_size=args.batchSize, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SYNTHIA('SYNTHIA', args.dataroot, split='val',
                                  transform=True, image_size=image_size),
        batch_size=args.batchSize, shuffle=False, **kwargs)
    target_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.CityScapes('cityscapes', args.dataroot,
                                     split='train', transform=True,
                                     image_size=image_size),
        batch_size=1, shuffle=True)

    # Models
    start_epoch = 0
    start_iteration = 0
    if args.method == 'sourceonly':
        model = torchfcn.models.FCN8s_sourceonly(n_class=19)
    elif args.method == 'LSD':
        model = torchfcn.models.FCN8s_LSD(n_class=19)
        netG = torchfcn.models._netG()
        netD = torchfcn.models._netD()
        netD.apply(weights_init)
        netG.apply(weights_init)
    else:
        raise ValueError('method argument can be either sourceonly or LSD')

    if resume:
        # Restore the segmentation model and the training position.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Fresh start: initialize the FCN backbone from pretrained VGG16.
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)

    if cuda:
        model = model.cuda()
        if args.method == 'LSD':
            netD = netD.cuda()
            netG = netG.cuda()

    # Optimizer for the segmentation model; biases use a doubled lr.
    if args.optimizer == 'SGD':
        optim = torch.optim.SGD(
            [
                {'params': get_parameters(model, bias=False)},
                {'params': get_parameters(model, bias=True),
                 'lr': args.lr * 2,
                 'weight_decay': args.weight_decay},
            ],
            lr=args.lr, momentum=args.momentum,
            weight_decay=args.weight_decay)
    elif args.optimizer == 'Adam':
        optim = torch.optim.Adam(
            [
                {'params': get_parameters(model, bias=False)},
                {'params': get_parameters(model, bias=True),
                 'lr': args.lr * 2},
            ],
            lr=args.lr, betas=(args.beta1, 0.999))
    else:
        raise ValueError('Invalid optmizer argument. Has to be SGD or Adam')

    if args.method == 'LSD':
        optimD = torch.optim.Adam(netD.parameters(), lr=0.0001,
                                  betas=(0.7, 0.999))
        optimG = torch.optim.Adam(netG.parameters(), lr=0.0001,
                                  betas=(0.7, 0.999))

    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    # Build the trainer matching the chosen method, then train.
    if args.method == 'sourceonly':
        trainer = torchfcn.Trainer_sourceonly(
            cuda=cuda,
            model=model,
            optimizer=optim,
            train_loader=train_loader,
            target_loader=target_loader,
            val_loader=val_loader,
            out=out,
            max_iter=args.num_iters,
            interval_validate=args.interval_validate)
    elif args.method == 'LSD':
        trainer = torchfcn.Trainer_LSD(
            cuda=cuda,
            model=model,
            netD=netD,
            netG=netG,
            optimizer=optim,
            optimizerD=optimD,
            optimizerG=optimG,
            train_loader=train_loader,
            target_loader=target_loader,
            l1_weight=args.l1_weight,
            adv_weight=args.adv_weight,
            c_weight=args.c_weight,
            val_loader=val_loader,
            out=out,
            max_iter=args.num_iters,
            interval_validate=args.interval_validate,
            image_size=image_size)
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
def main():
    """CLI entry point: LSD or source-only FCN8s training on SYNTHIA.

    Parses options, prepares loaders (SYNTHIA train/val, CityScapes target),
    builds the model(s), optionally resumes from a checkpoint, and trains.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument  # shorthand; every option is registered below
    add('--dataroot', required=True, help='Path to source dataset')
    add('--batchSize', type=int, default=1, help='input batch size')
    add('--num_iters', type=int, default=100000,
        help='Number of training iterations')
    add('--optimizer', type=str, default='Adam',
        help='Optimizer to use | SGD, Adam')
    add('--lr', type=float, default=1.0e-5, help='learning rate')
    add('--momentum', type=float, default=0.99, help='Momentum for SGD')
    add('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.5')
    add('--weight_decay', type=float, default=0.0005, help='Weight decay')
    add('--interval_validate', type=int, default=500,
        help='Period for validation. Model is validated every interval_validate iterations')
    add('--resume', default='',
        help="path to the current checkpoint for resuming training. Do not specify if model has to be trained from scratch")
    add('--logdir', default='logs',
        help="Path to the directory to store log files")
    add('--method', default='LSD',
        help="Method to use for training | LSD, sourceonly")
    add('--l1_weight', type=float, default=1, help='L1 weight')
    add('--adv_weight', type=float, default=0.1, help='Adv_weight')
    add('--c_weight', type=float, default=0.1, help='C_weight')
    add('--gpu', type=int, required=True)
    opt = parser.parse_args()
    print(opt)

    log_out = get_log_dir(opt.logdir, opt.method, opt.lr, opt.optimizer)
    resume_path = opt.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu)
    on_gpu = torch.cuda.is_available()
    torch.manual_seed(1337)
    if on_gpu:
        torch.cuda.manual_seed(1337)

    # Loaders: labeled SYNTHIA for train/val, unlabeled CityScapes as target.
    image_size = [640, 320]
    extra = {'num_workers': 4, 'pin_memory': True} if on_gpu else {}
    src_train = torch.utils.data.DataLoader(
        torchfcn.datasets.SYNTHIA('SYNTHIA', opt.dataroot, split='train',
                                  transform=True, image_size=image_size),
        batch_size=opt.batchSize, shuffle=True, **extra)
    src_val = torch.utils.data.DataLoader(
        torchfcn.datasets.SYNTHIA('SYNTHIA', opt.dataroot, split='val',
                                  transform=True, image_size=image_size),
        batch_size=opt.batchSize, shuffle=False, **extra)
    tgt_train = torch.utils.data.DataLoader(
        torchfcn.datasets.CityScapes('cityscapes', opt.dataroot,
                                     split='train', transform=True,
                                     image_size=image_size),
        batch_size=1, shuffle=True)

    # Model construction depends on the training method.
    epoch0 = 0
    iter0 = 0
    if opt.method == 'sourceonly':
        net = torchfcn.models.FCN8s_sourceonly(n_class=19)
    elif opt.method == 'LSD':
        net = torchfcn.models.FCN8s_LSD(n_class=19)
        gen = torchfcn.models._netG()
        disc = torchfcn.models._netD()
        disc.apply(weights_init)
        gen.apply(weights_init)
    else:
        raise ValueError('method argument can be either sourceonly or LSD')

    if resume_path:
        # Resume: restore model weights and the epoch/iteration counters.
        ckpt = torch.load(resume_path)
        net.load_state_dict(ckpt['model_state_dict'])
        epoch0 = ckpt['epoch']
        iter0 = ckpt['iteration']
    else:
        # Scratch: seed the FCN backbone from pretrained VGG16.
        net.copy_params_from_vgg16(torchfcn.models.VGG16(pretrained=True))

    if on_gpu:
        net = net.cuda()
        if opt.method == 'LSD':
            disc = disc.cuda()
            gen = gen.cuda()

    # Segmentation-model optimizer; bias parameters get a doubled lr.
    if opt.optimizer == 'SGD':
        seg_optimizer = torch.optim.SGD(
            [
                {'params': get_parameters(net, bias=False)},
                {'params': get_parameters(net, bias=True),
                 'lr': opt.lr * 2,
                 'weight_decay': opt.weight_decay},
            ],
            lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)
    elif opt.optimizer == 'Adam':
        seg_optimizer = torch.optim.Adam(
            [
                {'params': get_parameters(net, bias=False)},
                {'params': get_parameters(net, bias=True),
                 'lr': opt.lr * 2},
            ],
            lr=opt.lr, betas=(opt.beta1, 0.999))
    else:
        raise ValueError('Invalid optmizer argument. Has to be SGD or Adam')

    if opt.method == 'LSD':
        disc_optimizer = torch.optim.Adam(disc.parameters(), lr=0.0001,
                                          betas=(0.7, 0.999))
        gen_optimizer = torch.optim.Adam(gen.parameters(), lr=0.0001,
                                         betas=(0.7, 0.999))

    if resume_path:
        seg_optimizer.load_state_dict(ckpt['optim_state_dict'])

    # Build the trainer for the chosen method and start training.
    if opt.method == 'sourceonly':
        trainer = torchfcn.Trainer_sourceonly(
            cuda=on_gpu,
            model=net,
            optimizer=seg_optimizer,
            train_loader=src_train,
            target_loader=tgt_train,
            val_loader=src_val,
            out=log_out,
            max_iter=opt.num_iters,
            interval_validate=opt.interval_validate)
        trainer.epoch = epoch0
        trainer.iteration = iter0
        trainer.train()
    elif opt.method == 'LSD':
        trainer = torchfcn.Trainer_LSD(
            cuda=on_gpu,
            model=net,
            netD=disc,
            netG=gen,
            optimizer=seg_optimizer,
            optimizerD=disc_optimizer,
            optimizerG=gen_optimizer,
            train_loader=src_train,
            target_loader=tgt_train,
            l1_weight=opt.l1_weight,
            adv_weight=opt.adv_weight,
            c_weight=opt.c_weight,
            val_loader=src_val,
            out=log_out,
            max_iter=opt.num_iters,
            interval_validate=opt.interval_validate,
            image_size=image_size)
        trainer.epoch = epoch0
        trainer.iteration = iter0
        trainer.train()