def main():
    """Train FCN8s, initialising its weights from a pretrained FCN16s."""
    cfg = configurations
    cuda = torch.cuda.is_available()

    # Fixed seeds for reproducibility (CPU and GPU).
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # NOTE(review): shuffle=False on the *training* loader is unusual for
    # SGD training — confirm this is intentional.
    train_dataloader = torch.utils.data.DataLoader(
        ImageList(fileList="/home/yaohuaxu1/FCN/train.txt",
                  transform=transforms.Compose([
                      transforms.ToTensor(),
                  ])),
        shuffle=False, num_workers=8, batch_size=1)

    model = FCN8s()
    start_epoch = 0
    start_iteration = 0

    # Initialise from a pretrained FCN16s checkpoint.
    fcn16s = FCN16s()
    fcn16s.load_state_dict(torch.load(cfg[1]['fcn16s_pretrained_model']))
    # BUG FIX: the source network is an FCN16s, so use the FCN16s copy
    # helper; the original called copy_params_from_fcn32s here.
    model.copy_params_from_fcn16s(fcn16s)
    if cuda:
        model = model.cuda()

    # Biases get double the learning rate and no weight decay, per the
    # original FCN training recipe.
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': cfg[1]['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg[1]['lr'],
        momentum=cfg[1]['momentum'],
        weight_decay=cfg[1]['weight_decay'])

    trainer = Trainer(
        # BUG FIX: was hard-coded to False even though the model is moved
        # to the GPU above, which would feed CPU tensors to a CUDA model.
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_dataloader,
        # NOTE(review): validation reuses the training loader — no held-out
        # set is evaluated.
        val_loader=train_dataloader,
        max_iter=cfg[1]['max_iteration'],
        size_average=False,
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
def main():
    """Train FCN16s on the satellite dataset (FCN reference configuration)."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-g', '--gpu', type=int, required=True, help='gpu id')
    parser.add_argument('--resume', help='checkpoint path')
    # configurations (same configuration as original work)
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    parser.add_argument(
        '--max-iteration', type=int, default=100000, help='max iteration'
    )
    parser.add_argument(
        '--lr', type=float, default=1.0e-12, help='learning rate',
    )
    parser.add_argument(
        '--weight-decay', type=float, default=0.0005, help='weight decay',
    )
    parser.add_argument(
        '--momentum', type=float, default=0.99, help='momentum',
    )
    parser.add_argument(
        '--pretrained-model', help='pretrained model of FCN32s',
    )
    args = parser.parse_args()

    args.model = 'FCN16s'
    args.git_hash = git_hash()

    # Each run logs into a fresh timestamped directory; the full argument
    # set is dumped alongside for reproducibility.
    now = datetime.datetime.now()
    args.out = osp.join(here, 'logs', now.strftime('%Y%m%d_%H%M%S.%f'))
    os.makedirs(args.out)
    with open(osp.join(args.out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SatelliteDataset(split='train', transform=True),
        batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SatelliteDataset(split='val', transform=True),
        batch_size=1, shuffle=False, **kwargs)

    # 2. model
    model = torchfcn.models.FCN16s(n_class=2)
    start_epoch = 0
    start_iteration = 0
    if args.resume:
        # BUG FIX: `checkpoint` was used below (to restore optimizer state)
        # but the code that loads it had been commented out, so passing
        # --resume raised a NameError.  Restore model/epoch restoration too.
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    # NOTE(review): initialisation from a pretrained FCN32s
    # (--pretrained-model) is currently disabled; when not resuming, the
    # model trains from its default initialisation.
    if cuda:
        model = model.cuda()

    # 3. optimizer — biases get 2x learning rate and no weight decay.
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': args.lr * 2, 'weight_decay': 0},
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    if args.resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=args.out,
        max_iter=args.max_iteration,
        interval_validate=4000,
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
def main():
    """Train FCN8s on SBD/VOC2011 for semantic or instance segmentation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=int, required=True)
    parser.add_argument('-c', '--config', type=int, default=1,
                        choices=configurations.keys())
    parser.add_argument('--instance', action='store_true',
                        help='Use instance labels, else use class labels.')
    parser.add_argument('--resume', help='Checkpoint path')
    args = parser.parse_args()

    gpu = args.gpu
    cfg = configurations[args.config]
    out = get_log_dir('fcn8s', args.config, cfg)
    # FIX: use print() calls (valid on both Python 2 and 3); the original
    # used Python-2-only print statements.
    print('Running experiment {}'.format(out))
    resume = args.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset — pick instance vs. class segmentation dataset classes.
    root = osp.expanduser('~/data/datasets')
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    if args.instance:
        print('Beginning instance segmentation.')
        train_dataset = torchfcn.datasets.SBDInstSeg
        val_dataset = torchfcn.datasets.VOC2011InstSeg
    else:
        print('Beginning semantic segmentation.')
        train_dataset = torchfcn.datasets.SBDClassSeg
        val_dataset = torchfcn.datasets.VOC2011ClassSeg
    train_loader = torch.utils.data.DataLoader(
        train_dataset(root, split='train', transform=True),
        batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        val_dataset(root, split='seg11valid', transform=True),
        batch_size=1, shuffle=False, **kwargs)

    # 2. model — resume from checkpoint, else initialise from FCN16s.
    model = torchfcn.models.FCN8s(n_class=cfg['num_classes'])
    start_epoch = 0
    start_iteration = 0
    if resume:
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        fcn16s = torchfcn.models.FCN16s()
        fcn16s.load_state_dict(torch.load(cfg['fcn16s_pretrained_model']))
        model.copy_params_from_fcn16s(fcn16s)
    if cuda:
        model = model.cuda()

    # 3. optimizer — biases get 2x learning rate and no weight decay.
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': cfg['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg['lr'],
        momentum=cfg['momentum'],
        weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    tensorboard_writer = SummaryWriter(log_dir=out, comment='')

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=cfg['max_iteration'],
        # Default: validate once per epoch.
        interval_validate=cfg.get('interval_validate', len(train_loader)),
        tensorboard_writer=tensorboard_writer,
        interval_train_loss=100,
        n_class=cfg['num_classes'],
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
def main():
    """Train FCN8sAtOnce on SBD/VOC2011 with a hard-coded configuration."""
    # Argument parsing is intentionally disabled in this variant:
    # GPU 0, configuration 1, and no resume are fixed.
    gpu = 0
    cfg = configurations[1]
    out = get_log_dir('fcn8s-atonce', 1, cfg)
    resume = None

    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset
    root = osp.expanduser('~/data/datasets')
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.SBDClassSeg(root, split='train', transform=True),
        batch_size=1, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        torchfcn.datasets.VOC2011ClassSeg(
            root, split='seg11valid', transform=True),
        batch_size=1, shuffle=False, **kwargs)

    # 2. model
    # BUG FIX: the original bound the network to `model3`, then used the
    # undefined names `model2` and `t` (`model = model2; model2.set_mode(t)`),
    # which raised a NameError before training could start.
    model = torchfcn.models.FCN8sAtOnce(n_class=21)
    start_epoch = 0
    start_iteration = 0
    if resume:
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Initialise the encoder from an ImageNet-pretrained VGG16.
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
    if cuda:
        model = model.cuda()

    # 3. optimizer — biases get 2x learning rate and no weight decay.
    # BUG FIX: removed a leftover `st()` debugger breakpoint here.
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': cfg['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg['lr'],
        momentum=cfg['momentum'],
        weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=cfg['max_iteration'],
        interval_validate=cfg.get('interval_validate', len(train_loader)),
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
def main():
    """Train FCN8sAtOnce on CityScapes with train/val/mixed loaders."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=str, required=True)
    parser.add_argument('-c', '--config', type=int, default=1,
                        choices=configurations.keys())
    parser.add_argument('--resume', help='Checkpoint path')
    args = parser.parse_args()

    gpu = args.gpu
    cfg = configurations[args.config]
    out = get_log_dir('fcn8s-atonce', args.config, cfg)
    resume = args.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # One image per step on a single GPU; two images per GPU otherwise.
    n_gpus = torch.cuda.device_count()
    batch_size = 1 if n_gpus == 1 else 2 * n_gpus

    # 1. dataset — all three loaders use the same dataset class/settings,
    # differing only in split and shuffling.
    root = osp.expanduser('~/data/datasets')
    loader_kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}

    def build_loader(split, shuffle):
        dataset = torchfcn.datasets.CityScapesClassSeg(
            root, split=split, transform=True, preprocess=False)
        return torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, shuffle=shuffle, **loader_kwargs)

    mix_loader = build_loader(['train', 'val'], True)
    train_loader = build_loader(['train'], True)
    val_loader = build_loader(['val'], False)

    # 2. model — resume from a checkpoint, else seed the encoder from an
    # ImageNet-pretrained VGG16.
    model = torchfcn.models.FCN8sAtOnce(n_class=20)
    start_epoch = 0
    start_iteration = 0
    if resume:
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
    if cuda:
        if n_gpus == 1:
            model = model.cuda()
        else:
            # Multi-GPU: wrap in DataParallel before moving to device.
            model = torch.nn.DataParallel(model).cuda()

    # 3. optimizer (Adam) — biases get 2x learning rate, no weight decay.
    optim = torch.optim.Adam(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': cfg['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg['lr'],
        weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        mix_loader=mix_loader,
        out=out,
        nEpochs=5,
        max_iter=cfg['max_iteration'],
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
def main():
    """Train FCN16s on the RWTH KITTI semantics dataset."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=int, required=True)
    parser.add_argument('-c', '--config', type=int, default=1,
                        choices=configurations.keys())
    parser.add_argument('--resume', help='Checkpoint path')
    args = parser.parse_args()

    gpu = args.gpu
    cfg = configurations[args.config]
    out = get_log_dir('fcn16s', args.config, cfg)
    resume = args.resume

    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)
    cuda = torch.cuda.is_available()

    # Deterministic seeding on both CPU and GPU.
    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset
    root = osp.expanduser(
        '/media/zhi/Drive3/KITTI/rwth_kitti_semantics_dataset')
    loader_extra = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    train_set = torchfcn.datasets.KittiClassSeg(
        root, split='train', transform=True)
    val_set = torchfcn.datasets.KittiClassSegValidate(
        root, split='validation', transform=True)
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=1, shuffle=True, **loader_extra)
    val_loader = torch.utils.data.DataLoader(
        val_set, batch_size=1, shuffle=False, **loader_extra)

    # 2. model
    model = torchfcn.models.FCN16s(n_class=14)
    start_epoch = 0
    start_iteration = 0
    if resume:
        # Continue a previous run from its checkpoint.
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        # Fresh run: transfer weights from a pretrained FCN32s.
        fcn32s = torchfcn.models.FCN32s()
        fcn32s.load_state_dict(torch.load(cfg['fcn32s_pretrained_model']))
        model.copy_params_from_fcn32s(fcn32s)
    if cuda:
        model = model.cuda()

    # 3. optimizer — biases get 2x learning rate and no weight decay.
    weight_params = get_parameters(model, bias=False)
    bias_params = get_parameters(model, bias=True)
    optim = torch.optim.SGD(
        [
            {'params': weight_params},
            {'params': bias_params,
             'lr': cfg['lr'] * 2, 'weight_decay': 0},
        ],
        lr=cfg['lr'],
        momentum=cfg['momentum'],
        weight_decay=cfg['weight_decay'])
    if resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=train_loader,
        val_loader=val_loader,
        out=out,
        max_iter=cfg['max_iteration'],
        # Default: validate once per epoch.
        interval_validate=cfg.get('interval_validate', len(train_loader)),
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()
def main():
    """Train FCN8sAtOnce on the ECP facades dataset."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-g', '--gpu', type=int, required=True, help='gpu id')
    parser.add_argument('--resume', help='checkpoint path')
    # configurations (same configuration as original work)
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    parser.add_argument('--max-iteration', type=int, default=100000,
                        help='max iteration')
    parser.add_argument('--lr', type=float, default=1.0e-10,
                        help='learning rate')
    parser.add_argument('--weight-decay', type=float, default=0.0005,
                        help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.99,
                        help='momentum')
    args = parser.parse_args()

    args.model = 'FCN8sAtOnce'
    args.git_hash = git_hash()

    # Log everything under a fresh timestamped directory and dump the
    # run configuration for reproducibility.
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S.%f')
    args.out = osp.join(here, 'logs', stamp)
    os.makedirs(args.out)
    with open(osp.join(args.out, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    cuda = torch.cuda.is_available()

    torch.manual_seed(1337)
    if cuda:
        torch.cuda.manual_seed(1337)

    # 1. dataset — ECP facade dataset with train/val splits.
    root = osp.expanduser('~/facades_datasets/5.ECP')
    extra = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    dataset_train = facade_dataset.ECP_Dataset(root, split='train')
    dataset_val = facade_dataset.ECP_Dataset(root, split='val')
    loader_train = torch.utils.data.DataLoader(
        dataset_train, batch_size=8, shuffle=True, **extra)
    loader_val = torch.utils.data.DataLoader(
        dataset_val, batch_size=1, shuffle=False, **extra)

    # 2. model — resume from a checkpoint, else initialise the encoder
    # from an ImageNet-pretrained VGG16.
    model = torchfcn.models.FCN8sAtOnce(n_class=9)
    start_epoch = 0
    start_iteration = 0
    if args.resume:
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        start_epoch = checkpoint['epoch']
        start_iteration = checkpoint['iteration']
    else:
        vgg16 = torchfcn.models.VGG16(pretrained=True)
        model.copy_params_from_vgg16(vgg16)
    if cuda:
        model = model.cuda()

    # 3. optimizer — biases get 2x learning rate and no weight decay.
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': args.lr * 2, 'weight_decay': 0},
        ],
        lr=args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay)
    if args.resume:
        optim.load_state_dict(checkpoint['optim_state_dict'])

    trainer = torchfcn.Trainer(
        cuda=cuda,
        model=model,
        optimizer=optim,
        train_loader=loader_train,
        val_loader=loader_val,
        out=args.out,
        max_iter=args.max_iteration,
        interval_validate=5000,
    )
    trainer.epoch = start_epoch
    trainer.iteration = start_iteration
    trainer.train()