parser.add_argument('--no_cuda', action='store_true', default=False,
                    help='enables CUDA training')  # flag name and action inferred from the args.no_cuda use below
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# build the list of test images and the model
test_left_img, test_right_img = DA.dataloader(args.datapath)

model = StereoNet(3, 3, args.maxdisp)
model = nn.DataParallel(model, device_ids=[0])
model.cuda()

if args.loadmodel is not None:
    state_dict = torch.load(args.loadmodel)
    model.load_state_dict(state_dict['state_dict'])

print('Number of model parameters: {}'.format(
    sum([p.data.nelement() for p in model.parameters()])))


def test(imgL, imgR):
    model.eval()
    if args.cuda:
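        # The rest of test() is cut off in this excerpt. What follows is a minimal
        # sketch of how such an evaluation step is typically finished (hedged: the
        # exact padding and output handling of this script are not shown here).
        imgL, imgR = imgL.cuda(), imgR.cuda()
    with torch.no_grad():
        output = model(imgL, imgR)            # assumed: a disparity map or a list of multi-stage maps
    if isinstance(output, (list, tuple)):
        output = output[-1]                   # assumption: keep the finest-stage prediction
    return output.data.cpu().numpy()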
def main():
    global args
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    (train_left_img, train_right_img, train_left_disp,
     test_left_img, test_right_img, test_left_disp) = lt.dataloader(args.datapath)

    TrainImgLoader = torch.utils.data.DataLoader(
        DA.myImageFloder(train_left_img, train_right_img, train_left_disp, True),
        batch_size=args.train_bsize,
        shuffle=False,
        num_workers=1,
        pin_memory=True,
        drop_last=False)

    TestImgLoader = torch.utils.data.DataLoader(
        DA.myImageFloder(test_left_img, test_right_img, test_left_disp, False),
        batch_size=args.test_bsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + 'training.log')
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ':' + str(value))

    model = StereoNet(maxdisp=args.maxdisp)
    model = nn.DataParallel(model).cuda()
    model.apply(weights_init)

    optimizer = optim.RMSprop(model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)

    log.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    args.start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}'".format(args.resume))
        else:
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> will start from scratch.")
    else:
        log.info("Not Resume")

    start_full_time = time.time()
    for epoch in range(args.start_epoch, args.epoch):
        log.info('This is {}-th epoch'.format(epoch))

        train(TrainImgLoader, model, optimizer, log, epoch)
        scheduler.step()  # adjust the learning rate after the epoch's updates (post-1.1 PyTorch ordering)

        savefilename = args.save_path + 'checkpoint_{}.pth'.format(epoch)
        torch.save(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, savefilename)

    test(TestImgLoader, model, log)
    log.info('full training time = {:.2f} Hours'.format(
        (time.time() - start_full_time) / 3600))
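# train() is called in main() above but its definition is not part of this excerpt.
# Below is a minimal sketch of what such a per-epoch training function could look
# like, assuming a smooth-L1 disparity loss masked to pixels with ground truth in
# (0, maxdisp); the actual implementation may weight the multi-stage outputs
# differently.
import torch.nn.functional as F


def train_sketch(loader, model, optimizer, log, epoch):
    model.train()
    for batch_idx, (imgL, imgR, disp_true) in enumerate(loader):
        imgL, imgR, disp_true = imgL.cuda(), imgR.cuda(), disp_true.cuda()
        mask = (disp_true > 0) & (disp_true < args.maxdisp)  # valid ground-truth pixels
        if mask.sum() == 0:
            continue

        optimizer.zero_grad()
        outputs = model(imgL, imgR)
        if not isinstance(outputs, (list, tuple)):
            outputs = [outputs]
        # drop a possible channel dimension so predictions match disp_true's shape
        outputs = [o.squeeze(1) if o.dim() == 4 else o for o in outputs]
        # equal weighting over all refinement stages (an assumption of this sketch)
        loss = sum(F.smooth_l1_loss(o[mask], disp_true[mask]) for o in outputs)
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            log.info('Epoch {} [{}/{}] training loss = {:.3f}'.format(
                epoch, batch_idx, len(loader), loss.item()))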
def main():
    global args
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    import pickle
    paths = pickle.load(open("paths_80.pkl", "rb"))
    train_left_img = []
    train_right_img = []
    train_left_disp = []
    for path in paths:
        train_left_img.append(path['img_l'])
        train_right_img.append(path['img_r'])
        train_left_disp.append(path['disp_l'])

    __normalize = {'mean': [0.0, 0.0, 0.0], 'std': [1.0, 1.0, 1.0]}
    TrainImgLoader = torch.utils.data.DataLoader(
        DA.myImageFloder(train_left_img, train_right_img, train_left_disp, True,
                         normalize=__normalize),
        batch_size=args.train_bsize,
        shuffle=True,
        num_workers=1,
        drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + '/training.log')
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ':' + str(value))

    model = StereoNet(k=args.stages - 1, r=args.stages - 1, maxdisp=args.maxdisp)
    model = nn.DataParallel(model).cuda()
    model.apply(weights_init)
    print('init with normal')

    optimizer = optim.RMSprop(model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)

    args.start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> will start from scratch.")
    else:
        log.info("Not Resume")

    start_full_time = time.time()
    for epoch in range(args.start_epoch, args.epoch):
        log.info('This is {}-th epoch'.format(epoch))

        train(TrainImgLoader, model, optimizer, log, epoch)

        savefilename = args.save_path + '/checkpoint.pth'
        torch.save(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, savefilename)

        scheduler.step()  # adjust the learning rate after each epoch

    # NOTE: no test loader is built in this script, so TestImgLoader must be
    # defined elsewhere for this call to run.
    test(TestImgLoader, model, log)
    log.info('full training time = {:.2f} Hours'.format(
        (time.time() - start_full_time) / 3600))
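# The paths_80.pkl file loaded above is expected to be a list of dicts with
# 'img_l', 'img_r' and 'disp_l' keys. Below is a minimal sketch of how such a file
# could be generated; the directory layout and file pattern are purely illustrative.
import glob
import pickle


def build_path_list(left_dir, right_dir, disp_dir, out_file='paths_80.pkl'):
    records = []
    for left in sorted(glob.glob(os.path.join(left_dir, '*.png'))):
        name = os.path.basename(left)
        records.append({
            'img_l': left,                            # left RGB image
            'img_r': os.path.join(right_dir, name),   # right RGB image
            'disp_l': os.path.join(disp_dir, name),   # left ground-truth disparity
        })
    with open(out_file, 'wb') as f:
        pickle.dump(records, f)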
import os

import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt

from models.StereoNet8Xmulti import StereoNet

torch.backends.cudnn.benchmark = True

# collect all KITTI test images (left camera)
dir_kitti = '/home/liu/DP_DATA/STEREO/KITTI/testing/image_2'
paths = []
for root, dirs, files in os.walk(dir_kitti):
    for file in files:
        paths.append(os.path.join(root, file))

net = StereoNet(3, 3, 192)
# net = net.cuda()
net = torch.nn.DataParallel(net).cuda()
checkpoint = torch.load(
    '/home/liu/workspace/StereoNet-ActiveStereoNet/results/8Xmulti/checkpoint.pth')
net.load_state_dict(checkpoint['state_dict'])

# identity normalization (mean 0, std 1) plus its inverse for visualization
mean = torch.tensor([0., 0., 0.], dtype=torch.float32)
std = torch.tensor([1., 1., 1.], dtype=torch.float32)
totensor = transforms.ToTensor()
normalize = transforms.Normalize(mean.tolist(), std.tolist())
unnormalize = transforms.Normalize((-mean / std).tolist(), (1.0 / std).tolist())
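# A usage sketch for the objects set up above: read one left/right KITTI pair, run
# the network, and display the predicted disparity. The image_3 substitution for the
# right view, the lack of padding, and the output handling are assumptions of this
# sketch; KITTI frames usually need padding to a multiple of the network's
# downsampling factor before inference.
from PIL import Image

left_path = paths[0]
right_path = left_path.replace('image_2', 'image_3')

imgL = normalize(totensor(Image.open(left_path).convert('RGB'))).unsqueeze(0).cuda()
imgR = normalize(totensor(Image.open(right_path).convert('RGB'))).unsqueeze(0).cuda()

net.eval()
with torch.no_grad():
    disp = net(imgL, imgR)
if isinstance(disp, (list, tuple)):
    disp = disp[-1]                     # keep the finest-stage prediction

plt.imshow(disp.squeeze().cpu().numpy(), cmap='plasma')
plt.colorbar()
plt.show()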