Example #1
parser.add_argument('--seed',
                    type=int,
                    default=1,
                    metavar='S',
                    help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

# set gpu id used
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

all_left_img, all_right_img, all_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
    args.datapath)

TrainImgLoader = torch.utils.data.DataLoader(
    DA.myImageFloder(all_left_img, all_right_img, all_left_disp, True),
    batch_size=12, shuffle=True, num_workers=8, drop_last=False)

TestImgLoader = torch.utils.data.DataLoader(
    DA.myImageFloder(test_left_img, test_right_img, test_left_disp, False),
    batch_size=8, shuffle=False, num_workers=4, drop_last=False)


if args.model == 'stackhourglass':
    model = stackhourglass(args.maxdisp)
elif args.model == 'basic':
    model = basic(args.maxdisp)
else:
    print('no model')

if args.cuda:
Example #2
def main():
    global args

    train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
        args.datapath)

    TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        train_left_img, train_right_img, train_left_disp, True),
                                                 batch_size=args.train_bsize,
                                                 shuffle=True,
                                                 num_workers=4,
                                                 drop_last=False)

    TestImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        test_left_img, test_right_img, test_left_disp, False),
                                                batch_size=args.test_bsize,
                                                shuffle=False,
                                                num_workers=4,
                                                drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + '/training.log')
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ': ' + str(value))

    model = models.anynet.AnyNet(args)
    model = nn.DataParallel(model).cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    log.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    args.start_epoch = 0
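    # optionally resume training from a saved checkpoint (restores epoch, model, and optimizer state)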
    if args.resume:
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> Will start from scratch.")
    else:
        log.info('Not Resume')

    start_full_time = time.time()
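    # run args.epochs training epochs, overwriting checkpoint.tar after each one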
    for epoch in range(args.start_epoch, args.epochs):
        log.info('This is {}-th epoch'.format(epoch))

        train(TrainImgLoader, model, optimizer, log, epoch)

        savefilename = args.save_path + '/checkpoint.tar'
        torch.save(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, savefilename)

    test(TestImgLoader, model, log)
    log.info('full training time = {:.2f} Hours'.format(
        (time.time() - start_full_time) / 3600))
Example #3
def main():
    global args
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    import pickle
    paths = pickle.load(open("paths_80.pkl", "rb"))

    train_left_img = []
    train_right_img = []
    train_left_disp = []

    for path in paths:
        train_left_img.append(path['img_l'])
        train_right_img.append(path['img_r'])
        train_left_disp.append(path['disp_l'])

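    # identity normalization (zero mean, unit std): pixel values are passed through unchanged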
    __normalize = {'mean': [0.0, 0.0, 0.0], 'std': [1.0, 1.0, 1.0]}
    TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        train_left_img,
        train_right_img,
        train_left_disp,
        True,
        normalize=__normalize),
                                                 batch_size=args.train_bsize,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + '/training.log')
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ':' + str(value))

    model = StereoNet(k=args.stages - 1,
                      r=args.stages - 1,
                      maxdisp=args.maxdisp)
    model = nn.DataParallel(model).cuda()
    model.apply(weights_init)
    print('init with normal')

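    # RMSprop optimizer with StepLR decay: the learning rate is multiplied by gamma every args.stepsize epochs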
    optimizer = optim.RMSprop(model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=args.stepsize,
                                    gamma=args.gamma)

    args.start_epoch = 0

    if args.resume:
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format((args.resume)))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> will start from scratch.")
    else:
        log.info("Not Resume")
    start_full_time = time.time()
    for epoch in range(args.start_epoch, args.epoch):
        log.info('This is {}-th epoch'.format(epoch))

        train(TrainImgLoader, model, optimizer, log, epoch)

        savefilename = args.save_path + '/checkpoint.pth'
        torch.save(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, savefilename)
        scheduler.step()  # will adjust learning rate

    test(TestImgLoader, model, log)
    log.info('full training time = {:.2f} Hours'.format(
        (time.time() - start_full_time) / 3600))
Example #4
def main():
    global args
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
        args.datapath)
    TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        train_left_img, train_right_img, train_left_disp, True),
                                                 batch_size=args.train_bsize,
                                                 shuffle=False,
                                                 num_workers=1,
                                                 pin_memory=True,
                                                 drop_last=False)
    TestImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        test_left_img, test_right_img, test_left_disp, False),
                                                batch_size=args.test_bsize,
                                                shuffle=False,
                                                num_workers=4,
                                                pin_memory=True,
                                                drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + 'training.log')
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ':' + str(value))

    model = StereoNet(maxdisp=args.maxdisp)
    model = nn.DataParallel(model).cuda()
    model.apply(weights_init)

    optimizer = optim.RMSprop(model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.StepLR(optimizer,
                                    step_size=args.stepsize,
                                    gamma=args.gamma)

    log.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    args.start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format((args.resume)))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}' ".format(args.resume))
        else:
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> will start from scratch.")
    else:
        log.info("Not Resume")

    start_full_time = time.time()
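    # train for args.epoch epochs; the scheduler is stepped at the start of each epoch,
    # and a checkpoint is saved and evaluated after each epoch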
    for epoch in range(args.start_epoch, args.epoch):
        log.info('This is {}-th epoch'.format(epoch))
        scheduler.step()

        train(TrainImgLoader, model, optimizer, log, epoch)

        savefilename = args.save_path + 'checkpoint_{}.pth'.format(epoch)
        torch.save(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, savefilename)

        test(TestImgLoader, model, log)
    log.info('full training time = {:.2f} Hours'.format(
        (time.time() - start_full_time) / 3600))
Example #5
parser.add_argument('--seed',
                    type=int,
                    default=1,
                    metavar='S',
                    help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

all_left_img, all_right_img, all_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
    args.datapath)

TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
    all_left_img, all_right_img, all_left_disp, True),
                                             batch_size=12,
                                             shuffle=True,
                                             num_workers=8,
                                             drop_last=False)

TestImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
    test_left_img, test_right_img, test_left_disp, False),
                                            batch_size=8,
                                            shuffle=False,
                                            num_workers=4,
                                            drop_last=False)

if args.model == 'stackhourglass':
    model = stackhourglass(args.maxdisp)
elif args.model == 'basic':
Example #6
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if not os.path.isdir(args.savemodel):
    os.mkdir(args.savemodel)

# set gpu id used
os.environ['CUDA_VISIBLE_DEVICES'] = '1'

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

all_left_img, all_right_img, all_left_disp, test_left_img, test_right_img, test_left_disp, test_imgname = lt.dataloader(args.datapath)

TrainImgLoader = torch.utils.data.DataLoader(
    SecenFlowLoader.myImageFloder(all_left_img, all_right_img, all_left_disp, training=True),
    batch_size=args.trainbatch, shuffle=True, num_workers=8, drop_last=False)

TestImgLoader = torch.utils.data.DataLoader(
    SecenFlowLoader.myImageFloder(test_left_img, test_right_img, test_left_disp, training=False),
    batch_size=args.testbatch, shuffle=False, num_workers=4, drop_last=False)

if args.model == 'stackhourglass':
    model = PSMNet_STACK(args.maxdisp)
elif args.model == 'basic':
    model = PSMNet_BASIC(args.maxdisp)
else:
    print('no model')

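# load a saved state dict when args.loadmodel is given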
if args.loadmodel is not None:
    state_dict = torch.load(args.loadmodel)
Example #7
def main():

    global args

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

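    # distributed setup: when launched with WORLD_SIZE > 1, pin each process to its GPU
    # and initialize the NCCL process group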
    if args.distributed:

        if 'WORLD_SIZE' in os.environ:
            args.distributed = int(os.environ['WORLD_SIZE']) > 1

        args.world_size = 1

        if args.distributed:
            torch.cuda.set_device(args.local_rank)
            torch.distributed.init_process_group(backend='nccl',
                                                 init_method='env://')
            args.world_size = torch.distributed.get_world_size()
        assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."

    train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
        args.datapath)

    train_set = DA.myImageFloder(train_left_img, train_right_img,
                                 train_left_disp, True)
    val_set = DA.myImageFloder(test_left_img, test_right_img, test_left_disp,
                               False)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_set)
        val_sampler = torch.utils.data.distributed.DistributedSampler(val_set)

    else:
        train_sampler = None
        val_sampler = None

    TrainImgLoader = torch.utils.data.DataLoader(train_set,
                                                 batch_size=args.train_bsize,
                                                 shuffle=False,
                                                 num_workers=4,
                                                 pin_memory=True,
                                                 sampler=train_sampler,
                                                 drop_last=False)

    TestImgLoader = torch.utils.data.DataLoader(val_set,
                                                batch_size=args.test_bsize,
                                                shuffle=False,
                                                num_workers=4,
                                                pin_memory=True,
                                                sampler=None,
                                                drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + '/training.log')

    if args.local_rank == 0:
        log.info('len train_left_img: {}'.format(len(train_left_img)))
        log.info('len test_left_img: {}'.format(len(test_left_img)))

    if args.local_rank == 0:
        for key, value in sorted(vars(args).items()):
            log.info(str(key) + ': ' + str(value))

    if args.model_types == "PSMNet":
        model = PSMNet(args)
        args.loss_weights = [0.5, 0.7, 1.]

    elif args.model_types == "PSMNet_DSM":
        model = PSMNet_DSM(args)
        args.loss_weights = [0.5, 0.7, 1.]

    elif args.model_types in ("Hybrid_Net_DSM", "Hybrid_Net"):
        model = Hybrid_Net(args)
        args.loss_weights = [0.5, 0.7, 1., 1., 1.]

    else:
        AssertionError("model error")

    if args.count_flops:

        FLOPs, param = count_flops(model.cuda())
        if args.local_rank == 0:
            log.info("macs:{}".format(FLOPs))
            log.info("parameters:{} ".format(param))

    if args.sync_bn:
        if args.local_rank == 0:
            log.info(
                "using apex synced BN-----------------------------------------------------"
            )
        model = apex.parallel.convert_syncbn_model(model)

    model = model.cuda()

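    # Adam optimizer; amp.initialize wraps model and optimizer for Apex mixed-precision training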
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    model, optimizer = amp.initialize(
        model,
        optimizer,
        opt_level=args.opt_level,
        keep_batchnorm_fp32=args.keep_batchnorm_fp32,
        loss_scale=args.loss_scale)

    if args.distributed:
        if args.local_rank == 0:
            log.info(
                "using distributed-----------------------------------------------------"
            )
        model = DDP(model, delay_allreduce=True)

    if args.local_rank == 0:
        log.info('Number of model parameters: {}'.format(
            sum([p.data.nelement() for p in model.parameters()])))

    args.start_epoch = 0
    if args.resume:

        if os.path.isfile(args.resume):

            checkpoint = torch.load(args.resume, map_location='cpu')
            args.start_epoch = checkpoint['epoch'] + 1
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            if args.local_rank == 0:
                log.info("=> loading checkpoint '{}'".format(args.resume))
                log.info("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
        else:
            if args.local_rank == 0:
                log.info("=> no checkpoint found at '{}'".format(args.resume))
                log.info("=> Will start from scratch.")
    else:
        if args.local_rank == 0:
            log.info('Not Resume')

    start_full_time = time.time()

    if args.train:

        for epoch in range(args.start_epoch, args.epochs):
            if args.distributed:
                train_sampler.set_epoch(epoch)
            if args.local_rank == 0:
                log.info('This is {}-th epoch'.format(epoch))
            adjust_learning_rate(optimizer, epoch)

            train(TrainImgLoader, model, optimizer, log, epoch)

            # SAVE
            if args.local_rank == 0:
                savefilename = args.save_path + '/checkpoint_' + str(
                    epoch) + '.tar'
                torch.save(
                    {
                        'epoch': epoch,
                        'state_dict': model.state_dict(),
                        'optimizer': optimizer.state_dict(),
                    }, savefilename)

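            # run validation every 10 epochs (whenever epoch % 10 == 0)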
            if not epoch % 10:
                test(TestImgLoader, model, log)

    test(TestImgLoader, model, log)

    if args.local_rank == 0:
        log.info('full training time = {:.2f} Hours'.format(
            (time.time() - start_full_time) / 3600))
Example #8
parser.add_argument(
    '--seed',
    type=int,
    default=1,
    metavar='S',
    help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

_, _, _, test_left_img, test_right_img, test_left_disp = lt.dataloader(
    args.datapath)

TestImgLoader = torch.utils.data.DataLoader(
    DA.myImageFloder(
        test_left_img,
        test_right_img,
        test_left_disp,
        False,
        normalize=__normalize),
    batch_size=args.batchsize,
    shuffle=False,
    num_workers=min(4, args.batchsize),
    drop_last=False)

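# evaluation-only setup: build StereoNet and wrap it in DataParallel when CUDA is available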
model = StereoNet(args.scalenum, args.scalenum, args.maxdisp)

if args.cuda:
    model = nn.DataParallel(model)
    model.cuda()


def test(imgL, imgR, disp_true):
Example #9
parser.add_argument(
    '--seed',
    type=int,
    default=1,
    metavar='S',
    help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

_, _, _, test_left_img, test_right_img, test_left_disp = lt.dataloader(
    args.datapath)

TestImgLoader = torch.utils.data.DataLoader(
    DA.myImageFloder(test_left_img, test_right_img, test_left_disp, False),
    batch_size=2,
    shuffle=False,
    num_workers=2,
    drop_last=False)

if args.model == 'stackhourglass':
    model = stackhourglass(args.maxdisp)
elif args.model == 'basic':
    model = basic(args.maxdisp)
elif args.model == 'stereoSRR':
    model = stereoSRR(args.maxdisp)
else:
    print('no model')
print(f'Use model {args.model}')
Example #10
def main():
    global args

    train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
        args.datapath)

    TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        train_left_img, train_right_img, train_left_disp, True),
                                                 batch_size=args.train_bsize,
                                                 shuffle=True,
                                                 num_workers=4,
                                                 drop_last=False)

    TestImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        test_left_img, test_right_img, test_left_disp, False),
                                                batch_size=args.test_bsize,
                                                shuffle=False,
                                                num_workers=4,
                                                drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)

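    # when --test is given, log to testing.log and create a separate Testing output directory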
    if (args.test):
        logFn = "/testing.log"
        if (not os.path.isdir(args.save_path + "/Testing")):
            os.makedirs(args.save_path + "/Testing")

        print("\n=== Testing ===")
    else:
        logFn = "/training.log"

    log = logger.setup_logger(args.save_path + logFn)
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ': ' + str(value))

    model = models.anynet.AnyNet(args)
    # model = nn.DataParallel(model).cuda()
    model = model.cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    log.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    args.start_epoch = 0
    if args.test:
        # Only perform test. --resume option is assumed to be issued at the same time.
        if (args.resume is None):
            raise Exception(
                "--resume arguments must be set while --test is issued.")

        if (not os.path.isfile(args.save_path + "/" + args.resume)):
            raise Exception("Checkpoint %s does not exist." %
                            (args.save_path + "/" + args.resume))

        log.info("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.save_path + "/" + args.resume)
        model.load_state_dict(checkpoint['state_dict'])
    elif args.resume:
        if os.path.isfile(args.save_path + "/" + args.resume):
            log.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.save_path + "/" + args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> Will start from scratch.")
    else:
        log.info('Not Resume')

    start_full_time = time.time()

    if (not args.test):
        for epoch in range(args.start_epoch, args.epochs):
            log.info('This is {}-th epoch'.format(epoch))

            train(TrainImgLoader, model, optimizer, log, epoch)

            savefilename = args.save_path + '/checkpoint.tar'
            torch.save(
                {
                    'epoch': epoch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, savefilename)

    if (not args.test):
        test(TestImgLoader, model, log)
    else:
        test(TestImgLoader, model, log, args.test_stride,
             args.save_path + "/Testing")

    log.info('full training time = {:.2f} Hours'.format(
        (time.time() - start_full_time) / 3600))
Example #11
parser.add_argument('--colormode', type=int, default=1,
                    help='load image as RGB or gray')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

train_batch = 12
test_batch = 3

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

all_left_img, all_right_img, all_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(args.datapath)

TrainImgLoader = torch.utils.data.DataLoader(
    DA.myImageFloder(all_left_img, all_right_img, all_left_disp, True, colormode=args.colormode),
    batch_size=train_batch, shuffle=True, num_workers=8, drop_last=False)

TestImgLoader = torch.utils.data.DataLoader(
    DA.myImageFloder(test_left_img, test_right_img, test_left_disp, False, colormode=args.colormode),
    batch_size=test_batch, shuffle=False, num_workers=4, drop_last=False)


if args.model == 'stackhourglass':
    model = stackhourglass(args.maxdisp, args.colormode)
elif args.model == 'basic':
    model = basic(args.maxdisp, args.colormode)
else:
    print('no model')

if args.cuda:
Example #12
parser.add_argument('--seed',
                    type=int,
                    default=1,
                    metavar='S',
                    help='random seed (default: 1)')
args = parser.parse_args()
args.cuda = torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

all_left_img, all_right_img, all_left_disp, test_left_img, test_right_img, test_left_disp = mlt.dataloader(
    args.datapath)

TestImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
    test_left_img, test_right_img, test_left_disp, True),
                                            batch_size=1,
                                            shuffle=False,
                                            num_workers=4,
                                            drop_last=False)

if args.model == 'stackhourglass':
    model = stackhourglass(args.maxdisp)
elif args.model == 'basic':
    model = basic(args.maxdisp)
else:
    print('no model')

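# epochs whose checkpoints will be loaded (only the first four are used), presumably to build an ensemble of models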
epoch_list = [990, 986, 988, 922, 991, 994, 993, 987]
epoch_list = epoch_list[:4]
model_list = []
Example #13
def main():

    global args

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    if args.datatype == '2015':
        all_left_img, all_right_img, all_left_disp, test_left_img, test_right_img, test_left_disp = ls2015.dataloader(
            args.datapath2015, split=args.split_for_val)
        from dataloader import KITTILoader as DA

    elif args.datatype == 'Sence Flow':
        train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
            args.datapath)
        from dataloader import SecenFlowLoader as DA

    else:
        raise AssertionError

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)

    log = logger.setup_logger(args.save_path + '/FLOPs_inference_time.log')
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ': ' + str(value))
    if args.model_types == "PSMNet":
        model = PSMNet(args)
        args.loss_weights = [0.5, 0.7, 1.]
        #from dataloader import SecenFlowLoader as DA

    elif args.model_types == "PSMNet_TSM":
        model = PSMNet_TSM(args)
        args.loss_weights = [0.5, 0.7, 1.]
        #from dataloader import SecenFlowLoader as DA

    elif args.model_types == "Hybrid_Net":
        model = Hybrid_Net(args)
        args.loss_weights = [0.5, 0.7, 1., 1., 1.]
        #from dataloader import SecenFlowLoader as DA

    elif args.model_types == "Hybrid_Net_DSM" :
        model = Hybrid_Net(args)
        args.loss_weights = [0.5, 0.7, 1., 1., 1.]
        #from dataloader import SecenFlowLoader as DA



    else:
        raise AssertionError("model error")

    model = nn.DataParallel(model).cuda()

    for i in range(30):
        #print("test_left_img", test_left_img[i])
        log.info("=> test_left_img '{}'".format(test_left_img[i]))

    TestImgLoader = torch.utils.data.DataLoader(
        DA.myImageFloder(test_left_img, test_right_img, test_left_disp, False),
        batch_size=args.test_bsize, shuffle=False, num_workers=4, drop_last=False)

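    # load a checkpoint for evaluation; only the model weights and the epoch counter are restored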
    if args.resume:
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch'] + 1
            model.load_state_dict(checkpoint['state_dict'])
            #optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}' (epoch {})"
                     .format(args.resume, checkpoint['epoch']))
        else:
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> Will start from scratch.")
    else:
        log.info('Not Resume')

    log.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    #if args.testing:
    test(TestImgLoader, model, log)
Example #14
def main():
    global args
    train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
        args.datapath)

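    # sort the file lists so that left images, right images, and disparity maps stay index-aligned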
    train_left_img.sort()
    train_right_img.sort()
    train_left_disp.sort()

    test_left_img.sort()
    test_right_img.sort()
    test_left_disp.sort()

    TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        train_left_img, train_right_img, train_left_disp, True),
                                                 batch_size=2,
                                                 shuffle=True,
                                                 num_workers=8,
                                                 drop_last=False)

    TestImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        test_left_img, test_right_img, test_left_disp, False),
                                                batch_size=2,
                                                shuffle=False,
                                                num_workers=4,
                                                drop_last=False)

    if not os.path.isdir(args.savepath):
        os.makedirs(args.savepath)
    log = logger.setup_logger(args.savepath + '/training.log')
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ':' + str(value))

    lr = args.lr
    model = iresNet()
    model = nn.DataParallel(model).cuda()

    optimizer = optim.Adam(model.parameters(),
                           lr=lr,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=0)

    log.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    args.start_epoch = 0

    if args.loadmodel:
        if os.path.isfile(args.loadmodel):
            log.info("=> loading checkpoint '{}'".format(args.loadmodel))
            checkpoint = torch.load(args.loadmodel)
            args.start_epoch = checkpoint['epoch']
        else:
            log.info("=> no checkpoint '{}'".format(args.loadmodel))
            log.info("=> will start from scratch.")
    else:
        log.info("Not Resume")

    # train
    start_full_time = time.time()  # count the time spent training
    for epoch in range(args.start_epoch, args.epoch):
        log.info('This is {}-th epoch'.format(epoch))

        train(train_left_img, train_right_img, train_left_disp, model,
              optimizer, log)

        savefilename = args.savepath + '/checkpoint.pth'
        torch.save(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            }, savefilename)

    test(test_left_img, test_right_img, test_left_disp, model, log)
    log.info('Full training time = {:.2f} Hours'.format(
        (time.time() - start_full_time) / 3600))
Example #15
# set gpu id used
#os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"

# if args.KITTI == '2015':
#     from dataloader import KITTIloader2015 as ls
# else:
#     from dataloader import KITTIloader2012 as ls

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

all_left, all_right, all_gt, test_left, test_right, test_gt = lt.dataloader(
    args.datapath)

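# the boolean flag passed to myImageFloder selects training-time (True) vs. test-time (False) loading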
Trainloader = torch.utils.data.DataLoader(DA.myImageFloder(
    all_left, all_right, all_gt, True),
                                          batch_size=12,
                                          shuffle=True,
                                          num_workers=8,
                                          drop_last=False)

Testloader = torch.utils.data.DataLoader(DA.myImageFloder(
    test_left, test_right, test_gt, False),
                                         batch_size=8,
                                         shuffle=False,
                                         num_workers=4,
                                         drop_last=False)

# if args.model == 'stackhourglass':
#     model = stackhourglass(args.maxdisp)
# elif args.model == 'basic':
Example #16
def main():
    global args

    train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
        args.datapath)

    TrainImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        train_left_img, train_right_img, train_left_disp, True),
                                                 batch_size=args.train_bsize,
                                                 shuffle=True,
                                                 num_workers=4,
                                                 drop_last=False)

    TestImgLoader = torch.utils.data.DataLoader(DA.myImageFloder(
        test_left_img, test_right_img, test_left_disp, False),
                                                batch_size=args.test_bsize,
                                                shuffle=False,
                                                num_workers=4,
                                                drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    # log = logger.setup_logger(args.save_path + '/training.log')
    # for key, value in sorted(vars(args).items()):
    #     log.info(str(key) + ': ' + str(value))

    model = models.anynet.AnyNet(args)
    model = nn.DataParallel(model).cuda()
    # optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    # log.info('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))

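    # when args.loadmodel is set, initialize from a pretrained checkpoint, keeping only the
    # parameters whose names match the current model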
    num_pretrain_items = 0
    num_model_items = 0
    if args.loadmodel is not None:
        pretrained_dict = torch.load(args.loadmodel)
        # start_epoch = pretrained_dict['epoch'] + 1
        model_dict = model.state_dict()
        pretrained_dict = {
            k: v
            for k, v in pretrained_dict['state_dict'].items()
            if k in model_dict
        }
        num_pretrain_items = len(pretrained_dict.items())
        num_model_items = len(model_dict.items())
        print('Number of pretrained items: {:d}'.format(num_pretrain_items))
        print('Number of model items: {:d}'.format(num_model_items))
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        # state_dict = torch.load(args.loadmodel)
        # model.load_state_dict(state_dict['state_dict'])
    else:
        start_epoch = 1
        model_dict = model.state_dict()
        num_model_items = len(model_dict.items())
        print('Number of model items: {:d}'.format(num_model_items))

    if args.start_epoch != 1:
        start_epoch = args.start_epoch
    else:
        start_epoch = 1
    print(model)
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

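    # either train the full network, or freeze the first args.fixnum parameter tensors and optimize only the rest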
    if args.trainfull:
        optimizer = optim.Adam(model.parameters(),
                               lr=0.001,
                               betas=(0.9, 0.999))
    else:
        for i, p in enumerate(model.parameters()):
            print(i, p.shape)
            if i < args.fixnum:
                p.requires_grad = False
        optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                      model.parameters()),
                               lr=0.001,
                               betas=(0.9, 0.999))

    # args.start_epoch = 0
    # if args.resume:
    #     if os.path.isfile(args.resume):
    #         log.info("=> loading checkpoint '{}'".format(args.resume))
    #         checkpoint = torch.load(args.resume)
    #         args.start_epoch = checkpoint['epoch']
    #         model.load_state_dict(checkpoint['state_dict'])
    #         optimizer.load_state_dict(checkpoint['optimizer'])
    #         log.info("=> loaded checkpoint '{}' (epoch {})"
    #                  .format(args.resume, checkpoint['epoch']))
    #     else:
    #         log.info("=> no checkpoint found at '{}'".format(args.resume))
    #         log.info("=> Will start from scratch.")
    # else:
    #     log.info('Not Resume')

    train_step = 0
    test_step = 0
    start_full_time = time.time()
    for epoch in range(start_epoch, args.epochs + 1):
        # log.info('This is {}-th epoch'.format(epoch))
        print('This is {}-th epoch'.format(epoch))

        # train(TrainImgLoader, model, optimizer, log, epoch)
        train_losses, train_step = train(TrainImgLoader, model, optimizer,
                                         epoch, train_step)
        test_losses, test_step = test(TestImgLoader, model, epoch, test_step)

        savefilename = args.save_path + 'sf_' + str(epoch) + '.tar'
        torch.save(
            {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, savefilename)

    # test(TestImgLoader, model, log)
    # log.info('full training time = {:.2f} Hours'.format((time.time() - start_full_time) / 3600))
    print('full training time = %.2f HR' %
          ((time.time() - start_full_time) / 3600))