Example 1
def main(args):
	if args.save_dir is not None and args.save_dir != 'None':
		if not os.path.exists(args.save_dir):
			os.makedirs(args.save_dir)
			
	# load model
	do_flow = True
	do_disp = False
	if args.joint_model:
		do_disp = True
	model = model_utils.make_model(args, do_flow, do_disp, do_seg=args.do_seg)
	print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))

	dataset_info = args.dataset + '_{}'.format(args.split)
	if args.pass_opt is not None:
		dataset_info += '_{}'.format(args.pass_opt)

	ckpt = torch.load(args.loadmodel)
	state_dict = ckpt['state_dict']
	model.load_state_dict(model_utils.patch_model_state_dict(state_dict))
	print('==> Successfully loaded a model {}.'.format(args.loadmodel))

	model.eval()
	cudnn.benchmark = True

	train_data, test_data = make_flow_disp_data(args.dataset)
	if args.split.startswith('train'):      # train or training
		data = train_data
	else:
		data = test_data
	# print('There are {} data items to be processed.'.format(len(data)))

	for i in tqdm(range(len(data))):
		process_single_data(model, data[i], args)
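
Every example below routes checkpoints through model_utils.patch_model_state_dict before load_state_dict. The SENSE implementation is not shown here; a minimal sketch of what such a helper conventionally does (an assumption, not the actual code) is to strip the 'module.' prefix that nn.DataParallel adds to parameter names:

def patch_model_state_dict(state_dict):
    # Assumption: the checkpoint was saved from an nn.DataParallel-wrapped
    # model, so parameter names carry a 'module.' prefix that a bare model
    # rejects; strip it and pass all other keys through untouched.
    return {k[len('module.'):] if k.startswith('module.') else k: v
            for k, v in state_dict.items()}
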
Example 2
def main(args):
    if args.save_dir is not None and args.save_dir != 'None':
        if not os.path.exists(args.save_dir):
            os.makedirs(args.save_dir)
        else:
            # wipe stale results from a previous run before regenerating
            os.system('rm -rf {}/*'.format(args.save_dir))
        os.makedirs(os.path.join(args.save_dir, 'flow_raw'))
        os.makedirs(os.path.join(args.save_dir, 'flow_occ'))
        os.makedirs(os.path.join(args.save_dir, 'disp_0'))
        os.makedirs(os.path.join(args.save_dir, 'disp_1_unwarped'))
        os.makedirs(os.path.join(args.save_dir, 'seg'))

    # make flow, disp, and seg model
    print('==> Making a holistic backbone model')
    do_flow = True
    do_disp = True
    do_seg = True
    model = model_utils.make_model(args, do_flow, do_disp, do_seg=do_seg)

    ckpt = torch.load(args.loadmodel)
    state_dict = model_utils.patch_model_state_dict(ckpt['state_dict'])
    model.load_state_dict(state_dict)
    model.eval()
    print('==> Successfully loaded a model {}.'.format(args.loadmodel))

    path_list = make_kitti2015_paths(args.kitti_dir, args.split)

    args.stride = 32
    for i in tqdm(range(len(path_list))):
        process_single_data(model, path_list[i], args)
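
The args.stride = 32 assignment suggests the backbone downsamples by a factor of 32, so inputs presumably get padded up to the next multiple of the stride before inference. A minimal sketch of that padding arithmetic (an assumption; process_single_data is not shown):

import math

def pad_to_stride(height, width, stride=32):
    # Round both spatial dims up to the next multiple of the stride so each
    # downsampling stage divides evenly (assumed requirement of the backbone).
    return math.ceil(height / stride) * stride, math.ceil(width / stride) * stride
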
Example 3
def main(args):
	if args.save_dir is not None and args.save_dir != 'None':
		if not os.path.exists(args.save_dir):
			os.makedirs(args.save_dir)
			
	# load model
	model = model_utils.make_model(
		args, 
		do_flow=not args.no_flow, 
		do_disp=not args.no_disp, 
		do_seg=True
	)

	ckpt = torch.load(args.loadmodel)
	model.load_state_dict(ckpt['state_dict'])
	print('==> Successfully loaded a model {}.'.format(args.loadmodel))

	model.eval()

	# if args.split == 'training':
	#     args.split = 'train'
	# if args.split == 'test' and args.dataset.startswith('kitti'):
	#     args.split = 'val'
	# phase = args.dataset + '_' + args.split
	# data_dir = args.datapath
	
	# # single-scale testing
	# dataset = SegList(data_dir, phase, 
	#     ToTensor(convert_pix_range=False), 
	#     list_dir=data_dir, out_name=True, im_format='cv2'
	# )
	# test_loader = torch.utils.data.DataLoader(
	#     dataset,
	#     batch_size=1, shuffle=False, num_workers=8,
	#     pin_memory=False
	# )

	kitti_dir = '/home/hzjiang/workspace/Data/KITTI_scene_flow'
	all_im_paths = make_kitti2015_paths(kitti_dir, args.split)
	print('{} samples found for {}.'.format(len(all_im_paths), args.split))

	cudnn.benchmark = True

	out_dir = args.save_dir
	test(
		all_im_paths, 
		model, 
		args.num_seg_class, 
		save_vis=args.save_dir is not None,
		output_dir=args.save_dir
	)
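
make_kitti2015_paths is not shown. A speculative sketch based on the standard KITTI 2015 scene flow layout (image_2/image_3 for the left/right cameras, frames *_10.png and *_11.png for the current and next time step); the real helper may differ:

import glob
import os

def make_kitti2015_paths(kitti_dir, split):
    # KITTI scene flow ships 'training' and 'testing' subsets; map common
    # split names onto them (assumption about the expected split values).
    subset = 'training' if split.startswith('train') else 'testing'
    cur_left = sorted(glob.glob(os.path.join(kitti_dir, subset, 'image_2', '*_10.png')))
    paths = []
    for cl in cur_left:
        cr = cl.replace('image_2', 'image_3')   # right camera, frame t
        nl = cl.replace('_10.png', '_11.png')   # left camera, frame t+1
        nr = cr.replace('_10.png', '_11.png')   # right camera, frame t+1
        paths.append((cl, cr, nl, nr))
    return paths
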
Example 4
File: demo.py Project: wpfhtl/SENSE
def main(args):
    # holistic scene model
    print('==> Making a holistic scene model.')
    holistic_scene_model = model_utils.make_model(args,
                                                  do_flow=True,
                                                  do_disp=True,
                                                  do_seg=True)
    # print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
    holistic_scene_model_path = 'data/pretrained_models/kitti2012+kitti2015_new_lr_schedule_lr_disrupt+semi_loss_v3.pth'
    ckpt = torch.load(holistic_scene_model_path)
    state_dict = model_utils.patch_model_state_dict(ckpt['state_dict'])
    holistic_scene_model.load_state_dict(state_dict)
    holistic_scene_model.eval()

    # warped disparity refinement model for scene flow estimation
    print(
        '==> Making a warped disparity refinment model for scene flow estimation.'
    )
    warp_disp_ref_model = UNet()
    warp_disp_ref_model = nn.DataParallel(warp_disp_ref_model).cuda()
    warp_disp_ref_model_path = 'data/pretrained_models/kitti2015_warp_disp_refine_1500.pth'
    ckpt = torch.load(warp_disp_ref_model_path)
    state_dict = ckpt['state_dict']
    warp_disp_ref_model.load_state_dict(state_dict)
    warp_disp_ref_model.eval()

    cudnn.benchmark = True

    # input data
    cur_left_im = imread('data/image_2/000010_10.png')
    cur_right_im = imread('data/image_3/000010_10.png')
    nxt_left_im = imread('data/image_2/000010_11.png')
    nxt_right_im = imread('data/image_3/000010_11.png')
    camera_data = read_camera_data('data/calib_cam_to_cam/000010.txt')

    # optical flow, stereo disparity, and semantic segmentation estimation
    print(
        '==> Running the holistic scene model for optical flow, stereo disparity, and semantic segmentation.'
    )
    flow_raw, flow_occ, disp0, disp1_unwarped, seg = run_holistic_scene_model(
        cur_left_im, cur_right_im, nxt_left_im, nxt_right_im,
        holistic_scene_model)

    # run refinement for warped disparity (disp1) for scene flow estimation
    print('==> Running the warped disparity refinement model.')
    flow_rigid, disp1_raw, disp1_rigid, disp1_nn = run_warped_disparity_refinement(
        cur_left_im, flow_raw, flow_occ, disp0, disp1_unwarped, seg,
        camera_data, warp_disp_ref_model)

    # save results
    print('==> Saving results.')
    os.makedirs('data/results', exist_ok=True)
    smisc.imsave('data/results/000010_flow_raw.png',
                 kitti_viz.flow_to_color(flow_raw))
    smisc.imsave('data/results/000010_flow_rigid.png',
                 kitti_viz.flow_to_color(flow_rigid))
    smisc.imsave('data/results/000010_disp0.png',
                 kitti_viz.disp_to_color(disp0))
    smisc.imsave('data/results/000010_disp1_raw.png',
                 kitti_viz.disp_to_color(disp1_raw))
    smisc.imsave('data/results/000010_disp1_rigid.png',
                 kitti_viz.disp_to_color(disp1_rigid))
    smisc.imsave('data/results/000010_disp1_nn.png',
                 kitti_viz.disp_to_color(disp1_nn))

    seg_im = CITYSCAPE_PALETTE[seg]
    smisc.imsave('data/results/000010_seg.png', seg_im.astype('uint8'))
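
This demo saves images through smisc (scipy.misc), whose imsave was removed in SciPy 1.2. On a current environment, a small shim built on imageio (assuming it is installed) keeps the calls above working unchanged:

import imageio

class smisc:
    # Drop-in stand-in for the removed scipy.misc.imsave; accepts the same
    # (path, array) call pattern used throughout the demo.
    @staticmethod
    def imsave(path, im):
        imageio.imwrite(path, im)
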
Example 5
def main(args):
	train_loader, flow_test_loader, disp_test_loader = make_data_loader(args)

	torch.manual_seed(args.seed)
	torch.cuda.manual_seed(args.seed)
	np.random.seed(args.seed)
	random.seed(args.seed)

	model = model_utils.make_model(
		args, 
		do_flow=not args.no_flow,
		do_disp=not args.no_disp,
		do_seg=(args.do_seg or args.do_seg_distill)
	)
	print('Number of model parameters: {}'.format(
		sum([p.data.nelement() for p in model.parameters()]))
	)

	optimizer = optim.Adam(model.parameters(), 
		lr=args.lr, 
		betas=(0.9, 0.999),
		eps=1e-08, 
		weight_decay=0.0004
	)

	if args.loadmodel is not None:
		ckpt = torch.load(args.loadmodel)
		state_dict = ckpt['state_dict']
		model.load_state_dict(model_utils.patch_model_state_dict(state_dict))
		print('==> A pre-trained checkpoint has been loaded.')
	start_epoch = 1

	if args.auto_resume:
		# search for the latest saved checkpoint
		epoch_found = -1
		for epoch in range(args.epochs+1, 1, -1):
			ckpt_dir = model_utils.make_joint_checkpoint_name(args, epoch)
			ckpt_dir = os.path.join(args.savemodel, ckpt_dir)
			ckpt_path = os.path.join(ckpt_dir, 'model_{:04d}.pth'.format(epoch))
			if os.path.exists(ckpt_path):
				epoch_found = epoch
				break
		if epoch_found > 0:
			ckpt = torch.load(ckpt_path)
			assert ckpt['epoch'] == epoch_found, [ckpt['epoch'], epoch_found]
			start_epoch = ckpt['epoch'] + 1
			optimizer.load_state_dict(ckpt['optimizer'])
			model.load_state_dict(ckpt['state_dict'])
			print('==> Automatically resumed training from {}.'.format(ckpt_path))
	else:
		if args.resume is not None:
			ckpt = torch.load(args.resume)
			start_epoch = ckpt['epoch'] + 1
			optimizer.load_state_dict(ckpt['optimizer'])
			model.load_state_dict(ckpt['state_dict'])
			print('==> Manually resumed training from {}.'.format(args.resume))
	
	cudnn.benchmark = True

	(flow_crit, flow_occ_crit), flow_down_scales, flow_weights = model_utils.make_flow_criteria(args)
	(disp_crit, disp_occ_crit), disp_down_scales, disp_weights = model_utils.make_disp_criteria(args)

	hard_seg_crit = None
	soft_seg_crit = None
	self_supervised_crit = None
	criteria = (
		disp_crit, disp_occ_crit, 
		flow_crit, flow_occ_crit
	)

	min_loss = 100000000000000000
	min_epo = 0
	min_err_pct = 10000
	start_full_time = time.time()

	train_print_format = '{}\t{:d}\t{:d}\t{:d}\t{:d}\t{:.3f}\t{:.3f}\t{:.3f}'\
		'\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.6f}'
	test_print_format = '{}\t{:d}\t{:d}\t{:.3f}\t{:.2f}\t{:.3f}\t{:.2f}\t{:.2f}\t{:.2f}'\
		'\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.6f}'

	global_step = 0
	for epoch in range(start_epoch, args.epochs+1):
		total_train_loss = 0
		total_err = 0
		total_test_err_pct = 0
		total_disp_occ_acc = 0
		total_epe = 0
		total_flow_occ_acc = 0
		total_seg_acc = 0
		lr = adjust_learning_rate(optimizer, epoch, len(train_loader))
			 
		## training ##
		start_time = time.time() 
		for batch_idx, batch_data in enumerate(train_loader):
			end = time.time()
			train_res = train(model, optimizer, batch_data, criteria, args)
			loss, flow_loss, flow_occ_loss, disp_loss, disp_occ_loss = train_res
			global_step += 1
			if (batch_idx + 1) % args.print_freq == 0:
				print(train_print_format.format(
					'Train', global_step, epoch, batch_idx, len(train_loader),
					loss, 
					flow_loss, flow_occ_loss, 
					disp_loss, disp_occ_loss,
					end - start_time, time.time() - start_time, lr
				))
				sys.stdout.flush()
			start_time = time.time()
			total_train_loss += loss

		# should have used the validation set to select the best model
		start_time = time.time()
		for batch_idx, batch_data in enumerate(flow_test_loader):
			loss_data = test_flow(
				model, 
				batch_data,
				criteria, 
				args.cmd, 
				flow_down_scales[0]
			)
			epe, flow_occ_acc, loss, flow_loss, flow_occ_loss = loss_data
			total_epe += epe
			total_flow_occ_acc += flow_occ_acc

		for batch_idx, batch_data in enumerate(disp_test_loader):
			loss_data = test_disp(
				model, 
				batch_data, 
				criteria, 
				args.cmd
			)
			err, err_pct, disp_occ_acc, loss, disp_loss, disp_occ_loss = loss_data
			total_err += err
			total_test_err_pct += err_pct
			total_disp_occ_acc += disp_occ_acc

		if total_test_err_pct/len(disp_test_loader) * 100 < min_err_pct:
			min_loss = total_err/len(disp_test_loader)
			min_epo = epoch
			min_err_pct = total_test_err_pct/len(disp_test_loader) * 100

		print(test_print_format.format(
			'Test', global_step, epoch,
			total_epe / len(flow_test_loader) * args.div_flow,
			total_flow_occ_acc / len(flow_test_loader) * 100,
			total_err/len(disp_test_loader), 
			total_test_err_pct/len(disp_test_loader) * 100,
			total_disp_occ_acc / len(disp_test_loader) * 100,
			flow_loss, flow_occ_loss,
			disp_loss * args.disp_loss_weight, 
			disp_occ_loss * args.disp_loss_weight,
			time.time() - start_time, lr
		))
		
		save_checkpoint(model, optimizer, epoch, global_step, args)
	print('Elapsed time = %.2f HR' %((time.time() - start_full_time)/3600))
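
The flow test loop accumulates total_epe and rescales it by args.div_flow, which suggests the network regresses flow divided by a constant (the FlowNet convention). End-point error itself is standard: the mean Euclidean distance between predicted and ground-truth flow vectors. A self-contained sketch, assuming NCHW flow tensors with two channels:

import torch

def end_point_error(flow_pred, flow_gt):
    # Euclidean distance between the 2-channel flow vectors at every pixel,
    # averaged over batch and image (the standard EPE definition).
    return torch.norm(flow_pred - flow_gt, p=2, dim=1).mean()
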
Example 6
def main(args):
    train_loader, test_loader = make_data_loader(args)

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    model = model_utils.make_model(args,
                                   do_flow=not args.no_flow,
                                   do_disp=not args.no_disp,
                                   do_seg=(args.do_seg or args.do_seg_distill))
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=0.0004)

    if args.loadmodel is not None:
        ckpt = torch.load(args.loadmodel)
        state_dict = ckpt['state_dict']
        missing_keys, unexpected_keys = model.load_state_dict(
            model_utils.patch_model_state_dict(state_dict))
        assert not unexpected_keys, 'Got unexpected keys: {}'.format(
            unexpected_keys)
        if missing_keys:
            for mk in missing_keys:
                assert mk.find(
                    'seg_decoder'
                ) >= 0, 'Only segmentation decoder can be initialized randomly.'
        print('==> A pre-trained model has been loaded.')
    start_epoch = 1

    if args.auto_resume:
        # search for the latest saved checkpoint
        epoch_found = -1
        for epoch in range(args.epochs + 1, 1, -1):
            ckpt_dir = model_utils.make_joint_checkpoint_name(args, epoch)
            ckpt_dir = os.path.join(args.savemodel, ckpt_dir)
            ckpt_path = os.path.join(ckpt_dir,
                                     'model_{:04d}.pth'.format(epoch))
            if os.path.exists(ckpt_path):
                epoch_found = epoch
                break
        if epoch_found > 0:
            ckpt = torch.load(ckpt_path)
            assert ckpt['epoch'] == epoch_found, [ckpt['epoch'], epoch_found]
            start_epoch = ckpt['epoch'] + 1
            optimizer.load_state_dict(ckpt['optimizer'])
            model.load_state_dict(ckpt['state_dict'])
            print('==> Automatically resumed training from {}.'.format(
                ckpt_path))
    else:
        if args.resume is not None:
            ckpt = torch.load(args.resume)
            start_epoch = ckpt['epoch'] + 1
            optimizer.load_state_dict(ckpt['optimizer'])
            model.load_state_dict(ckpt['state_dict'])
            print('==> Manually resumed training from {}.'.format(args.resume))

    cudnn.benchmark = True

    (flow_crit, flow_occ_crit), flow_down_scales, flow_weights = \
        model_utils.make_flow_criteria(args)
    (disp_crit, disp_occ_crit), disp_down_scales, disp_weights = \
        model_utils.make_disp_criteria(args)

    hard_seg_crit = model_utils.make_seg_criterion(args, hard_lab=True)
    soft_seg_crit = model_utils.make_seg_criterion(args, hard_lab=False)
    args.hard_seg_loss_weight *= float(disp_weights[0])
    args.soft_seg_loss_weight *= float(disp_weights[0])

    self_supervised_crit = make_self_supervised_loss(
        args,
        disp_downscales=disp_down_scales,
        disp_pyramid_weights=disp_weights,
        flow_downscales=flow_down_scales,
        flow_pyramid_weights=flow_weights).cuda()
    criteria = (disp_crit, disp_occ_crit, flow_crit, flow_occ_crit,
                hard_seg_crit, soft_seg_crit, self_supervised_crit)

    min_loss = 100000000000000000
    min_epo = 0
    min_err_pct = 10000
    start_full_time = time.time()

    train_print_format = '{}\t{:d}\t{:d}\t{:d}\t{:d}\t{:.3f}\t{:.3f}\t{:.3f}'\
          '\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.6f}'
    test_print_format = '{}\t{:d}\t{:d}\t{:.3f}\t{:.2f}\t{:.3f}\t{:.2f}\t{:.2f}\t{:.2f}'\
         '\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.6f}'

    global_step = 0
    for epoch in range(start_epoch, args.epochs + 1):
        total_train_loss = 0
        total_err = 0
        total_test_err_pct = 0
        total_disp_occ_acc = 0
        total_epe = 0
        total_flow_occ_acc = 0
        total_seg_acc = 0
        lr = adjust_learning_rate(optimizer, epoch, len(train_loader))

        ## training ##
        start_time = time.time()
        for batch_idx, batch_data in enumerate(train_loader):
            end = time.time()
            # (cur_im, nxt_im), (flow, flow_occ), (left_im, right_im), (disp, disp_occ, seg_im) = data
            # if args.seg_root_dir is None:
            # 	seg_im = None
            train_res = train(model, optimizer, batch_data, criteria, args)
            loss, flow_loss, flow_occ_loss, disp_loss, disp_occ_loss, seg_loss, seg_distill_loss, ss_loss, ss_losses = train_res
            global_step += 1
            if (batch_idx + 1) % args.print_freq == 0:
                print(
                    train_print_format.format('Train', global_step,
                                              epoch, batch_idx,
                                              len(train_loader), loss,
                                              flow_loss, flow_occ_loss,
                                              disp_loss, disp_occ_loss,
                                              seg_loss, seg_distill_loss,
                                              ss_loss, end - start_time,
                                              time.time() - start_time, lr))
                for k, v in ss_losses.items():
                    print('{: <10}\t{:.3f}'.format(k, v))
                sys.stdout.flush()
            start_time = time.time()
            total_train_loss += loss

        # should have had a validation set

        save_checkpoint(model, optimizer, epoch, global_step, args)
    print('Elapsed time = %.2f HR' % ((time.time() - start_full_time) / 3600))
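
save_checkpoint is not shown, but the auto-resume logic above dictates its layout: a directory named by model_utils.make_joint_checkpoint_name containing model_{epoch:04d}.pth with 'epoch', 'state_dict', and 'optimizer' keys. A minimal sketch consistent with that (an assumption, not the SENSE code):

import os
import torch

def save_checkpoint(model, optimizer, epoch, global_step, args):
    # Mirror the path scheme the auto-resume code searches for.
    ckpt_dir = os.path.join(args.savemodel,
                            model_utils.make_joint_checkpoint_name(args, epoch))
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save({
        'epoch': epoch,
        'global_step': global_step,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }, os.path.join(ckpt_dir, 'model_{:04d}.pth'.format(epoch)))
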
Example 7
def main(args):
    train_loader, test_loader = make_data_loader(args)

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    model = model_utils.make_model(args, do_seg=args.do_seg)
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=0.0004)

    if args.loadmodel is not None:
        ckpt = torch.load(args.loadmodel)
        state_dict = ckpt['state_dict']
        model.load_state_dict(model_utils.patch_model_state_dict(state_dict))
        print('==> A pre-trained checkpoint has been loaded {}'.format(
            args.loadmodel))
    start_epoch = 1

    if args.auto_resume:
        # search for the latest saved checkpoint
        epoch_found = -1
        for epoch in range(args.epochs + 1, 1, -1):
            ckpt_path = model_utils.make_joint_checkpoint_name(args, epoch)
            ckpt_path = os.path.join(args.savemodel, ckpt_path)
            if os.path.exists(ckpt_path):
                epoch_found = epoch
                break
        if epoch_found > 0:
            ckpt = torch.load(ckpt_path)
            assert ckpt['epoch'] == epoch_found, [ckpt['epoch'], epoch_found]
            start_epoch = ckpt['epoch'] + 1
            optimizer.load_state_dict(ckpt['optimizer'])
            model.load_state_dict(ckpt['state_dict'])
            print('==> Automatically resumed training from {}.'.format(
                ckpt_path))

    cudnn.benchmark = True

    (flow_crit, flow_occ_crit), flow_down_scales, flow_weights = \
        model_utils.make_flow_criteria(args)
    (disp_crit, disp_occ_crit), disp_down_scales, disp_weights = \
        model_utils.make_disp_criteria(args)

    criteria = (disp_crit, disp_occ_crit, flow_crit, flow_occ_crit)

    min_loss = 100000000000000000
    min_epo = 0
    min_err_pct = 10000
    start_full_time = time.time()

    train_print_format = '{}\t{:d}\t{:d}\t{:d}\t{:d}\t{:.3f}\t{:.3f}\t{:.3f}'\
          '\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t\t{:.6f}'
    test_print_format = '{}\t{:d}\t{:d}\t{:.3f}\t{:.2f}\t{:.3f}\t{:.2f}\t{:.2f}\t{:.2f}'\
         '\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.6f}'

    global_step = 0
    for epoch in range(start_epoch, args.epochs + 1):
        total_train_loss = 0
        total_err = 0
        total_test_err_pct = 0
        total_disp_occ_acc = 0
        total_epe = 0
        total_flow_occ_acc = 0
        total_seg_acc = 0
        lr = adjust_learning_rate(optimizer, epoch, len(train_loader))

        ## training ##
        start_time = time.time()
        for batch_idx, batch_data in enumerate(train_loader):
            end = time.time()
            train_res = train(model, optimizer, batch_data, criteria, args)
            loss, flow_loss, flow_occ_loss, disp_loss, disp_occ_loss = train_res
            global_step += 1
            if (batch_idx + 1) % args.print_freq == 0:
                print(
                    train_print_format.format('Train', global_step,
                                              epoch, batch_idx,
                                              len(train_loader), loss,
                                              flow_loss, flow_occ_loss,
                                              disp_loss, disp_occ_loss,
                                              end - start_time,
                                              time.time() - start_time, lr))
                sys.stdout.flush()
            start_time = time.time()
            total_train_loss += loss

        save_checkpoint(model, optimizer, epoch, global_step, args)
    print('Elapsed time = %.2f HR' % ((time.time() - start_full_time) / 3600))
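
adjust_learning_rate appears in every training loop above but is never defined. A common shape for such a helper (an assumption; the real SENSE schedule is unknown) is step decay written back into the optimizer's parameter groups, returning the value for logging:

def adjust_learning_rate(optimizer, epoch, iters_per_epoch,
                         base_lr=1e-3, decay_every=20, gamma=0.5):
    # Step decay: scale the base lr by gamma every decay_every epochs.
    # base_lr, decay_every, and gamma are illustrative defaults, not SENSE's;
    # iters_per_epoch is accepted to match the call sites above but unused here.
    lr = base_lr * (gamma ** ((epoch - 1) // decay_every))
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
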