def main():
    board_writer = SummaryWriter(os.path.join(args.checkpoint_dir, "tensorBoard"))

    if args.model == 'PSMNet_stackhourglass':
        net = PSMNet_stackhourglass(args.max_disp)
    elif args.model == 'PSMNet_basic':
        net = PSMNet_basic(args.max_disp)
    else:
        print('no model')

    # Test loader
    test_transform_list = [
        myTransforms.RandomCrop(args.test_img_height, args.test_img_width, validate=True),
        myTransforms.ToTensor(),
        myTransforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
    ]
    test_transform = myTransforms.Compose(test_transform_list)
    test_data = StereoDataset(data_dir=args.data_dir,
                              isDebug=args.isDebug,
                              dataset_name=args.dataset_name,
                              mode='test',
                              transform=test_transform)
    logger.info('=> {} test samples found in the test set'.format(len(test_data)))
    test_loader = DataLoader(dataset=test_data,
                             batch_size=args.test_batch_size,
                             shuffle=False,
                             num_workers=args.num_workers,
                             pin_memory=True,
                             drop_last=False)

    net.cuda()

    if args.pretrained_net is not None:
        logger.info('=> Loading pretrained Net: %s' % args.pretrained_net)
        # Enable training from a partially pretrained model
        utils.load_pretrained_net(net, args.pretrained_net, strict=args.strict, logger=logger)
    else:
        logger.info('=> args.pretrained_net is None! Please specify it!!!')
        return

    assert args.test_batch_size == 1, "test_batch_size must be 1."

    logger.info('=> Start testing...')
    testOnTestSet(net, test_loader, args.dataset_name, board_writer, mode="test", epoch=0)
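# Hedged sketch only (not the repo's actual utils.load_pretrained_net): partial loading of a
# pretrained checkpoint is typically done by keeping only the keys whose names and shapes match
# the current model, then loading the merged state dict. The 'state_dict' checkpoint key and the
# function name load_pretrained_net_sketch are assumptions for illustration.
import torch

def load_pretrained_net_sketch(net, pretrained_path, strict=False, logger=None):
    checkpoint = torch.load(pretrained_path, map_location='cpu')
    state_dict = checkpoint.get('state_dict', checkpoint)
    model_dict = net.state_dict()
    # Drop parameters that do not exist in the current model or whose shapes differ
    filtered = {k: v for k, v in state_dict.items()
                if k in model_dict and v.size() == model_dict[k].size()}
    if logger is not None:
        logger.info('=> Loaded {}/{} parameters from {}'.format(
            len(filtered), len(model_dict), pretrained_path))
    model_dict.update(filtered)
    net.load_state_dict(model_dict, strict=strict)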
def getDataLoader(args, logger):
    # Train loader
    # debug_overFit_train: 0 = debug; 1 = overfit; 1_x = train on a subset of the data; 2 = train on the full dataset
    if args.debug_overFit_train in [0, 1_1200, 1_2400, 1_4800, 1_9600, 1_19200, 2]:
        train_transform_list = [
            transforms.RandomCrop(args.img_height, args.img_width),
            transforms.RandomColor(),
            transforms.RandomVerticalFlip(),
            # Convert the image to a Tensor and divide by 255.0: pixel values are normalized
            # to [0, 1] and the layout changes from [H, W, C=3] to [C=3, H, W]
            transforms.ToTensor(),
            # Normalize again with the ImageNet mean and std
            transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
        ]
def main():
    # For reproducibility
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    torch.backends.cudnn.benchmark = True

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Train loader
    train_transform_list = [transforms.RandomCrop(args.img_height, args.img_width),
                            transforms.RandomColor(),
                            transforms.RandomVerticalFlip(),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
                            ]
    train_transform = transforms.Compose(train_transform_list)

    train_data = dataloader.StereoDataset(data_dir=args.data_dir,
                                          dataset_name=args.dataset_name,
                                          mode='train' if args.mode != 'train_all' else 'train_all',
                                          load_pseudo_gt=args.load_pseudo_gt,
                                          transform=train_transform)
    logger.info('=> {} training samples found in the training set'.format(len(train_data)))

    train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True,
                              num_workers=args.num_workers, pin_memory=True, drop_last=True)

    # Validation loader
    val_transform_list = [transforms.RandomCrop(args.val_img_height, args.val_img_width, validate=True),
                          transforms.ToTensor(),
                          transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
                          ]
    val_transform = transforms.Compose(val_transform_list)
    val_data = dataloader.StereoDataset(data_dir=args.data_dir,
                                        dataset_name=args.dataset_name,
                                        mode=args.mode,
                                        transform=val_transform)

    val_loader = DataLoader(dataset=val_data, batch_size=args.val_batch_size, shuffle=False,
                            num_workers=args.num_workers, pin_memory=True, drop_last=False)

    # Network
    aanet = nets.AANet(args.max_disp,
                       num_downsample=args.num_downsample,
                       feature_type=args.feature_type,
                       no_feature_mdconv=args.no_feature_mdconv,
                       feature_pyramid=args.feature_pyramid,
                       feature_pyramid_network=args.feature_pyramid_network,
                       feature_similarity=args.feature_similarity,
                       aggregation_type=args.aggregation_type,
                       num_scales=args.num_scales,
                       num_fusions=args.num_fusions,
                       num_stage_blocks=args.num_stage_blocks,
                       num_deform_blocks=args.num_deform_blocks,
                       no_intermediate_supervision=args.no_intermediate_supervision,
                       refinement_type=args.refinement_type,
                       mdconv_dilation=args.mdconv_dilation,
                       deformable_groups=args.deformable_groups).to(device)

    logger.info('%s' % aanet)

    if args.pretrained_aanet is not None:
        logger.info('=> Loading pretrained AANet: %s' % args.pretrained_aanet)
        # Enable training from a partially pretrained model
        utils.load_pretrained_net(aanet, args.pretrained_aanet, no_strict=(not args.strict))

    if torch.cuda.device_count() > 1:
        logger.info('=> Use %d GPUs' % torch.cuda.device_count())
        aanet = torch.nn.DataParallel(aanet)

    # Save parameters
    num_params = utils.count_parameters(aanet)
    logger.info('=> Number of trainable parameters: %d' % num_params)
    save_name = '%d_parameters' % num_params
    open(os.path.join(args.checkpoint_dir, save_name), 'a').close()

    # Optimizer
    # Learning rate for offset learning is set 0.1 times those of existing layers
    specific_params = list(filter(utils.filter_specific_params,
                                  aanet.named_parameters()))
    base_params = list(filter(utils.filter_base_params,
                              aanet.named_parameters()))

    specific_params = [kv[1] for kv in specific_params]  # kv is a tuple (key, value)
    base_params = [kv[1] for kv in base_params]
    specific_lr = args.learning_rate * 0.1
    params_group = [
        {'params': base_params, 'lr': args.learning_rate},
        {'params': specific_params, 'lr': specific_lr},
    ]

    optimizer = torch.optim.Adam(params_group, weight_decay=args.weight_decay)

    # Resume training
    if args.resume:
        # AANet
        start_epoch, start_iter, best_epe, best_epoch = utils.resume_latest_ckpt(
            args.checkpoint_dir, aanet, 'aanet')
        # Optimizer
        utils.resume_latest_ckpt(args.checkpoint_dir, optimizer, 'optimizer')
    else:
        start_epoch = 0
        start_iter = 0
        best_epe = None
        best_epoch = None

    # LR scheduler
    if args.lr_scheduler_type is not None:
        last_epoch = start_epoch if args.resume else start_epoch - 1
        if args.lr_scheduler_type == 'MultiStepLR':
            milestones = [int(step) for step in args.milestones.split(',')]
            lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                                milestones=milestones,
                                                                gamma=args.lr_decay_gamma,
                                                                last_epoch=last_epoch)
        else:
            raise NotImplementedError

    train_model = model.Model(args, logger, optimizer, aanet, device, start_iter, start_epoch,
                              best_epe=best_epe, best_epoch=best_epoch)

    logger.info('=> Start training...')

    if args.evaluate_only:
        assert args.val_batch_size == 1
        train_model.validate(val_loader)
    else:
        for _ in range(start_epoch, args.max_epoch):
            if not args.evaluate_only:
                train_model.train(train_loader)
            if not args.no_validate:
                train_model.validate(val_loader)
            if args.lr_scheduler_type is not None:
                lr_scheduler.step()

        logger.info('=> End training\n\n')
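# Hedged sketch of the two predicates used above to split parameters into the "offset" group
# (deformable-convolution offset layers, trained at 0.1x the base learning rate) and the base
# group. The exact layer-name patterns ('offset_conv.weight' / 'offset_conv.bias') and the
# _sketch names are assumptions for illustration, not the repo's actual utils implementation.
def filter_specific_params_sketch(kv):
    # kv is a (name, parameter) tuple from named_parameters()
    name, _ = kv
    return 'offset_conv.weight' in name or 'offset_conv.bias' in name

def filter_base_params_sketch(kv):
    name, _ = kv
    return not ('offset_conv.weight' in name or 'offset_conv.bias' in name)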
# fix the random seed
torch.manual_seed(233)
torch.cuda.manual_seed(233)
np.random.seed(233)

torch.backends.cudnn.benchmark = True

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

# Train Dataloader
train_transform_list = [transforms.RandomCrop(args.img_height, args.img_width),
                        transforms.RandomColor(),
                        transforms.RandomVerticalFlip(),
                        transforms.ToTensor(),
                        transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)]
train_transform = transforms.Compose(train_transform_list)

train_data = dataloader.StereoDataset(data_dir=args.dir_path,
                                      dataset_name=args.dataset_name,
                                      mode='train' if args.mode != 'train_all' else 'train_all',
                                      load_pseudo_gt=args.load_pseudo_gt,
                                      transform=train_transform)
logger.info('=> {} training samples found in the training set'.format(len(train_data)))
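# To visualize normalized images later (e.g. in TensorBoard), the ImageNet normalization has to
# be undone channel-wise. A minimal helper sketch, assuming the tensor layout is [C=3, H, W] and
# that Normalize applied (x - mean) / std per channel; the helper name is hypothetical.
import torch

def denormalize(img, mean=IMAGENET_MEAN, std=IMAGENET_STD):
    mean = torch.tensor(mean, dtype=img.dtype, device=img.device).view(3, 1, 1)
    std = torch.tensor(std, dtype=img.dtype, device=img.device).view(3, 1, 1)
    return img * std + mean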
# debug_overFit_train: 0 = debug; 1 = overfit; 1_x = train on a subset of the data; 2 = train on the full dataset
if args.debug_overFit_train in [0, 1_1200, 1_2400, 1_4800, 1_9600, 1_19200, 2]:
    train_transform_list = [
        transforms.RandomCrop(args.img_height, args.img_width),
        transforms.RandomColor(),
        transforms.RandomVerticalFlip(),
        # Convert the image to a Tensor and divide by 255.0: pixel values are normalized
        # to [0, 1] and the layout changes from [H, W, C=3] to [C=3, H, W]
        transforms.ToTensor(),
        # Normalize again with the ImageNet mean and std
        transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
    ]
elif args.debug_overFit_train == 1:
    train_transform_list = [
        # validate=True -> only a center crop is applied
        transforms.RandomCrop(args.img_height, args.img_width, validate=True),
        transforms.ToTensor(),
        transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
    ]
train_transform = transforms.Compose(train_transform_list)

train_data = dataloader.StereoDataset(
    data_dir=args.data_dir,
    debug_overFit_train=args.debug_overFit_train,
    dataset_name=args.dataset_name,
    mode='train' if args.mode != 'train_all' else 'train_all',
    load_pseudo_gt=args.load_pseudo_gt,
    transform=train_transform)
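# The membership test above relies on Python's numeric underscore syntax: 1_1200 is simply the
# integer 11200, 1_19200 is 119200, and so on, so the "1_x" sentinels encode a leading 1
# followed by the subset size x. The decoding helper below is a hypothetical illustration of
# that convention; how the dataset class actually interprets debug_overFit_train may differ.
assert 1_1200 == 11200 and 1_19200 == 119200

def subset_size(flag):
    # strip the leading '1' to recover the subset size, e.g. 11200 -> 1200, 119200 -> 19200;
    # 0 / 1 / 2 are the special debug / overfit / full-data modes and carry no subset size
    return int(str(flag)[1:]) if flag > 2 else None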
def getDataLoader(args, logger):
    # Train loader
    train_transform_list = [
        myTransforms.RandomCrop(args.img_height, args.img_width),
        myTransforms.RandomColor(),
        myTransforms.RandomVerticalFlip(),
        # Convert the image to a Tensor and divide by 255.0: pixel values are normalized
        # to [0, 1] and the layout changes from [H, W, C=3] to [C=3, H, W]
        myTransforms.ToTensor(),
        # Normalize again with the ImageNet mean and std
        myTransforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
    ]
    train_transform = myTransforms.Compose(train_transform_list)

    train_data = StereoDataset(
        data_dir=args.data_dir,
        isDebug=args.isDebug,
        dataset_name=args.dataset_name,
        mode='train' if args.mode != 'train_all' else 'train_all',
        load_pseudo_gt=args.load_pseudo_gt,
        transform=train_transform)
    logger.info('=> {} training samples found in the training set'.format(len(train_data)))

    # Distributed training: note that DistributedSampler already shuffles with its default arguments
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_data) if args.distributed else None

    # train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True,
    #                           num_workers=args.num_workers, pin_memory=True, drop_last=True,
    #                           sampler=train_sampler)

    # When a DistributedSampler is used, the DataLoader itself must not shuffle
    is_shuffle = False if args.distributed else True
    # Note that batch_size here is the per-process batch size: the effective total batch size
    # is this batch_size multiplied by the number of parallel processes (world_size).
    train_loader = DataLoader(dataset=train_data,
                              batch_size=args.batch_size,
                              num_workers=args.num_workers,
                              shuffle=is_shuffle,
                              pin_memory=True,
                              drop_last=True,
                              sampler=train_sampler)

    # Validation loader
    val_transform_list = [
        myTransforms.RandomCrop(args.val_img_height, args.val_img_width, validate=True),
        myTransforms.ToTensor(),
        myTransforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
    ]
    val_transform = myTransforms.Compose(val_transform_list)
    val_data = StereoDataset(data_dir=args.data_dir,
                             isDebug=args.isDebug,
                             dataset_name=args.dataset_name,
                             mode='val',
                             transform=val_transform)
    logger.info('=> {} val samples found in the val set'.format(len(val_data)))
    val_loader = DataLoader(dataset=val_data,
                            batch_size=args.val_batch_size,
                            shuffle=False,
                            num_workers=args.num_workers,
                            pin_memory=True,
                            drop_last=False)

    return train_loader, val_loader
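# Hedged usage sketch for the distributed path above: each process must initialize the process
# group before calling getDataLoader(), and the sampler's epoch has to be advanced every epoch
# so the shuffle order changes; otherwise every epoch sees the same permutation. The args fields
# (local_rank, distributed, max_epoch) and the launch method (torchrun / torch.distributed.launch
# with env:// rendezvous) are assumptions about how this script is run.
import torch
import torch.distributed as dist

def train_distributed(args, logger):
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl', init_method='env://')
    train_loader, val_loader = getDataLoader(args, logger)
    for epoch in range(args.max_epoch):
        if args.distributed:
            # DistributedSampler reseeds its shuffle from the epoch number
            train_loader.sampler.set_epoch(epoch)
        # ... run one training epoch over train_loader, then validate on val_loader ...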