# NOTE(review): fragment — the opening of this transforms dict (and the imports it
# relies on: TemporalCenterCrop, ClassLabel, get_validation_set, DataLoader,
# model_factory, torch, np, os, time, config) lies outside the visible chunk.
    'temporal': TemporalCenterCrop(config.sample_duration),
    'target': ClassLabel()
}  # data transforms used at test time

# Build the validation dataset and its loader.
# NOTE(review): shuffle=True is unusual for evaluation — results are still the
# same in aggregate, but per-step logs won't be reproducible; confirm intent.
print('==> Loading validation dataset........')
val_data = get_validation_set(config, validation_transforms['spatial'],
                              validation_transforms['temporal'],
                              validation_transforms['target'])
data_loader = DataLoader(val_data, config.batch_size, shuffle=True,
                         num_workers=config.num_workers, pin_memory=True)

# Build the model and restore the best checkpoint saved for this dataset.
model = model_factory.get_model(config)
model.cuda()
print("==> Loading existing model '{}' ".format('lrcn'))
model_info = torch.load(
    os.path.join('model/checkpoints/', '{}_save_best.pth'.format(config.dataset)))
model.load_state_dict(model_info['state_dict'])
model.eval()  # disable dropout / fix batch-norm statistics for evaluation

print('==> Starting test.......')
# Number of batches in one pass over the validation set (ceil handles the
# final, possibly smaller, batch).
steps_in_epoch = int(np.ceil(len(data_loader.dataset) / config.batch_size))
print(steps_in_epoch)
# One accuracy slot per batch; filled inside the loop below.
accuracies = np.zeros(steps_in_epoch, np.float32)
epoch_start_time = time.time()
# NOTE(review): fragment — the body of this loop continues past the visible chunk.
for step, (clips, targets) in enumerate(data_loader):
    start_time = time.time()
from transforms.temporal_transforms import TemporalRandomCrop
from transforms.spatial_transforms import Compose, Normalize, RandomHorizontalFlip, MultiScaleRandomCrop, ToTensor, CenterCrop

# -- Configuration and logging setup --
'''--------------------------------------配置和日志设置------------------------------------------'''
config = parse_opts()                      # parse command-line options
config = prepare_output_dirs(config)       # initialise output directories
config = init_cropping_scales(config)      # multi-scale cropping configuration
config = set_lr_scheduling_policy(config)  # learning-rate schedule configuration

# Channel mean / std for input normalisation.
# NOTE(review): these are the standard ImageNet statistics — confirm they match
# the dataset actually being trained on.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

print_config(config)  # echo the resolved configuration
write_config(config, os.path.join(config.save_dir, 'config.json'))  # persist config as JSON

# -- Model initialisation --
'''---------------------------------------初始化模型-------------------------------------------'''
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # restrict training to GPU 0
model = model_factory.get_model(config)   # build the network from the config

# Data-transform pipelines.
norm_method = Normalize(mean, std)
train_transforms = {
    'spatial': Compose([
        MultiScaleRandomCrop(config.scales, config.spatial_size),
        RandomHorizontalFlip(),
        ToTensor(255),  # 255 rescales pixel values into [0, 1]
        norm_method
    ]),
    'temporal': TemporalRandomCrop(config.sample_duration),
    'target': ClassLabel()
}  # train-time data transforms
# NOTE(review): fragment — this dict literal continues past the visible chunk.
validation_transforms = {
from transforms.spatial_transforms import Compose, Normalize, RandomHorizontalFlip, MultiScaleRandomCrop, ToTensor, CenterCrop

# -- Configuration and logging setup --
'''--------------------------------------配置和日志设置------------------------------------------'''
config = parse_opts()                      # parse command-line options
config = prepare_output_dirs(config)       # initialise output directories
config = init_cropping_scales(config)      # multi-scale cropping configuration
config = set_lr_scheduling_policy(config)  # learning-rate schedule configuration

# Channel mean / std for input normalisation.
# NOTE(review): dataset-specific statistics (not ImageNet) — presumably computed
# from the training split; verify against the dataset preparation code.
mean = [0.39608, 0.38182, 0.35067]
std = [0.15199, 0.14856, 0.15698]

print_config(config)  # echo the resolved configuration
write_config(config, os.path.join(config.save_dir, 'config.json'))  # persist config as JSON

# -- Model initialisation --
'''---------------------------------------初始化模型-------------------------------------------'''
device = torch.device(config.device)  # runtime device (e.g. 'cuda:0' or 'cpu')
# Build the network; also returns the parameter groups that should be updated.
model, parameters = model_factory.get_model(config)

# Data-transform pipelines.
norm_method = Normalize(mean, std)
train_transforms = {
    'spatial': Compose([
        MultiScaleRandomCrop(config.scales, config.spatial_size),
        RandomHorizontalFlip(),
        ToTensor(config.norm_value),  # scale factor for pixel values, from config
        norm_method
    ]),
    'temporal': TemporalRandomCrop(config.sample_duration),
    'target': ClassLabel()
}  # train-time data transforms
# NOTE(review): fragment — this dict literal continues past the visible chunk.
validation_transforms = {
# Optional TensorBoard logging: create a writer unless disabled by config.
if not config.no_tensorboard:
    from tensorboardX import SummaryWriter
    writer = SummaryWriter(log_dir=config.log_dir)
else:
    writer = None  # callers must handle the no-logging case

####################################################################
####################################################################
# Initialize model

device = torch.device(config.device)
#torch.backends.cudnn.enabled = False

# Returns the network instance (I3D, 3D-ResNet etc.)
# Note: this also restores the weights and optionally replaces final layer
model, parameters = model_factory.get_model(config)

print('#' * 60)
if config.model == 'i3d':
    # For I3D, `parameters` is a list of dicts with a 'name' key; list what
    # will actually be trained.
    param_names = [p['name'] for p in parameters]
    print('Parameters to train:')
    print(param_names)
# NOTE(review): indentation reconstructed from a collapsed source line — this
# closing banner may originally have been inside the `if` above; confirm.
print('#' * 60)

####################################################################
####################################################################
# Setup of data transformations

# NOTE(review): fragment — the remaining normalization branches lie outside the
# visible chunk.
if config.no_dataset_mean and config.no_dataset_std:
    # Just zero-center and scale to unit std
    print('Data normalization: no dataset mean, no dataset std')