def __init__(self, scene_ds, meters, batch_size=64, cache_data=True, n_workers=4, sampler=None):
    self.rank = get_rank()
    self.world_size = get_world_size()
    self.tmp_dir = get_tmp_dir()

    self.scene_ds = scene_ds
    if sampler is None:
        sampler = DistributedSceneSampler(scene_ds,
                                          num_replicas=self.world_size,
                                          rank=self.rank,
                                          shuffle=True)
    dataloader = DataLoader(scene_ds, batch_size=batch_size,
                            num_workers=n_workers,
                            sampler=sampler, collate_fn=self.collate_fn)

    if cache_data:
        self.dataloader = list(tqdm(dataloader))
    else:
        self.dataloader = dataloader

    self.meters = OrderedDict(sorted(meters.items(), key=lambda item: item[0]))
def gather_distributed(self, tmp_dir):
    tmp_dir = Path(tmp_dir)
    tmp_dir.mkdir(exist_ok=True, parents=True)

    rank, world_size = get_rank(), get_world_size()
    tmp_file_template = (tmp_dir / 'rank={rank}.pth.tar').as_posix()

    if rank > 0:
        tmp_file = tmp_file_template.format(rank=rank)
        torch.save(self.datas, tmp_file)

    if world_size > 1:
        torch.distributed.barrier()

    if rank == 0 and world_size > 1:
        all_datas = self.datas
        for n in range(1, world_size):
            tmp_file = tmp_file_template.format(rank=n)
            datas = torch.load(tmp_file)
            for k in all_datas.keys():
                all_datas[k].extend(datas.get(k, []))
            Path(tmp_file).unlink()
        self.datas = all_datas

    if world_size > 1:
        torch.distributed.barrier()
    return
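# A minimal, self-contained sketch of the file-based gather pattern used by
# gather_distributed above, written for a hypothetical dict-of-lists payload
# (the names below are illustrative, not part of the codebase). Rank 0 keeps
# its shard in memory; every other rank serializes its shard to a shared
# tmp_dir, and rank 0 merges the shards after a barrier. With world_size == 1
# it degenerates to a no-op, so the sketch also runs outside a distributed
# launch.
from pathlib import Path

import torch
import torch.distributed


def gather_dict_of_lists(datas, tmp_dir, rank, world_size):
    tmp_dir = Path(tmp_dir)
    tmp_dir.mkdir(exist_ok=True, parents=True)
    tmp_file_template = (tmp_dir / 'rank={rank}.pth.tar').as_posix()
    if rank > 0:
        # Workers dump their shard to the shared filesystem.
        torch.save(datas, tmp_file_template.format(rank=rank))
    if world_size > 1:
        # Make sure every shard is on disk before rank 0 reads them.
        torch.distributed.barrier()
    if rank == 0:
        for n in range(1, world_size):
            tmp_file = Path(tmp_file_template.format(rank=n))
            shard = torch.load(tmp_file)
            for k in datas.keys():
                datas[k].extend(shard.get(k, []))
            tmp_file.unlink()
    if world_size > 1:
        # Keep workers alive until rank 0 has consumed their files.
        torch.distributed.barrier()
    return datas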
def __init__(self, scene_ds, batch_size=8, cache_data=False, n_workers=4):
    self.rank = get_rank()
    self.world_size = get_world_size()
    self.tmp_dir = get_tmp_dir()

    sampler = DistributedSceneSampler(scene_ds, num_replicas=self.world_size, rank=self.rank)
    self.sampler = sampler
    dataloader = DataLoader(scene_ds, batch_size=batch_size,
                            num_workers=n_workers,
                            sampler=sampler, collate_fn=self.collate_fn)

    if cache_data:
        self.dataloader = list(tqdm(dataloader))
    else:
        self.dataloader = dataloader
def __init__(self, scene_ds, batch_size=1, cache_data=False, n_workers=4):
    self.rank = get_rank()
    self.world_size = get_world_size()
    self.tmp_dir = get_tmp_dir()

    assert batch_size == 1, 'Multiple view groups not supported for now.'
    sampler = DistributedSceneSampler(scene_ds, num_replicas=self.world_size, rank=self.rank)
    self.sampler = sampler
    dataloader = DataLoader(scene_ds, batch_size=batch_size,
                            num_workers=n_workers,
                            sampler=sampler, collate_fn=self.collate_fn)

    if cache_data:
        self.dataloader = list(tqdm(dataloader))
    else:
        self.dataloader = dataloader
def gather_distributed(self, tmp_dir=None):
    rank, world_size = get_rank(), get_world_size()
    if tmp_dir is None:
        # Fall back to the job-wide temporary directory; the original default
        # of None would crash on the path join below.
        tmp_dir = get_tmp_dir()
    tmp_file_template = (Path(tmp_dir) / 'rank={rank}.pth.tar').as_posix()

    if rank > 0:
        tmp_file = tmp_file_template.format(rank=rank)
        torch.save(self, tmp_file)

    if world_size > 1:
        torch.distributed.barrier()

    datas = [self]
    if rank == 0 and world_size > 1:
        for n in range(1, world_size):
            tmp_file = tmp_file_template.format(rank=n)
            data = torch.load(tmp_file)
            datas.append(data)
            Path(tmp_file).unlink()

    if world_size > 1:
        torch.distributed.barrier()
    return concatenate(datas)
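# Note on the variant above: every rank returns concatenate(datas), but only
# rank 0's `datas` list contains the other ranks' shards, so only rank 0
# holds the full result. Below is a hedged sketch of an equivalent gather
# using PyTorch's object collectives (torch.distributed.all_gather_object,
# available in recent PyTorch) instead of temporary files; this is an
# alternative, not what the code above does.
import torch.distributed as dist


def gather_via_collectives(obj):
    if not (dist.is_available() and dist.is_initialized()):
        return [obj]
    gathered = [None] * dist.get_world_size()
    # Pickles `obj` on every rank and gives every rank the full list.
    dist.all_gather_object(gathered, obj)
    return gathered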
metrics_to_print.update({
    f'{det_key}/ba_input/ADD-S_ntop=BOP_matching=OVERLAP/norm': 'Multiview before BA/ADD-S (m)',
    f'{det_key}/ba_output/ADD-S_ntop=BOP_matching=OVERLAP/norm': 'Multiview after BA/ADD-S (m)',
})

if get_rank() == 0:
    save_dir.mkdir()
    results = format_results(all_predictions, eval_metrics, eval_dfs, print_metrics=False)
    (save_dir / 'full_summary.txt').write_text(results.get('summary_txt', ''))

    full_summary = results['summary']
    summary_txt = 'Results:'
    for k, v in metrics_to_print.items():
        if k in full_summary:
            summary_txt += f"\n{v}: {full_summary[k]}"
    logger.info(f"{'-'*80}")
    logger.info(summary_txt)
    logger.info(f"{'-'*80}")

    torch.save(results, save_dir / 'results.pth.tar')
    (save_dir / 'summary.txt').write_text(summary_txt)
    logger.info(f"Saved: {save_dir}")


if __name__ == '__main__':
    patch_tqdm()
    main()
    time.sleep(2)
    if get_world_size() > 1:
        torch.distributed.barrier()
def train_pose(args):
    torch.set_num_threads(1)

    if args.resume_run_id:
        resume_dir = EXP_DIR / args.resume_run_id
        # config.yaml stores the full argparse Namespace, hence the unsafe loader.
        resume_args = yaml.load((resume_dir / 'config.yaml').read_text(), Loader=yaml.UnsafeLoader)
        keep_fields = set([
            'resume_run_id',
            'epoch_size',
        ])
        vars(args).update({k: v for k, v in vars(resume_args).items() if k not in keep_fields})

    args.train_refiner = args.TCO_input_generator == 'gt+noise'
    args.train_coarse = not args.train_refiner
    args.save_dir = EXP_DIR / args.run_id
    logger.info(f"{'-'*80}")
    for k, v in args.__dict__.items():
        logger.info(f"{k}: {v}")
    logger.info(f"{'-'*80}")

    # Initialize distributed
    device = torch.cuda.current_device()
    init_distributed_mode()
    world_size = get_world_size()
    args.n_gpus = world_size
    args.global_batch_size = world_size * args.batch_size
    logger.info(f'Connection established with {world_size} gpus.')

    # Make train/val datasets
    def make_datasets(dataset_names):
        datasets = []
        for (ds_name, n_repeat) in dataset_names:
            assert 'test' not in ds_name
            ds = make_scene_dataset(ds_name)
            logger.info(f'Loaded {ds_name} with {len(ds)} images.')
            for _ in range(n_repeat):
                datasets.append(ds)
        return ConcatDataset(datasets)

    # Tracking dataset
    scene_ds_train = make_datasets(args.train_ds_names)
    scene_ds_val = make_datasets(args.val_ds_names)

    ds_kwargs = dict(
        resize=args.input_resize,
        rgb_augmentation=args.rgb_augmentation,
        background_augmentation=args.background_augmentation,
        min_area=args.min_area,
        gray_augmentation=args.gray_augmentation,
    )
    ds_train = PoseTrackingDataset(scene_ds_train, **ds_kwargs)
    ds_val = PoseTrackingDataset(scene_ds_val, **ds_kwargs)

    train_sampler = PartialSampler(ds_train, epoch_size=args.epoch_size)
    ds_iter_train = DataLoader(ds_train, sampler=train_sampler, batch_size=args.batch_size,
                               num_workers=args.n_dataloader_workers,
                               collate_fn=ds_train.collate_fn,
                               drop_last=False, pin_memory=True)
    ds_iter_train = MultiEpochDataLoader(ds_iter_train)

    val_sampler = PartialSampler(ds_val, epoch_size=int(0.1 * args.epoch_size))
    ds_iter_val = DataLoader(ds_val, sampler=val_sampler, batch_size=args.batch_size,
                             num_workers=args.n_dataloader_workers,
                             collate_fn=ds_val.collate_fn,
                             drop_last=False, pin_memory=True)
    ds_iter_val = MultiEpochDataLoader(ds_iter_val)

    # Make model
    # renderer = BulletBatchRenderer(object_set=args.urdf_ds_name, n_workers=args.n_rendering_workers)
    object_ds = make_object_dataset(args.object_ds_name)
    mesh_db = MeshDataBase.from_object_ds(object_ds).batched(n_sym=args.n_symmetries_batch).cuda().float()

    model = create_model_pose_custom(cfg=args, mesh_db=mesh_db).cuda()

    eval_bundle = make_eval_bundle(args, model)

    if args.resume_run_id:
        resume_dir = EXP_DIR / args.resume_run_id
        path = resume_dir / 'checkpoint.pth.tar'
        logger.info(f'Loading checkpoint from {path}')
        save = torch.load(path)
        state_dict = save['state_dict']
        model.load_state_dict(state_dict)
        start_epoch = save['epoch'] + 1
    else:
        start_epoch = 0
    end_epoch = args.n_epochs

    if args.run_id_pretrain is not None:
        pretrain_path = EXP_DIR / args.run_id_pretrain / 'checkpoint.pth.tar'
        logger.info(f'Using pretrained model from {pretrain_path}.')
        model.load_state_dict(torch.load(pretrain_path)['state_dict'])

    # Synchronize models across processes.
    model = sync_model(model)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], output_device=device)

    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Warmup
    if args.n_epochs_warmup == 0:
        lambd = lambda epoch: 1
    else:
        n_batches_warmup = args.n_epochs_warmup * (args.epoch_size // args.batch_size)
        lambd = lambda batch: (batch + 1) / n_batches_warmup
    lr_scheduler_warmup = torch.optim.lr_scheduler.LambdaLR(optimizer, lambd)
    lr_scheduler_warmup.last_epoch = start_epoch * args.epoch_size // args.batch_size

    # LR schedulers
    # Divide LR by 10 every args.lr_epoch_decay epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=args.lr_epoch_decay, gamma=0.1,
    )
    lr_scheduler.last_epoch = start_epoch - 1
    lr_scheduler.step()

    for epoch in range(start_epoch, end_epoch):
        meters_train = defaultdict(AverageValueMeter)
        meters_val = defaultdict(AverageValueMeter)
        meters_time = defaultdict(AverageValueMeter)

        h = functools.partial(h_pose_custom, model=model, cfg=args, n_iterations=args.n_iterations,
                              mesh_db=mesh_db, input_generator=args.TCO_input_generator)

        def train_epoch():
            model.train()
            iterator = tqdm(ds_iter_train, ncols=80)
            t = time.time()
            for n, sample in enumerate(iterator):
                if n > 0:
                    meters_time['data'].add(time.time() - t)

                optimizer.zero_grad()

                t = time.time()
                loss = h(data=sample, meters=meters_train)
                meters_time['forward'].add(time.time() - t)
                iterator.set_postfix(loss=loss.item())
                meters_train['loss_total'].add(loss.item())

                t = time.time()
                loss.backward()
                total_grad_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(), max_norm=args.clip_grad_norm, norm_type=2)
                meters_train['grad_norm'].add(torch.as_tensor(total_grad_norm).item())

                optimizer.step()
                meters_time['backward'].add(time.time() - t)
                meters_time['memory'].add(torch.cuda.max_memory_allocated() / 1024. ** 2)

                if epoch < args.n_epochs_warmup:
                    lr_scheduler_warmup.step()
                t = time.time()
            if epoch >= args.n_epochs_warmup:
                lr_scheduler.step()

        @torch.no_grad()
        def validation():
            model.eval()
            for sample in tqdm(ds_iter_val, ncols=80):
                loss = h(data=sample, meters=meters_val)
                meters_val['loss_total'].add(loss.item())

        @torch.no_grad()
        def test():
            model.eval()
            return run_eval(eval_bundle, epoch=epoch)

        train_epoch()

        if epoch % args.val_epoch_interval == 0:
            validation()

        test_dict = None
        if epoch % args.test_epoch_interval == 0:
            test_dict = test()

        log_dict = dict()
        log_dict.update({
            'grad_norm': meters_train['grad_norm'].mean,
            'grad_norm_std': meters_train['grad_norm'].std,
            'learning_rate': optimizer.param_groups[0]['lr'],
            'time_forward': meters_time['forward'].mean,
            'time_backward': meters_time['backward'].mean,
            'time_data': meters_time['data'].mean,
            'gpu_memory': meters_time['memory'].mean,
            'time': time.time(),
            'n_iterations': (epoch + 1) * len(ds_iter_train),
            'n_datas': (epoch + 1) * args.global_batch_size * len(ds_iter_train),
        })

        for string, meters in zip(('train', 'val'), (meters_train, meters_val)):
            for k in dict(meters).keys():
                log_dict[f'{string}_{k}'] = meters[k].mean

        log_dict = reduce_dict(log_dict)
        if get_rank() == 0:
            log(config=args, model=model, epoch=epoch,
                log_dict=log_dict, test_dict=test_dict)
        dist.barrier()
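# A runnable toy sketch of the learning-rate schedule wired up above: a
# per-batch linear warmup for the first n_epochs_warmup epochs, then a
# per-epoch StepLR that divides the LR by 10 every lr_epoch_decay epochs.
# All numbers below are illustrative, not the training defaults.
import torch

toy_model = torch.nn.Linear(2, 2)
toy_optimizer = torch.optim.SGD(toy_model.parameters(), lr=1e-3)
epoch_size, batch_size = 1000, 100
n_epochs_warmup, lr_epoch_decay = 2, 5
n_batches_warmup = n_epochs_warmup * (epoch_size // batch_size)
warmup = torch.optim.lr_scheduler.LambdaLR(
    toy_optimizer, lambda batch: (batch + 1) / n_batches_warmup)
decay = torch.optim.lr_scheduler.StepLR(toy_optimizer, step_size=lr_epoch_decay, gamma=0.1)
for epoch in range(12):
    for _ in range(epoch_size // batch_size):
        toy_optimizer.step()
        if epoch < n_epochs_warmup:
            warmup.step()  # warmup advances once per batch
    if epoch >= n_epochs_warmup:
        decay.step()  # decay advances once per epoch
    print(epoch, toy_optimizer.param_groups[0]['lr'])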
import os

import torch

from cosypose.utils.distributed import init_distributed_mode, get_world_size, get_tmp_dir, get_rank
from cosypose.utils.logging import get_logger

logger = get_logger(__name__)


if __name__ == '__main__':
    init_distributed_mode()
    proc_id = get_rank()
    n_tasks = get_world_size()
    n_cpus = os.environ.get('N_CPUS', 'not specified')
    logger.info(f'Number of processes (=num GPUs): {n_tasks}')
    logger.info(f'Process ID: {proc_id}')
    logger.info(f'TMP Directory for this job: {get_tmp_dir()}')
    logger.info(f'GPU CUDA ID: {torch.cuda.current_device()}')
    logger.info(f'Max number of CPUs for this process: {n_cpus}')
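# A hedged sketch of the standard environment-variable initialization that a
# helper like init_distributed_mode typically wraps; the actual cosypose
# helper may use a different backend, launcher, or rendezvous mechanism.
import os

import torch
import torch.distributed as dist


def init_from_env():
    rank = int(os.environ.get('RANK', 0))
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    local_rank = int(os.environ.get('LOCAL_RANK', 0))
    torch.cuda.set_device(local_rank)
    if world_size > 1 and not dist.is_initialized():
        # init_method='env://' reads MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE
        # from the environment, as set by standard PyTorch launchers.
        dist.init_process_group(backend='nccl', init_method='env://')
    return rank, world_size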
def train_detector(args):
    torch.set_num_threads(1)

    if args.resume_run_id:
        resume_dir = EXP_DIR / args.resume_run_id
        # config.yaml stores the full argparse Namespace, hence the unsafe loader.
        resume_args = yaml.load((resume_dir / 'config.yaml').read_text(), Loader=yaml.UnsafeLoader)
        keep_fields = set([
            'resume_run_id',
            'epoch_size',
        ])
        vars(args).update({k: v for k, v in vars(resume_args).items() if k not in keep_fields})

    args = check_update_config(args)
    args.save_dir = EXP_DIR / args.run_id

    logger.info(f"{'-'*80}")
    for k, v in args.__dict__.items():
        logger.info(f"{k}: {v}")
    logger.info(f"{'-'*80}")

    # Initialize distributed
    device = torch.cuda.current_device()
    init_distributed_mode()
    world_size = get_world_size()
    args.n_gpus = world_size
    args.global_batch_size = world_size * args.batch_size
    logger.info(f'Connection established with {world_size} gpus.')

    # Make train/val datasets
    def make_datasets(dataset_names):
        datasets = []
        all_labels = set()
        for (ds_name, n_repeat) in dataset_names:
            assert 'test' not in ds_name
            ds = make_scene_dataset(ds_name)
            logger.info(f'Loaded {ds_name} with {len(ds)} images.')
            all_labels = all_labels.union(set(ds.all_labels))
            for _ in range(n_repeat):
                datasets.append(ds)
        return ConcatDataset(datasets), all_labels

    scene_ds_train, train_labels = make_datasets(args.train_ds_names)
    scene_ds_val, _ = make_datasets(args.val_ds_names)
    label_to_category_id = dict()
    label_to_category_id['background'] = 0
    for n, label in enumerate(sorted(list(train_labels)), 1):
        label_to_category_id[label] = n
    logger.info(f'Training with {len(label_to_category_id)} categories: {label_to_category_id}')
    args.label_to_category_id = label_to_category_id

    ds_kwargs = dict(
        resize=args.input_resize,
        rgb_augmentation=args.rgb_augmentation,
        background_augmentation=args.background_augmentation,
        gray_augmentation=args.gray_augmentation,
        label_to_category_id=label_to_category_id,
    )
    ds_train = DetectionDataset(scene_ds_train, **ds_kwargs)
    ds_val = DetectionDataset(scene_ds_val, **ds_kwargs)

    train_sampler = PartialSampler(ds_train, epoch_size=args.epoch_size)
    ds_iter_train = DataLoader(ds_train, sampler=train_sampler, batch_size=args.batch_size,
                               num_workers=args.n_dataloader_workers,
                               collate_fn=collate_fn,
                               drop_last=False, pin_memory=True)
    ds_iter_train = MultiEpochDataLoader(ds_iter_train)

    val_sampler = PartialSampler(ds_val, epoch_size=int(0.1 * args.epoch_size))
    ds_iter_val = DataLoader(ds_val, sampler=val_sampler, batch_size=args.batch_size,
                             num_workers=args.n_dataloader_workers,
                             collate_fn=collate_fn,
                             drop_last=False, pin_memory=True)
    ds_iter_val = MultiEpochDataLoader(ds_iter_val)

    model = create_model_detector(cfg=args, n_classes=len(args.label_to_category_id)).cuda()

    if args.resume_run_id:
        resume_dir = EXP_DIR / args.resume_run_id
        path = resume_dir / 'checkpoint.pth.tar'
        logger.info(f'Loading checkpoint from {path}')
        save = torch.load(path)
        state_dict = save['state_dict']
        model.load_state_dict(state_dict)
        start_epoch = save['epoch'] + 1
    else:
        start_epoch = 0
    end_epoch = args.n_epochs

    if args.run_id_pretrain is not None:
        pretrain_path = EXP_DIR / args.run_id_pretrain / 'checkpoint.pth.tar'
        logger.info(f'Using pretrained model from {pretrain_path}.')
        model.load_state_dict(torch.load(pretrain_path)['state_dict'])
    elif args.pretrain_coco:
        state_dict = load_state_dict_from_url(model_urls['maskrcnn_resnet50_fpn_coco'])
        # Drop the COCO box/mask predictor heads: the number of classes differs.
        keep = lambda k: 'box_predictor' not in k and 'mask_predictor' not in k
        state_dict = {k: v for k, v in state_dict.items() if keep(k)}
        model.load_state_dict(state_dict, strict=False)
        logger.info('Using model pre-trained on coco. Removed predictor heads.')
    else:
        logger.info('Training MaskRCNN from scratch.')

    # Synchronize models across processes.
    model = sync_model(model)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], output_device=device)

    # Optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    if args.optimizer.lower() == 'sgd':
        optimizer = torch.optim.SGD(params, lr=args.lr, weight_decay=args.weight_decay,
                                    momentum=args.momentum)
    elif args.optimizer.lower() == 'adam':
        optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
    else:
        raise ValueError(f'Unknown optimizer {args.optimizer}')

    # Warmup
    if args.n_epochs_warmup == 0:
        lambd = lambda epoch: 1
    else:
        n_batches_warmup = args.n_epochs_warmup * (args.epoch_size // args.batch_size)
        lambd = lambda batch: (batch + 1) / n_batches_warmup
    lr_scheduler_warmup = torch.optim.lr_scheduler.LambdaLR(optimizer, lambd)
    lr_scheduler_warmup.last_epoch = start_epoch * args.epoch_size // args.batch_size

    # LR schedulers
    # Divide LR by 10 every args.lr_epoch_decay epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=args.lr_epoch_decay, gamma=0.1,
    )
    lr_scheduler.last_epoch = start_epoch - 1
    lr_scheduler.step()

    for epoch in range(start_epoch, end_epoch):
        meters_train = defaultdict(AverageValueMeter)
        meters_val = defaultdict(AverageValueMeter)
        meters_time = defaultdict(AverageValueMeter)

        h = functools.partial(h_maskrcnn, model=model, cfg=args)

        def train_epoch():
            model.train()
            iterator = tqdm(ds_iter_train, ncols=80)
            t = time.time()
            for n, sample in enumerate(iterator):
                if n > 0:
                    meters_time['data'].add(time.time() - t)

                optimizer.zero_grad()

                t = time.time()
                loss = h(data=sample, meters=meters_train)
                meters_time['forward'].add(time.time() - t)
                iterator.set_postfix(loss=loss.item())
                meters_train['loss_total'].add(loss.item())

                t = time.time()
                loss.backward()
                total_grad_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(), max_norm=np.inf, norm_type=2)
                meters_train['grad_norm'].add(torch.as_tensor(total_grad_norm).item())

                optimizer.step()
                meters_time['backward'].add(time.time() - t)
                meters_time['memory'].add(torch.cuda.max_memory_allocated() / 1024. ** 2)

                if epoch < args.n_epochs_warmup:
                    lr_scheduler_warmup.step()
                t = time.time()
            if epoch >= args.n_epochs_warmup:
                lr_scheduler.step()

        @torch.no_grad()
        def validation():
            # Keep train mode: torchvision's MaskRCNN only returns the loss
            # dict in train mode; in eval mode it returns detections instead.
            model.train()
            for sample in tqdm(ds_iter_val, ncols=80):
                loss = h(data=sample, meters=meters_val)
                meters_val['loss_total'].add(loss.item())

        train_epoch()

        if epoch % args.val_epoch_interval == 0:
            validation()

        test_dict = None
        if epoch % args.test_epoch_interval == 0:
            model.eval()
            test_dict = run_eval(args, model, epoch)

        log_dict = dict()
        log_dict.update({
            'grad_norm': meters_train['grad_norm'].mean,
            'grad_norm_std': meters_train['grad_norm'].std,
            'learning_rate': optimizer.param_groups[0]['lr'],
            'time_forward': meters_time['forward'].mean,
            'time_backward': meters_time['backward'].mean,
            'time_data': meters_time['data'].mean,
            'gpu_memory': meters_time['memory'].mean,
            'time': time.time(),
            'n_iterations': (epoch + 1) * len(ds_iter_train),
            'n_datas': (epoch + 1) * args.global_batch_size * len(ds_iter_train),
        })

        for string, meters in zip(('train', 'val'), (meters_train, meters_val)):
            for k in dict(meters).keys():
                log_dict[f'{string}_{k}'] = meters[k].mean

        log_dict = reduce_dict(log_dict)
        if get_rank() == 0:
            log(config=args, model=model, epoch=epoch,
                log_dict=log_dict, test_dict=test_dict)
        dist.barrier()
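# A minimal sketch of what a sync_model-style helper accomplishes before the
# DistributedDataParallel wrap above: broadcast the rank-0 weights so every
# replica starts from identical parameters. This is an assumed equivalent,
# not the repository's actual implementation.
import torch.distributed as dist


def broadcast_model_from_rank0(model):
    if dist.is_available() and dist.is_initialized():
        for tensor in model.state_dict().values():
            # In-place broadcast: rank 0 sends, all other ranks receive.
            dist.broadcast(tensor, src=0)
    return model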