def compute_time_full(model, loss_fun, train_loader, test_loader):
    """Times model and data loader."""
    logger.info("Computing model and loader timings...")
    # Compute timings
    test_fw_time = compute_time_eval(model)
    train_fw_time, train_bw_time = compute_time_train(model, loss_fun)
    train_fw_bw_time = train_fw_time + train_bw_time
    train_loader_time = compute_time_loader(train_loader)
    # Output iter timing
    iter_times = {
        "test_fw_time": test_fw_time,
        "train_fw_time": train_fw_time,
        "train_bw_time": train_bw_time,
        "train_fw_bw_time": train_fw_bw_time,
        "train_loader_time": train_loader_time,
    }
    logger.info(logging.dump_log_data(iter_times, "iter_times"))
    # Output epoch timing
    epoch_times = {
        "test_fw_time": test_fw_time * len(test_loader),
        "train_fw_time": train_fw_time * len(train_loader),
        "train_bw_time": train_bw_time * len(train_loader),
        "train_fw_bw_time": train_fw_bw_time * len(train_loader),
        "train_loader_time": train_loader_time * len(train_loader),
    }
    logger.info(logging.dump_log_data(epoch_times, "epoch_times"))
    # Compute data loader overhead (assuming DATA_LOADER.NUM_WORKERS > 1)
    overhead = max(0, train_loader_time - train_fw_bw_time) / train_fw_bw_time
    logger.info("Overhead of data loader is {:.2f}%".format(overhead * 100))
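# A minimal usage sketch for compute_time_full. setup_model is defined below;
# builders.build_loss_fun and loader.construct_train_loader are assumed to
# follow the builder/loader API used elsewhere in this codebase, so treat the
# exact names as assumptions rather than confirmed entry points.
def time_model_and_loader_example():
    """Builds a model plus loaders and reports the timing breakdown."""
    model = setup_model()
    loss_fun = builders.build_loss_fun().cuda()  # assumed builder helper
    train_loader = loader.construct_train_loader()  # assumed loader helper
    test_loader = loader.construct_test_loader()
    compute_time_full(model, loss_fun, train_loader, test_loader)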
def test_full_time():
    """Times the search-split data loader over several epochs."""
    # Save the config and set up logging
    config.dump_cfg()
    logging.setup_logging()
    # Log the config as both human readable and as a json
    logger.info("Config:\n{}".format(cfg))
    logger.info(logging.dump_log_data(cfg, "cfg"))
    train_loader, test_loader = loader.construct_loader(
        cfg.SEARCH.DATASET, cfg.SEARCH.SPLIT, cfg.SEARCH.BATCH_SIZE
    )
    avg_time = benchmark.compute_full_loader(test_loader, epoch=3)
    for i, _time in enumerate(avg_time):
        logger.info("Average time of epoch {} is: {}".format(i, _time))
def test_full_time():
    """Times the standard test data loader over several epochs."""
    # Save the config
    config.dump_cfg()
    # Setup logging
    logging.setup_logging()
    # Log the config as both human readable and as a json
    logger.info("Config:\n{}".format(cfg))
    logger.info(logging.dump_log_data(cfg, "cfg"))
    test_loader = loader.construct_test_loader()
    avg_time = benchmark.compute_full_loader(test_loader, epoch=3)
    for i, _time in enumerate(avg_time):
        logger.info("Average time of epoch {} is: {}".format(i, _time))
def setup_model():
    """Sets up a model for training or testing and logs the results."""
    # Build the model
    model = builders.build_model()
    logger.info("Model:\n{}".format(model))
    # Log model complexity
    logger.info(logging.dump_log_data(net.complexity(model), "complexity"))
    # Transfer the model to the current GPU device
    err_str = "Cannot use more GPU devices than available"
    assert cfg.NUM_GPUS <= torch.cuda.device_count(), err_str
    cur_device = torch.cuda.current_device()
    model = model.cuda(device=cur_device)
    # Use multi-process data parallel model in the multi-gpu setting
    if cfg.NUM_GPUS > 1:
        # Make model replica operate on the current device
        model = torch.nn.parallel.DistributedDataParallel(
            module=model, device_ids=[cur_device], output_device=cur_device
        )
        # Set complexity function to be module's complexity function
        model.complexity = model.module.complexity
    return model
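# When cfg.NUM_GPUS > 1, setup_model wraps the model in DistributedDataParallel,
# which requires that the default process group is already initialized in each
# process. A minimal single-node sketch of that precondition (the address and
# port are placeholders, not values taken from this codebase):
def init_distributed_example(rank, world_size):
    """Initializes torch.distributed, then builds the model on this rank."""
    import torch.distributed as torch_dist
    torch_dist.init_process_group(
        backend="nccl",
        init_method="tcp://localhost:29500",  # placeholder address/port
        rank=rank,
        world_size=world_size,
    )
    torch.cuda.set_device(rank)  # cur_device in setup_model then points at this rank
    return setup_model()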
def setup_env():
    """Sets up the environment for training or testing."""
    if dist.is_master_proc():
        # Ensure the output dir exists and save the config
        os.makedirs(cfg.OUT_DIR, exist_ok=True)
        config.dump_cfg()
    # Setup logging
    logging.setup_logging()
    # Log the config as both human readable and as a json
    logger.info("Config:\n{}".format(cfg))
    logger.info(logging.dump_log_data(cfg, "cfg"))
    if cfg.DETERMINSTIC:
        # Fix the RNG seeds (see the RNG comment in core/config.py for discussion)
        np.random.seed(cfg.RNG_SEED)
        torch.manual_seed(cfg.RNG_SEED)
        torch.cuda.manual_seed_all(cfg.RNG_SEED)
        random.seed(cfg.RNG_SEED)
        # Disable benchmarking so CUDNN selects deterministic algorithms
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.enabled = True
    else:
        # Configure the CUDNN backend
        torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
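# A quick sanity check for the deterministic path (a sketch only; it assumes
# cfg.DETERMINSTIC and cfg.RNG_SEED are set in the loaded config): after each
# setup_env call, the reseeded RNGs should reproduce the same draws.
def check_determinism_example():
    """Verifies that two consecutive env setups yield identical draws."""
    setup_env()
    first = torch.randn(4)
    setup_env()
    second = torch.randn(4)
    assert torch.equal(first, second), "RNG state differs; check cfg.DETERMINSTIC"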
def log_epoch_stats(self, cur_epoch):
    """Logs the stats for the current epoch."""
    stats = self.get_epoch_stats(cur_epoch)
    logger.info(logging.dump_log_data(stats, "test_epoch"))
def log_iter_stats(self, cur_epoch, cur_iter):
    """Logs the stats for the current iteration (every LOG_PERIOD iters)."""
    if (cur_iter + 1) % cfg.LOG_PERIOD != 0:
        return
    stats = self.get_iter_stats(cur_epoch, cur_iter)
    logger.info(logging.dump_log_data(stats, "test_iter"))
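# Sketch of how these meter hooks are typically driven from an evaluation
# loop. The meter construction and update call are assumptions modeled on
# common meter APIs; only log_iter_stats/log_epoch_stats above are confirmed.
def test_epoch_example(model, test_loader, meter, cur_epoch):
    """Runs one evaluation epoch, logging per-iter and per-epoch stats."""
    model.eval()
    for cur_iter, (inputs, _labels) in enumerate(test_loader):
        with torch.no_grad():
            preds = model(inputs.cuda(non_blocking=True))
        # ... update the meter with errors/timings here (meter API assumed) ...
        meter.log_iter_stats(cur_epoch, cur_iter)
    meter.log_epoch_stats(cur_epoch)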