def train_model(): """Trains the model.""" # Setup training/testing environment setup_env() # Construct the model, ema, loss_fun, and optimizer model = setup_model() ema = deepcopy(model) loss_fun = builders.build_loss_fun().cuda() optimizer = optim.construct_optimizer(model) # Load checkpoint or initial weights start_epoch = 0 if cfg.TRAIN.AUTO_RESUME and cp.has_checkpoint(): file = cp.get_last_checkpoint() epoch = cp.load_checkpoint(file, model, ema, optimizer)[0] logger.info("Loaded checkpoint from: {}".format(file)) start_epoch = epoch + 1 elif cfg.TRAIN.WEIGHTS: train_weights = get_weights_file(cfg.TRAIN.WEIGHTS) cp.load_checkpoint(train_weights, model, ema) logger.info("Loaded initial weights from: {}".format(train_weights)) # Create data loaders and meters train_loader = data_loader.construct_train_loader() test_loader = data_loader.construct_test_loader() train_meter = meters.TrainMeter(len(train_loader)) test_meter = meters.TestMeter(len(test_loader)) ema_meter = meters.TestMeter(len(test_loader), "test_ema") # Create a GradScaler for mixed precision training scaler = amp.GradScaler(enabled=cfg.TRAIN.MIXED_PRECISION) # Compute model and loader timings if start_epoch == 0 and cfg.PREC_TIME.NUM_ITER > 0: benchmark.compute_time_full(model, loss_fun, train_loader, test_loader) # Perform the training loop logger.info("Start epoch: {}".format(start_epoch + 1)) for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH): # Train for one epoch params = (train_loader, model, ema, loss_fun, optimizer, scaler, train_meter) train_epoch(*params, cur_epoch) # Compute precise BN stats if cfg.BN.USE_PRECISE_STATS: net.compute_precise_bn_stats(model, train_loader) net.compute_precise_bn_stats(ema, train_loader) # Evaluate the model test_epoch(test_loader, model, test_meter, cur_epoch) test_epoch(test_loader, ema, ema_meter, cur_epoch) test_err = test_meter.get_epoch_stats(cur_epoch)["top1_err"] ema_err = ema_meter.get_epoch_stats(cur_epoch)["top1_err"] # Save a checkpoint file = cp.save_checkpoint(model, ema, optimizer, cur_epoch, test_err, ema_err) logger.info("Wrote checkpoint to: {}".format(file))
def test_model(): """Evaluates a trained model.""" # Setup training/testing environment setup_env() # Construct the model model = setup_model() # Load model weights if cfg.TEST.WEIGHTS: checkpoint.load_checkpoint(cfg.TEST.WEIGHTS, model) logger.info("Loaded model weights from: {}".format(cfg.TEST.WEIGHTS)) elif checkpoint.has_checkpoint(): last_checkpoint = checkpoint.get_last_checkpoint() checkpoint.load_checkpoint(last_checkpoint, model) logger.info("Loaded checkpoint from: {}".format(last_checkpoint)) else: print("ERROR: NO checkpoint! ") os._exit() # Create data loaders and meters test_loader = loader.construct_test_loader() test_meter = meters.TestMeter(len(test_loader)) # Evaluate the model if cfg.TASK == 'psd' or cfg.TASK == 'fix': result,ce_error=test_epoch_semi(test_loader, model, test_meter, 0) else: result,ce_error=test_epoch_semi(test_loader, model, test_meter, 0) with open(cfg.OUT_DIR+'/result.txt','w') as f: f.write(str(result["top1_err"])+'\n') f.write(str(ce_error[0])+'\n') f.write(str(ce_error[1])+'\n') print(result["top1_err"],ce_error)
def train_model(): """Trains the model.""" # Setup training/testing environment setup_env() # Construct the model, loss_fun, and optimizer model = setup_model() loss_fun = builders.build_loss_fun().cuda() optimizer = optim.construct_optimizer(model) # Load checkpoint or initial weights start_epoch = 0 if cfg.TRAIN.AUTO_RESUME and checkpoint.has_checkpoint(): last_checkpoint = checkpoint.get_last_checkpoint() checkpoint_epoch = checkpoint.load_checkpoint(last_checkpoint, model, optimizer) logger.info("Loaded checkpoint from: {}".format(last_checkpoint)) start_epoch = checkpoint_epoch + 1 elif cfg.TRAIN.WEIGHTS: checkpoint.load_checkpoint(cfg.TRAIN.WEIGHTS, model) logger.info("Loaded initial weights from: {}".format( cfg.TRAIN.WEIGHTS)) # Create data loaders and meters train_loader = loader.construct_train_loader() test_loader = loader.construct_test_loader() train_meter = meters.TrainMeter(len(train_loader)) test_meter = meters.TestMeter(len(test_loader)) # Compute model and loader timings if start_epoch == 0 and cfg.PREC_TIME.NUM_ITER > 0: benchmark.compute_time_full(model, loss_fun, train_loader, test_loader) # Perform the training loop logger.info("Start epoch: {}".format(start_epoch + 1)) for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH): if hasattr(cfg, 'search_epoch'): if cur_epoch >= cfg.search_epoch: break # Train for one epoch train_epoch(train_loader, model, loss_fun, optimizer, train_meter, cur_epoch) # Compute precise BN stats if cfg.BN.USE_PRECISE_STATS: net.compute_precise_bn_stats(model, train_loader) # Save a checkpoint if (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0: checkpoint_file = checkpoint.save_checkpoint( model, optimizer, cur_epoch) logger.info("Wrote checkpoint to: {}".format(checkpoint_file)) # Evaluate the model next_epoch = cur_epoch + 1 if next_epoch % cfg.TRAIN.EVAL_PERIOD == 0 or next_epoch == cfg.OPTIM.MAX_EPOCH: stats = test_epoch(test_loader, model, test_meter, cur_epoch) nni.report_intermediate_result(stats['top1_err']) nni.report_final_result(test_meter.min_top1_err)
def train_kd_model(): """Trains the model.""" # Setup training/testing environment setup_env() # Construct the model, loss_fun, and optimizer model = setup_model() loss_fun = builders.build_loss_fun().cuda() optimizer = optim.construct_optimizer(model) # Load checkpoint or initial weights start_epoch = 0 if cfg.TRAIN.AUTO_RESUME and cp.has_checkpoint(): file = cp.get_last_checkpoint() epoch = cp.load_checkpoint(file, model, optimizer) logger.info("Loaded checkpoint from: {}".format(file)) start_epoch = epoch + 1 elif cfg.TRAIN.WEIGHTS: cp.load_checkpoint(cfg.TRAIN.WEIGHTS, model, strict=False) logger.info("Loaded initial weights from: {}".format( cfg.TRAIN.WEIGHTS)) # Create data loaders and meters train_loader = data_loader.construct_train_loader() test_loader = data_loader.construct_test_loader() train_meter = meters.TrainMeter(len(train_loader)) test_meter = meters.TestMeter(len(test_loader)) # Create a GradScaler for mixed precision training scaler = amp.GradScaler(enabled=cfg.TRAIN.MIXED_PRECISION) # Compute model and loader timings if start_epoch == 0 and cfg.PREC_TIME.NUM_ITER > 0: benchmark.compute_time_full(model, loss_fun, train_loader, test_loader) # Perform the training loop logger.info("Start epoch: {}".format(start_epoch + 1)) best_err = np.inf # Create the teacher model teacher = setup_teacher_model() for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH): # Train for one epoch params = (train_loader, model, loss_fun, optimizer, scaler, train_meter, teacher) train_kd_epoch(*params, cur_epoch) # Compute precise BN stats if cfg.BN.USE_PRECISE_STATS: net.compute_precise_bn_stats(model, train_loader) # Evaluate the model test_epoch(test_loader, model, test_meter, cur_epoch) # Check if checkpoint is best so far (note: should checkpoint meters as well) stats = test_meter.get_epoch_stats(cur_epoch) best = stats["top1_err"] <= best_err best_err = min(stats["top1_err"], best_err) # Save a checkpoint file = cp.save_checkpoint(model, optimizer, cur_epoch, best) logger.info("Wrote checkpoint to: {}".format(file))
def test_model(): """Evaluates a trained model.""" # Setup training/testing environment setup_env() # Construct the model model = setup_model() # Load model weights checkpoint.load_checkpoint(cfg.TEST.WEIGHTS, model) logger.info("Loaded model weights from: {}".format(cfg.TEST.WEIGHTS)) # Create data loaders and meters test_loader = loader.construct_test_loader() test_meter = meters.TestMeter(len(test_loader)) # Evaluate the model test_epoch(test_loader, model, test_meter, 0)
def train_model(): """Trains the model.""" # Setup training/testing environment setup_env() # Construct the model, loss_fun, and optimizer model = setup_model() loss_fun = builders.build_loss_fun().cuda() optimizer = optim.construct_optimizer(model) # Load checkpoint or initial weights start_epoch = 0 if cfg.TRAIN.AUTO_RESUME and checkpoint.has_checkpoint(): last_checkpoint = checkpoint.get_last_checkpoint() checkpoint_epoch = checkpoint.load_checkpoint(last_checkpoint, model, optimizer) logger.info("Loaded checkpoint from: {}".format(last_checkpoint)) start_epoch = checkpoint_epoch + 1 elif cfg.TRAIN.WEIGHTS: checkpoint.load_checkpoint(cfg.TRAIN.WEIGHTS, model) logger.info("Loaded initial weights from: {}".format( cfg.TRAIN.WEIGHTS)) # Compute precise time if start_epoch == 0 and cfg.PREC_TIME.ENABLED: logger.info("Computing precise time...") prec_time = net.compute_precise_time(model, loss_fun) logger.info(logging.dump_json_stats(prec_time)) net.reset_bn_stats(model) # Create data loaders and meters train_loader = loader.construct_train_loader() test_loader = loader.construct_test_loader() train_meter = meters.TrainMeter(len(train_loader)) test_meter = meters.TestMeter(len(test_loader)) # Perform the training loop logger.info("Start epoch: {}".format(start_epoch + 1)) for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH): # Train for one epoch train_epoch(train_loader, model, loss_fun, optimizer, train_meter, cur_epoch) # Compute precise BN stats if cfg.BN.USE_PRECISE_STATS: net.compute_precise_bn_stats(model, train_loader) # Save a checkpoint if (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0: checkpoint_file = checkpoint.save_checkpoint( model, optimizer, cur_epoch) logger.info("Wrote checkpoint to: {}".format(checkpoint_file)) # Evaluate the model next_epoch = cur_epoch + 1 if next_epoch % cfg.TRAIN.EVAL_PERIOD == 0 or next_epoch == cfg.OPTIM.MAX_EPOCH: test_epoch(test_loader, model, test_meter, cur_epoch)
def test_model(): """Evaluates the model.""" # Setup logging logging.setup_logging() # Show the config logger.info("Config:\n{}".format(cfg)) # Fix the RNG seeds (see RNG comment in core/config.py for discussion) np.random.seed(cfg.RNG_SEED) torch.manual_seed(cfg.RNG_SEED) # Configure the CUDNN backend torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK # Build the model (before the loaders to speed up debugging) model = builders.build_model() logger.info("Model:\n{}".format(model)) logger.info(logging.dump_json_stats(net.complexity(model))) # Compute precise time if cfg.PREC_TIME.ENABLED: logger.info("Computing precise time...") loss_fun = builders.build_loss_fun() prec_time = net.compute_precise_time(model, loss_fun) logger.info(logging.dump_json_stats(prec_time)) net.reset_bn_stats(model) # Load model weights checkpoint.load_checkpoint(cfg.TEST.WEIGHTS, model) logger.info("Loaded model weights from: {}".format(cfg.TEST.WEIGHTS)) # Create data loaders test_loader = loader.construct_test_loader() # Create meters test_meter = meters.TestMeter(len(test_loader)) # Evaluate the model test_epoch(test_loader, model, test_meter, 0)
# Note: this helper shadows the built-in eval(); kept for compatibility with callers.
@torch.no_grad()
def eval(model_weights, loader, replace=None, cfg=None):
    """Evaluates a student model with the given weights on the given loader."""
    cfg_student = STUDENT if cfg is None else cfg
    print("Start evaluation on {} with weights from {}...".format(cfg_student, model_weights))
    meter = meters.TestMeter(len(loader))
    # Build the student model (EfficientNet or RegNetY) and load the weights
    if "EfficientNet" in cfg_student:
        model = effnet(cfg_student, pretrained=False).cuda()
    else:
        model = regnety(cfg_student, pretrained=False).cuda()
    cp.load_checkpoint(model_weights, model, replace=replace)
    model.eval()
    meter.reset()
    start_time = time.time()
    for cur_iter, (inputs, labels) in enumerate(loader):
        inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
        preds = model(inputs)
        # Compute and accumulate the top-1/top-5 errors for this batch
        top1_err, top5_err = meters.topk_errors(preds, labels, [1, 5])
        top1_err, top5_err = top1_err.item(), top5_err.item()
        meter.update_stats(top1_err, top5_err, inputs.size(0))
        if (cur_iter + 1) % 100 == 0:
            print("iter {}/{}".format(cur_iter + 1, len(loader)))
    print("Total evaluation time: {}s".format(round(time.time() - start_time)))
    print("**************************************")
    print("Top1 accuracy: {:.2f}".format(100 - meter.get_epoch_stats(0)["min_top1_err"]))
    print("**************************************")
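# Example usage (hedged: the weights path and config name below are placeholders,
# not files or keys that are known to exist in this repo):
#
#   test_loader = data_loader.construct_test_loader()
#   eval("checkpoints/student_best.pyth", test_loader, cfg="EfficientNet-B0")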
def train_model(): """Trains the model.""" # Setup logging logging.setup_logging() # Show the config logger.info("Config:\n{}".format(cfg)) # Fix the RNG seeds (see RNG comment in core/config.py for discussion) np.random.seed(cfg.RNG_SEED) torch.manual_seed(cfg.RNG_SEED) # Configure the CUDNN backend torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK # Build the model (before the loaders to speed up debugging) model = builders.build_model() logger.info("Model:\n{}".format(model)) logger.info(logging.dump_json_stats(net.complexity(model))) # Define the loss function loss_fun = builders.build_loss_fun() # Construct the optimizer optimizer = optim.construct_optimizer(model) # Load checkpoint or initial weights start_epoch = 0 if cfg.TRAIN.AUTO_RESUME and checkpoint.has_checkpoint(): last_checkpoint = checkpoint.get_last_checkpoint() checkpoint_epoch = checkpoint.load_checkpoint(last_checkpoint, model, optimizer) logger.info("Loaded checkpoint from: {}".format(last_checkpoint)) start_epoch = checkpoint_epoch + 1 elif cfg.TRAIN.WEIGHTS: checkpoint.load_checkpoint(cfg.TRAIN.WEIGHTS, model) logger.info("Loaded initial weights from: {}".format( cfg.TRAIN.WEIGHTS)) # Compute precise time if start_epoch == 0 and cfg.PREC_TIME.ENABLED: logger.info("Computing precise time...") prec_time = net.compute_precise_time(model, loss_fun) logger.info(logging.dump_json_stats(prec_time)) net.reset_bn_stats(model) # Create data loaders train_loader = loader.construct_train_loader() test_loader = loader.construct_test_loader() # Create meters train_meter = meters.TrainMeter(len(train_loader)) test_meter = meters.TestMeter(len(test_loader)) # Perform the training loop logger.info("Start epoch: {}".format(start_epoch + 1)) for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH): # Train for one epoch train_epoch(train_loader, model, loss_fun, optimizer, train_meter, cur_epoch) # Compute precise BN stats if cfg.BN.USE_PRECISE_STATS: net.compute_precise_bn_stats(model, train_loader) # Save a checkpoint if checkpoint.is_checkpoint_epoch(cur_epoch): checkpoint_file = checkpoint.save_checkpoint( model, optimizer, cur_epoch) logger.info("Wrote checkpoint to: {}".format(checkpoint_file)) # Evaluate the model if is_eval_epoch(cur_epoch): test_epoch(test_loader, model, test_meter, cur_epoch)
def test_ftta_model(corruptions, levels):
    """Fine-tunes part of the model using feedback, across all corruption types and levels."""
    all_results = []
    for corruption_level in levels:
        lvl_results = []
        for corruption_type in corruptions:
            cfg.TRAIN.CORRUPTION = corruption_type
            cfg.TRAIN.LEVEL = corruption_level
            cfg.TEST.CORRUPTION = corruption_type
            cfg.TEST.LEVEL = corruption_level
            # Setup training/testing environment
            setup_env()
            # Construct the model, loss_fun, and optimizer
            model = setup_model()
            loss_fun = builders.build_loss_fun().cuda()
            optimizer = optim.construct_optimizer(model)
            # Load initial weights
            start_epoch = 0
            checkpoint.load_checkpoint(cfg.TRAIN.WEIGHTS, model, strict=cfg.TRAIN.LOAD_STRICT)
            logger.info("Loaded initial weights from: {}".format(cfg.TRAIN.WEIGHTS))
            # Create data loaders and meters
            train_loader = loader.construct_train_loader()
            test_loader = loader.construct_test_loader()
            train_meter = meters.TrainMeter(len(train_loader))
            test_meter = meters.TestMeter(len(test_loader))
            # Compute model and loader timings
            if start_epoch == 0 and cfg.PREC_TIME.NUM_ITER > 0:
                benchmark.compute_time_full(model, loss_fun, train_loader, test_loader)
            # Perform the training loop
            logger.info("Start epoch: {}".format(start_epoch + 1))
            for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
                if cfg.TRAIN.ADAPTATION != 'test_only':
                    if cfg.TRAIN.ADAPTATION == 'update_bn':
                        bn_update(model, train_loader)
                    elif cfg.TRAIN.ADAPTATION == 'min_entropy':
                        # Train for one epoch
                        train_epoch(train_loader, model, loss_fun, optimizer, train_meter, cur_epoch)
                        bn_update(model, train_loader)
                    # Save a checkpoint
                    if (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0:
                        checkpoint_file = checkpoint.save_checkpoint(model, optimizer, cur_epoch)
                        logger.info("Wrote checkpoint to: {}".format(checkpoint_file))
                # Evaluate the model
                next_epoch = cur_epoch + 1
                if next_epoch % cfg.TRAIN.EVAL_PERIOD == 0 or next_epoch == cfg.OPTIM.MAX_EPOCH:
                    top1 = test_epoch(test_loader, model, test_meter, cur_epoch)
            lvl_results.append(top1)
        all_results.append(lvl_results)
    for lvl_idx in range(len(all_results)):
        logger.info("corruption level: {}".format(levels[lvl_idx]))
        logger.info("corruption types: {}".format(corruptions))
        logger.info(all_results[lvl_idx])
    # show_parameters(model)
    return all_results
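# The 'min_entropy' adaptation mode presumably minimizes the prediction entropy
# on the corrupted data, as in test-time adaptation methods such as Tent. A
# sketch of such an objective, which may differ from the loss actually built by
# builders.build_loss_fun() in this repo:
import torch.nn.functional as F

def _sketch_entropy_loss(logits):
    # Mean per-sample entropy of the softmax predictions; minimizing this
    # sharpens the model's predictions without requiring any labels
    probs = F.softmax(logits, dim=1)
    log_probs = F.log_softmax(logits, dim=1)
    return -(probs * log_probs).sum(dim=1).mean()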
def train_model(): """Trains the model.""" # Setup training/testing environment setup_env() # Construct the model, loss_fun, and optimizer model = setup_model() loss_fun = builders.build_loss_fun().cuda() optimizer = optim.construct_optimizer(model) # Load checkpoint or initial weights start_epoch = 0 if cfg.TRAIN.AUTO_RESUME and checkpoint.has_checkpoint(): last_checkpoint = checkpoint.get_last_checkpoint() checkpoint_epoch = checkpoint.load_checkpoint(last_checkpoint, model, optimizer) logger.info("Loaded checkpoint from: {}".format(last_checkpoint)) start_epoch = checkpoint_epoch + 1 elif cfg.TRAIN.WEIGHTS: checkpoint.load_checkpoint(cfg.TRAIN.WEIGHTS, model) logger.info("Loaded initial weights from: {}".format( cfg.TRAIN.WEIGHTS)) # Create data loaders and meters if cfg.TEST.DATASET == 'imagenet_dataset' or cfg.TRAIN.DATASET == 'imagenet_dataset': dataset = loader.construct_train_loader() train_loader = dataset.train_loader test_loader = dataset.val_loader else: dataset = None train_loader = loader.construct_train_loader() test_loader = loader.construct_test_loader() train_meter = meters.TrainMeter(len(train_loader)) test_meter = meters.TestMeter(len(test_loader)) # Compute model and loader timings if start_epoch == 0 and cfg.PREC_TIME.NUM_ITER > 0: benchmark.compute_time_full(model, loss_fun, train_loader, test_loader) # Perform the training loop logger.info("Start epoch: {}".format(start_epoch + 1)) for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH): # Train for one epoch train_epoch(train_loader, model, loss_fun, optimizer, train_meter, cur_epoch) # Compute precise BN stats if cfg.BN.USE_PRECISE_STATS: net.compute_precise_bn_stats(model, train_loader) # Save a checkpoint if (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0: checkpoint_file = checkpoint.save_checkpoint( model, optimizer, cur_epoch) logger.info("Wrote checkpoint to: {}".format(checkpoint_file)) # Evaluate the model next_epoch = cur_epoch + 1 if next_epoch % cfg.TRAIN.EVAL_PERIOD == 0 or next_epoch == cfg.OPTIM.MAX_EPOCH: logger.info("Start testing") test_epoch(test_loader, model, test_meter, cur_epoch) if dataset is not None: logger.info("Reset the dataset") train_loader._dali_iterator.reset() test_loader._dali_iterator.reset() # clear memory if torch.cuda.is_available(): torch.cuda.synchronize() torch.cuda.empty_cache( ) # https://forums.fast.ai/t/clearing-gpu-memory-pytorch/14637 gc.collect()