def train_and_eval(tag, dataroot, test_ratio=0.0, cv_fold=0, reporter=None, metric='last', save_path=None, only_eval=False, local_rank=-1, evaluation_interval=5):
    total_batch = C.get()["batch"]
    if local_rank >= 0:
        dist.init_process_group(backend='nccl', init_method='env://',
                                world_size=int(os.environ['WORLD_SIZE']))
        device = torch.device('cuda', local_rank)
        torch.cuda.set_device(device)

        C.get()['lr'] *= dist.get_world_size()
        logger.info(
            f'local batch={C.get()["batch"]} world_size={dist.get_world_size()} ----> total batch={C.get()["batch"] * dist.get_world_size()}'
        )
        total_batch = C.get()["batch"] * dist.get_world_size()

    is_master = local_rank < 0 or dist.get_rank() == 0
    if is_master:
        add_filehandler(logger, 'master' + '.log')

    if not reporter:
        reporter = lambda **kwargs: 0

    max_epoch = C.get()['epoch']
    trainsampler, trainloader, validloader, testloader_ = get_dataloaders(
        C.get()['dataset'], C.get()['batch'], dataroot, test_ratio,
        split_idx=cv_fold, multinode=(local_rank >= 0))

    # create a model & an optimizer
    model = get_model(C.get()['model'], num_class(C.get()['dataset']), local_rank=local_rank)
    model_ema = get_model(C.get()['model'], num_class(C.get()['dataset']), local_rank=-1)
    model_ema.eval()

    criterion_ce = criterion = CrossEntropyLabelSmooth(
        num_class(C.get()['dataset']), C.get().conf.get('lb_smooth', 0))
    if C.get().conf.get('mixup', 0.0) > 0.0:
        criterion = CrossEntropyMixUpLabelSmooth(
            num_class(C.get()['dataset']), C.get().conf.get('lb_smooth', 0))

    if C.get()['optimizer']['type'] == 'sgd':
        optimizer = optim.SGD(
            model.parameters(),
            lr=C.get()['lr'],
            momentum=C.get()['optimizer'].get('momentum', 0.9),
            weight_decay=0.0,
            nesterov=C.get()['optimizer'].get('nesterov', True))
    elif C.get()['optimizer']['type'] == 'rmsprop':
        optimizer = RMSpropTF(model.parameters(),
                              lr=C.get()['lr'],
                              weight_decay=0.0,
                              alpha=0.9,
                              momentum=0.9,
                              eps=0.001)
    else:
        raise ValueError('invalid optimizer type=%s' % C.get()['optimizer']['type'])

    lr_scheduler_type = C.get()['lr_schedule'].get('type', 'cosine')
    if lr_scheduler_type == 'cosine':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=C.get()['epoch'], eta_min=0.)
    elif lr_scheduler_type == 'resnet':
        scheduler = adjust_learning_rate_resnet(optimizer)
    elif lr_scheduler_type == 'efficientnet':
        scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer,
            lr_lambda=lambda x: 0.97**int(
                (x + C.get()['lr_schedule']['warmup']['epoch']) / 2.4))
    else:
        raise ValueError('invalid lr_schduler=%s' % lr_scheduler_type)

    if C.get()['lr_schedule'].get(
            'warmup', None) and C.get()['lr_schedule']['warmup']['epoch'] > 0:
        scheduler = GradualWarmupScheduler(
            optimizer,
            multiplier=C.get()['lr_schedule']['warmup']['multiplier'],
            total_epoch=C.get()['lr_schedule']['warmup']['epoch'],
            after_scheduler=scheduler)

    if not tag or not is_master:
        from FastAutoAugment.metrics import SummaryWriterDummy as SummaryWriter
        logger.warning('tag not provided, no tensorboard log.')
    else:
        from tensorboardX import SummaryWriter
    writers = [
        SummaryWriter(log_dir='./logs/%s/%s' % (tag, x))
        for x in ['train', 'valid', 'test']
    ]

    if C.get()['optimizer']['ema'] > 0.0 and is_master:
        # https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/4?u=ildoonet
        ema = EMA(C.get()['optimizer']['ema'])
    else:
        ema = None

    result = OrderedDict()
    epoch_start = 1  # TODO: change only eval=False when without save_path ??
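    # Checkpoint handling for this variant: if `save_path` is set but the file does not
    # exist yet, ImageNet-pretrained ResNet-50 weights are downloaded into the repo's
    # models directory and saved there as the initial checkpoint (for CIFAR-10 the fc
    # head is dropped first, since the class count differs); the file is then loaded
    # like any other checkpoint, restoring optimizer state and start epoch when present.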
    if save_path != 'test.pth':  # and is_master: --> should load all data(not able to be broadcasted)
        if save_path and not os.path.exists(save_path):
            import torch.utils.model_zoo as model_zoo
            data = model_zoo.load_url(
                'https://download.pytorch.org/models/resnet50-19c8e357.pth',
                model_dir=os.path.join(os.getcwd(), 'FastAutoAugment/models'))
            if C.get()['dataset'] == 'cifar10':
                data.pop('fc.weight')
                data.pop('fc.bias')
            model_dict = model.state_dict()
            model_dict.update(data)
            model.load_state_dict(model_dict)
            torch.save(model_dict, save_path)

        logger.info('%s file found. loading...' % save_path)
        data = torch.load(save_path)
        key = 'model' if 'model' in data else 'state_dict'

        if 'epoch' not in data:
            model.load_state_dict(data)
        else:
            logger.info('checkpoint epoch@%d' % data['epoch'])
            if not isinstance(model, (DataParallel, DistributedDataParallel)):
                model.load_state_dict({
                    k.replace('module.', ''): v
                    for k, v in data[key].items()
                })
            else:
                model.load_state_dict({
                    k if 'module.' in k else 'module.' + k: v
                    for k, v in data[key].items()
                })
            logger.info('optimizer.load_state_dict+')
            optimizer.load_state_dict(data['optimizer'])
            if data['epoch'] < C.get()['epoch']:
                epoch_start = data['epoch']
            else:
                only_eval = True
            if ema is not None:
                ema.shadow = data.get('ema', {}) if isinstance(
                    data.get('ema', {}), dict) else data['ema'].state_dict()
        del data

    if local_rank >= 0:
        for name, x in model.state_dict().items():
            dist.broadcast(x, 0)
        logger.info(
            f'multinode init. local_rank={dist.get_rank()} is_master={is_master}'
        )
        torch.cuda.synchronize()

    tqdm_disabled = bool(os.environ.get('TASK_NAME', '')) and local_rank != 0  # KakaoBrain Environment

    if only_eval:
        logger.info('evaluation only+')
        model.eval()
        rs = dict()
        rs['train'] = run_epoch(model, trainloader, criterion, None,
                                desc_default='train', epoch=0,
                                writer=writers[0], is_master=is_master)

        with torch.no_grad():
            rs['valid'] = run_epoch(model, validloader, criterion, None,
                                    desc_default='valid', epoch=0,
                                    writer=writers[1], is_master=is_master)
            rs['test'] = run_epoch(model, testloader_, criterion, None,
                                   desc_default='*test', epoch=0,
                                   writer=writers[2], is_master=is_master)

            if ema is not None and len(ema) > 0:
                model_ema.load_state_dict({
                    k.replace('module.', ''): v
                    for k, v in ema.state_dict().items()
                })
                rs['valid'] = run_epoch(model_ema, validloader, criterion_ce, None,
                                        desc_default='valid(EMA)', epoch=0,
                                        writer=writers[1], verbose=is_master,
                                        tqdm_disabled=tqdm_disabled)
                rs['test'] = run_epoch(model_ema, testloader_, criterion_ce, None,
                                       desc_default='*test(EMA)', epoch=0,
                                       writer=writers[2], verbose=is_master,
                                       tqdm_disabled=tqdm_disabled)

        for key, setname in itertools.product(['loss', 'top1', 'top5'],
                                              ['train', 'valid', 'test']):
            if setname not in rs:
                continue
            result['%s_%s' % (key, setname)] = rs[setname][key]
        result['epoch'] = 0
        return result

    # train loop
    best_top1 = 0
    for epoch in range(epoch_start, max_epoch + 1):
        if local_rank >= 0:
            trainsampler.set_epoch(epoch)

        model.train()
        rs = dict()
        rs['train'] = run_epoch(model, trainloader, criterion, optimizer,
                                desc_default='train', epoch=epoch,
                                writer=writers[0],
                                verbose=(is_master and local_rank <= 0),
                                scheduler=scheduler, ema=ema,
                                wd=C.get()['optimizer']['decay'],
                                tqdm_disabled=tqdm_disabled)
        model.eval()

        if math.isnan(rs['train']['loss']):
            raise Exception('train loss is NaN.')

        if (ema is not None and C.get()['optimizer']['ema_interval'] > 0
                and epoch % C.get()['optimizer']['ema_interval'] == 0):
            logger.info(f'ema synced+ rank={dist.get_rank()}')
            if ema is not None:
                model.load_state_dict(ema.state_dict())
            for name, x in model.state_dict().items():
                # print(name)
                dist.broadcast(x, 0)
            torch.cuda.synchronize()
            logger.info(f'ema synced- rank={dist.get_rank()}')

        if is_master and (epoch % evaluation_interval == 0
                          or epoch == max_epoch):
            with torch.no_grad():
                rs['valid'] = run_epoch(model, validloader, criterion_ce, None,
                                        desc_default='valid', epoch=epoch,
                                        writer=writers[1], verbose=is_master,
                                        tqdm_disabled=tqdm_disabled)
                rs['test'] = run_epoch(model, testloader_, criterion_ce, None,
                                       desc_default='*test', epoch=epoch,
                                       writer=writers[2], verbose=is_master,
                                       tqdm_disabled=tqdm_disabled)

                if ema is not None:
                    model_ema.load_state_dict({
                        k.replace('module.', ''): v
                        for k, v in ema.state_dict().items()
                    })
                    rs['valid'] = run_epoch(model_ema, validloader, criterion_ce,
                                            None, desc_default='valid(EMA)',
                                            epoch=epoch, writer=writers[1],
                                            verbose=is_master,
                                            tqdm_disabled=tqdm_disabled)
                    rs['test'] = run_epoch(model_ema, testloader_, criterion_ce,
                                           None, desc_default='*test(EMA)',
                                           epoch=epoch, writer=writers[2],
                                           verbose=is_master,
                                           tqdm_disabled=tqdm_disabled)

            logger.info(
                f'epoch={epoch} '
                f'[train] loss={rs["train"]["loss"]:.4f} top1={rs["train"]["top1"]:.4f} '
                f'[valid] loss={rs["valid"]["loss"]:.4f} top1={rs["valid"]["top1"]:.4f} '
                f'[test] loss={rs["test"]["loss"]:.4f} top1={rs["test"]["top1"]:.4f} '
            )

            if metric == 'last' or rs[metric]['top1'] > best_top1:
                if metric != 'last':
                    best_top1 = rs[metric]['top1']
                for key, setname in itertools.product(
                        ['loss', 'top1', 'top5'], ['train', 'valid', 'test']):
                    result['%s_%s' % (key, setname)] = rs[setname][key]
                result['epoch'] = epoch
                writers[1].add_scalar('valid_top1/best', rs['valid']['top1'], epoch)
                writers[2].add_scalar('test_top1/best', rs['test']['top1'], epoch)

                reporter(loss_valid=rs['valid']['loss'],
                         top1_valid=rs['valid']['top1'],
                         loss_test=rs['test']['loss'],
                         top1_test=rs['test']['top1'])

                # save checkpoint
                if is_master and save_path:
                    logger.info('save model@%d to %s, err=%.4f' %
                                (epoch, save_path, 1 - best_top1))
                    torch.save(
                        {
                            'epoch': epoch,
                            'log': {
                                'train': rs['train'].get_dict(),
                                'valid': rs['valid'].get_dict(),
                                'test': rs['test'].get_dict(),
                            },
                            'optimizer': optimizer.state_dict(),
                            'model': model.state_dict(),
                            'ema': ema.state_dict() if ema is not None else None,
                        }, save_path)

    del model

    result['top1_test'] = best_top1
    return result

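# The EMA helper used above (`EMA(decay)`) is imported from elsewhere in the repo; the
# training loop only relies on it exposing a `shadow` dict of averaged tensors,
# `state_dict()`, and `len()`. The class below is a minimal illustrative sketch in the
# spirit of the PyTorch forum recipe linked above. The name `_EMASketch` is hypothetical
# and unused here; the repo's actual implementation (including how and when run_epoch
# calls it) may differ.
class _EMASketch:
    def __init__(self, decay):
        self.decay = decay
        self.shadow = {}  # parameter name -> exponentially averaged tensor

    def __len__(self):
        return len(self.shadow)

    def __call__(self, module, step=None):
        # Blend the module's current state_dict into the running shadow average:
        # shadow <- mu * shadow + (1 - mu) * param, with an assumed ramp-up on mu.
        mu = self.decay if step is None else min(self.decay, (1.0 + step) / (10.0 + step))
        for name, x in module.state_dict().items():
            if name in self.shadow:
                self.shadow[name] = mu * self.shadow[name] + (1.0 - mu) * x
            else:
                self.shadow[name] = x.clone()

    def state_dict(self):
        return dict(self.shadow)
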
def train_and_eval(
    args_save,
    tag,
    dataroot,
    test_ratio=0.0,
    cv_fold=0,
    reporter=None,
    metric="last",
    save_path=None,
    only_eval=False,
    local_rank=-1,
    evaluation_interval=5,
    get_dataloaders=None,
    num_class=None,
    get_model=None,
):
    assert get_dataloaders is not None
    assert num_class is not None
    assert get_model is not None

    total_batch = C.get()["batch"]
    if local_rank >= 0:
        dist.init_process_group(
            backend="nccl",
            init_method="env://",
            world_size=int(os.environ["WORLD_SIZE"]),
        )
        device = torch.device("cuda", local_rank)
        torch.cuda.set_device(device)

        C.get()["lr"] *= dist.get_world_size()
        logger.info(
            f'local batch={C.get()["batch"]} world_size={dist.get_world_size()} ----> total batch={C.get()["batch"] * dist.get_world_size()}'
        )
        total_batch = C.get()["batch"] * dist.get_world_size()

    is_master = local_rank < 0 or dist.get_rank() == 0
    if is_master:
        add_filehandler(logger, args_save + ".log")

    if not reporter:
        reporter = lambda **kwargs: 0

    max_epoch = C.get()["epoch"]
    trainsampler, trainloader, validloader, testloader_ = get_dataloaders(
        C.get()["dataset"],
        C.get()["batch"],
        dataroot,
        test_ratio,
        split_idx=cv_fold,
        multinode=(local_rank >= 0),
    )

    # create a model & an optimizer
    model = get_model(
        C.get()["model"], num_class(C.get()["dataset"]), local_rank=local_rank
    )
    model_ema = get_model(
        C.get()["model"], num_class(C.get()["dataset"]), local_rank=-1
    )
    model_ema.eval()

    criterion_ce = criterion = CrossEntropyLabelSmooth(
        num_class(C.get()["dataset"]), C.get().conf.get("lb_smooth", 0)
    )
    if C.get().conf.get("mixup", 0.0) > 0.0:
        criterion = CrossEntropyMixUpLabelSmooth(
            num_class(C.get()["dataset"]), C.get().conf.get("lb_smooth", 0)
        )

    if C.get()["optimizer"]["type"] == "sgd":
        optimizer = optim.SGD(
            model.parameters(),
            lr=C.get()["lr"],
            momentum=C.get()["optimizer"].get("momentum", 0.9),
            weight_decay=0.0,
            nesterov=C.get()["optimizer"].get("nesterov", True),
        )
    elif C.get()["optimizer"]["type"] == "rmsprop":
        optimizer = RMSpropTF(
            model.parameters(),
            lr=C.get()["lr"],
            weight_decay=0.0,
            alpha=0.9,
            momentum=0.9,
            eps=0.001,
        )
    else:
        raise ValueError("invalid optimizer type=%s" % C.get()["optimizer"]["type"])

    lr_scheduler_type = C.get()["lr_schedule"].get("type", "cosine")
    if lr_scheduler_type == "cosine":
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=C.get()["epoch"], eta_min=0.0
        )
    elif lr_scheduler_type == "resnet":
        scheduler = adjust_learning_rate_resnet(optimizer)
    elif lr_scheduler_type == "efficientnet":
        scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer,
            lr_lambda=lambda x: 0.97 ** int((x + C.get()["lr_schedule"]["warmup"]["epoch"]) / 2.4),
        )
    else:
        raise ValueError("invalid lr_schduler=%s" % lr_scheduler_type)

    if (
        C.get()["lr_schedule"].get("warmup", None)
        and C.get()["lr_schedule"]["warmup"]["epoch"] > 0
    ):
        scheduler = GradualWarmupScheduler(
            optimizer,
            multiplier=C.get()["lr_schedule"]["warmup"]["multiplier"],
            total_epoch=C.get()["lr_schedule"]["warmup"]["epoch"],
            after_scheduler=scheduler,
        )

    if not tag or not is_master:
        from FastAutoAugment.metrics import SummaryWriterDummy as SummaryWriter
        logger.warning("tag not provided, no tensorboard log.")
    else:
        from tensorboardX import SummaryWriter
    writers = [
        SummaryWriter(log_dir="./logs/%s/%s" % (tag, x))
        for x in ["train", "valid", "test"]
    ]

    if C.get()["optimizer"]["ema"] > 0.0 and is_master:
        # https://discuss.pytorch.org/t/how-to-apply-exponential-moving-average-decay-for-variables/10856/4?u=ildoonet
        ema = EMA(C.get()["optimizer"]["ema"])
    else:
        ema = None

    result = OrderedDict()
    epoch_start = 1
    if (
        save_path != "test.pth"
    ):  # and is_master: --> should load all data(not able to be broadcasted)
        if save_path and os.path.exists(save_path):
            logger.info("%s file found. loading..." % save_path)
            data = torch.load(save_path)
            key = "model" if "model" in data else "state_dict"

            if "epoch" not in data:
                model.load_state_dict(data)
            else:
                logger.info("checkpoint epoch@%d" % data["epoch"])
                if not isinstance(model, (DataParallel, DistributedDataParallel)):
                    model.load_state_dict(
                        {k.replace("module.", ""): v for k, v in data[key].items()}
                    )
                else:
                    model.load_state_dict(
                        {
                            k if "module." in k else "module." + k: v
                            for k, v in data[key].items()
                        }
                    )
                logger.info("optimizer.load_state_dict+")
                optimizer.load_state_dict(data["optimizer"])
                if data["epoch"] < C.get()["epoch"]:
                    epoch_start = data["epoch"]
                else:
                    only_eval = True
                if ema is not None:
                    ema.shadow = (
                        data.get("ema", {})
                        if isinstance(data.get("ema", {}), dict)
                        else data["ema"].state_dict()
                    )
            del data
        else:
            logger.info('"%s" file not found. skip to pretrain weights...' % save_path)
            if only_eval:
                logger.warning(
                    "model checkpoint not found. only-evaluation mode is off."
                )
            only_eval = False

    if local_rank >= 0:
        for name, x in model.state_dict().items():
            dist.broadcast(x, 0)
        logger.info(
            f"multinode init. local_rank={dist.get_rank()} is_master={is_master}"
        )
        torch.cuda.synchronize()

    tqdm_disabled = (
        bool(os.environ.get("TASK_NAME", "")) and local_rank != 0
    )  # KakaoBrain Environment

    if only_eval:
        logger.info("evaluation only+")
        model.eval()
        rs = dict()
        rs["train"] = run_epoch(
            model,
            trainloader,
            criterion,
            None,
            desc_default="train",
            epoch=0,
            writer=writers[0],
            is_master=is_master,
        )

        with torch.no_grad():
            rs["valid"] = run_epoch(
                model,
                validloader,
                criterion,
                None,
                desc_default="valid",
                epoch=0,
                writer=writers[1],
                is_master=is_master,
            )
            rs["test"] = run_epoch(
                model,
                testloader_,
                criterion,
                None,
                desc_default="*test",
                epoch=0,
                writer=writers[2],
                is_master=is_master,
            )

            if ema is not None and len(ema) > 0:
                model_ema.load_state_dict(
                    {k.replace("module.", ""): v for k, v in ema.state_dict().items()}
                )
                rs["valid"] = run_epoch(
                    model_ema,
                    validloader,
                    criterion_ce,
                    None,
                    desc_default="valid(EMA)",
                    epoch=0,
                    writer=writers[1],
                    verbose=is_master,
                    tqdm_disabled=tqdm_disabled,
                )
                rs["test"] = run_epoch(
                    model_ema,
                    testloader_,
                    criterion_ce,
                    None,
                    desc_default="*test(EMA)",
                    epoch=0,
                    writer=writers[2],
                    verbose=is_master,
                    tqdm_disabled=tqdm_disabled,
                )

        for key, setname in itertools.product(
            ["loss", "top1", "top5"], ["train", "valid", "test"]
        ):
            if setname not in rs:
                continue
            result["%s_%s" % (key, setname)] = rs[setname][key]
        result["epoch"] = 0
        return result

    # train loop
    best_top1 = 0
    for epoch in range(epoch_start, max_epoch + 1):
        if local_rank >= 0:
            trainsampler.set_epoch(epoch)

        model.train()
        rs = dict()
        rs["train"] = run_epoch(
            model,
            trainloader,
            criterion,
            optimizer,
            desc_default="train",
            epoch=epoch,
            writer=writers[0],
            verbose=(is_master and local_rank <= 0),
            scheduler=scheduler,
            ema=ema,
            wd=C.get()["optimizer"]["decay"],
            tqdm_disabled=tqdm_disabled,
        )
        model.eval()

        if math.isnan(rs["train"]["loss"]):
            raise Exception("train loss is NaN.")

        if (
            ema is not None
            and C.get()["optimizer"]["ema_interval"] > 0
            and epoch % C.get()["optimizer"]["ema_interval"] == 0
        ):
            logger.info(f"ema synced+ rank={dist.get_rank()}")
            if ema is not None:
                model.load_state_dict(ema.state_dict())
            for name, x in model.state_dict().items():
                # print(name)
                dist.broadcast(x, 0)
            torch.cuda.synchronize()
            logger.info(f"ema synced- rank={dist.get_rank()}")

        if is_master and (epoch % evaluation_interval == 0 or epoch == max_epoch):
            with torch.no_grad():
                rs["valid"] = run_epoch(
                    model,
                    validloader,
                    criterion_ce,
                    None,
                    desc_default="valid",
                    epoch=epoch,
                    writer=writers[1],
                    verbose=is_master,
                    tqdm_disabled=tqdm_disabled,
                )
                rs["test"] = run_epoch(
                    model,
                    testloader_,
                    criterion_ce,
                    None,
                    desc_default="*test",
                    epoch=epoch,
                    writer=writers[2],
                    verbose=is_master,
                    tqdm_disabled=tqdm_disabled,
                )

                if ema is not None:
                    model_ema.load_state_dict(
                        {
                            k.replace("module.", ""): v
                            for k, v in ema.state_dict().items()
                        }
                    )
                    rs["valid"] = run_epoch(
                        model_ema,
                        validloader,
                        criterion_ce,
                        None,
                        desc_default="valid(EMA)",
                        epoch=epoch,
                        writer=writers[1],
                        verbose=is_master,
                        tqdm_disabled=tqdm_disabled,
                    )
                    rs["test"] = run_epoch(
                        model_ema,
                        testloader_,
                        criterion_ce,
                        None,
                        desc_default="*test(EMA)",
                        epoch=epoch,
                        writer=writers[2],
                        verbose=is_master,
                        tqdm_disabled=tqdm_disabled,
                    )

            logger.info(
                f"epoch={epoch} "
                f'[train] loss={rs["train"]["loss"]:.4f} top1={rs["train"]["top1"]:.4f} '
                f'[valid] loss={rs["valid"]["loss"]:.4f} top1={rs["valid"]["top1"]:.4f} '
                f'[test] loss={rs["test"]["loss"]:.4f} top1={rs["test"]["top1"]:.4f} '
            )

            if metric == "last" or rs[metric]["top1"] > best_top1:
                if metric != "last":
                    best_top1 = rs[metric]["top1"]
                for key, setname in itertools.product(
                    ["loss", "top1", "top5"], ["train", "valid", "test"]
                ):
                    result["%s_%s" % (key, setname)] = rs[setname][key]
                result["epoch"] = epoch

                writers[1].add_scalar("valid_top1/best", rs["valid"]["top1"], epoch)
                writers[2].add_scalar("test_top1/best", rs["test"]["top1"], epoch)

                reporter(
                    loss_valid=rs["valid"]["loss"],
                    top1_valid=rs["valid"]["top1"],
                    loss_test=rs["test"]["loss"],
                    top1_test=rs["test"]["top1"],
                )

                # save checkpoint
                if is_master and save_path:
                    logger.info(
                        "save model@%d to %s, err=%.4f"
                        % (epoch, save_path, 1 - best_top1)
                    )
                    torch.save(
                        {
                            "epoch": epoch,
                            "log": {
                                "train": rs["train"].get_dict(),
                                "valid": rs["valid"].get_dict(),
                                "test": rs["test"].get_dict(),
                            },
                            "optimizer": optimizer.state_dict(),
                            "model": model.state_dict(),
                            "ema": ema.state_dict() if ema is not None else None,
                        },
                        save_path,
                    )

    del model

    result["top1_test"] = best_top1
    return result
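
# Example (hypothetical) invocation of the second variant above, which takes the data
# and model helpers as injected callables instead of importing them at module level.
# The import paths below assume the usual FastAutoAugment layout (data.get_dataloaders,
# networks.get_model / num_class) and an already-loaded theconf config singleton `C`;
# adjust them to the actual project structure.
#
#     from FastAutoAugment.data import get_dataloaders
#     from FastAutoAugment.networks import get_model, num_class
#
#     result = train_and_eval(
#         args_save="exp0",            # prefix for the "<args_save>.log" file
#         tag="exp0",                  # tensorboard run tag under ./logs/
#         dataroot="/data/cifar10",    # dataset root directory
#         save_path="exp0_top1.pth",   # checkpoint path (resumed if it already exists)
#         get_dataloaders=get_dataloaders,
#         num_class=num_class,
#         get_model=get_model,
#     )
#     print(result["top1_test"])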