def main_worker(gpu_idx, configs):
    configs.gpu_idx = gpu_idx

    if configs.gpu_idx is not None:
        print("Use GPU: {} for training".format(configs.gpu_idx))
        configs.device = torch.device('cuda:{}'.format(configs.gpu_idx))

    if configs.distributed:
        if configs.dist_url == "env://" and configs.rank == -1:
            configs.rank = int(os.environ["RANK"])
        if configs.multiprocessing_distributed:
            configs.rank = configs.rank * configs.ngpus_per_node + gpu_idx

        dist.init_process_group(backend=configs.dist_backend, init_method=configs.dist_url,
                                world_size=configs.world_size, rank=configs.rank)

    configs.is_master_node = (not configs.distributed) or (
        configs.distributed and (configs.rank % configs.ngpus_per_node == 0))

    # model
    model = create_model(configs)
    model = make_data_parallel(model, configs)

    if configs.is_master_node:
        num_parameters = get_num_parameters(model)
        print('number of trained parameters of the model: {}'.format(num_parameters))

    if configs.pretrained_path is not None:
        model = load_pretrained_model(model, configs.pretrained_path, gpu_idx,
                                      configs.overwrite_global_2_local)

    # Load dataset
    test_loader = create_test_dataloader(configs)
    test(test_loader, model, configs)
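
# A minimal sketch (not part of the original scripts) of how a per-GPU worker like
# main_worker() is typically dispatched. torch.multiprocessing.spawn and the rank arithmetic
# above follow the standard PyTorch distributed-launch pattern; parse_configs() is a
# hypothetical stand-in for the project's own argument parsing.
import torch.multiprocessing as mp


def main():
    configs = parse_configs()  # hypothetical config/argument parser
    if configs.multiprocessing_distributed:
        configs.world_size = configs.ngpus_per_node * configs.world_size
        # spawn one process per GPU; each receives its local gpu_idx as the first argument
        mp.spawn(main_worker, nprocs=configs.ngpus_per_node, args=(configs,))
    else:
        # single-process (optionally single-GPU) run
        main_worker(configs.gpu_idx, configs)
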
def main_worker(gpu_idx, configs):
    configs.gpu_idx = gpu_idx

    if configs.gpu_idx is not None:
        print("Use GPU: {} for training".format(configs.gpu_idx))
        configs.device = torch.device('cuda:{}'.format(configs.gpu_idx))

    if configs.distributed:
        if configs.dist_url == "env://" and configs.rank == -1:
            configs.rank = int(os.environ["RANK"])
        if configs.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            configs.rank = configs.rank * configs.ngpus_per_node + gpu_idx

        dist.init_process_group(backend=configs.dist_backend, init_method=configs.dist_url,
                                world_size=configs.world_size, rank=configs.rank)

    configs.is_master_node = (not configs.distributed) or (
        configs.distributed and (configs.rank % configs.ngpus_per_node == 0))

    if configs.is_master_node:
        logger = Logger(configs.logs_dir, configs.saved_fn)
        logger.info('>>> Created a new logger')
        logger.info('>>> configs: {}'.format(configs))
        tb_writer = SummaryWriter(log_dir=os.path.join(configs.logs_dir, 'tensorboard'))
    else:
        logger = None
        tb_writer = None

    # model
    model = create_model(configs)

    # Data Parallel
    model = make_data_parallel(model, configs)

    # Freeze model
    model = freeze_model(model, configs.freeze_modules_list)

    if configs.is_master_node:
        num_parameters = get_num_parameters(model)
        logger.info('number of trained parameters of the model: {}'.format(num_parameters))

    optimizer = create_optimizer(configs, model)
    lr_scheduler = create_lr_scheduler(optimizer, configs)
    best_val_loss = np.inf
    earlystop_count = 0
    is_best = False

    # optionally load weight from a checkpoint
    if configs.pretrained_path is not None:
        model = load_pretrained_model(model, configs.pretrained_path, gpu_idx,
                                      configs.overwrite_global_2_local)
        if logger is not None:
            logger.info('loaded pretrained model at {}'.format(configs.pretrained_path))

    # optionally resume from a checkpoint
    if configs.resume_path is not None:
        checkpoint = resume_model(configs.resume_path, configs.arch, configs.gpu_idx)
        if hasattr(model, 'module'):
            model.module.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        best_val_loss = checkpoint['best_val_loss']
        earlystop_count = checkpoint['earlystop_count']
        configs.start_epoch = checkpoint['epoch'] + 1

    if logger is not None:
        logger.info(">>> Loading dataset & getting dataloader...")

    # Create dataloader
    train_loader, val_loader, train_sampler = create_train_val_dataloader(configs)
    test_loader = create_test_dataloader(configs)
    if logger is not None:
        logger.info('number of batches in train set: {}'.format(len(train_loader)))
        if val_loader is not None:
            logger.info('number of batches in val set: {}'.format(len(val_loader)))
        logger.info('number of batches in test set: {}'.format(len(test_loader)))

    if configs.evaluate:
        assert val_loader is not None, "The validation set should not be None"
        val_loss = evaluate_one_epoch(val_loader, model, configs.start_epoch - 1, configs, logger)
        print('Evaluate, val_loss: {}'.format(val_loss))
        return

    for epoch in range(configs.start_epoch, configs.num_epochs + 1):
        # Get the current learning rate
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        if logger is not None:
            logger.info('{}'.format('*-' * 40))
            logger.info('{} {}/{} {}'.format('=' * 35, epoch, configs.num_epochs, '=' * 35))
            logger.info('{}'.format('*-' * 40))
            logger.info('>>> Epoch: [{}/{}] learning rate: {:.2e}'.format(epoch, configs.num_epochs, lr))

        if configs.distributed:
            train_sampler.set_epoch(epoch)

        # train for one epoch
        train_loss = train_one_epoch(train_loader, model, optimizer, epoch, configs, logger)
        loss_dict = {'train': train_loss}

        if not configs.no_val:
            val_loss = evaluate_one_epoch(val_loader, model, epoch, configs, logger)
            is_best = val_loss <= best_val_loss
            best_val_loss = min(val_loss, best_val_loss)
            loss_dict['val'] = val_loss

        if not configs.no_test:
            test_loss = evaluate_one_epoch(test_loader, model, epoch, configs, logger)
            loss_dict['test'] = test_loss

        # Write tensorboard
        if tb_writer is not None:
            tb_writer.add_scalars('Loss', loss_dict, epoch)

        # Save checkpoint
        if configs.is_master_node and (is_best or ((epoch % configs.checkpoint_freq) == 0)):
            saved_state = get_saved_state(model, optimizer, lr_scheduler, epoch, configs,
                                          best_val_loss, earlystop_count)
            save_checkpoint(configs.checkpoints_dir, configs.saved_fn, saved_state, is_best, epoch)

        # Check early stop training
        if configs.earlystop_patience is not None:
            earlystop_count = 0 if is_best else (earlystop_count + 1)
            early_stop = configs.earlystop_patience <= earlystop_count
            print_string = ' |||\t earlystop_count: {}'.format(earlystop_count)
            if early_stop:
                print_string += '\n\t--- Early stopping!!!'
            else:
                print_string += '\n\t--- Continue training..., earlystop_count: {}'.format(earlystop_count)
            if logger is not None:
                logger.info(print_string)
            if early_stop:
                break

        # Adjust learning rate
        if configs.lr_type == 'plateau':
            assert (not configs.no_val), "Only use the plateau scheduler when a validation set is available"
            lr_scheduler.step(val_loss)
        else:
            lr_scheduler.step()

    if tb_writer is not None:
        tb_writer.close()
    if configs.distributed:
        cleanup()
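
# A minimal sketch, under assumptions, of what create_lr_scheduler() might return for the
# 'plateau' branch above: the loop calls lr_scheduler.step(val_loss), which matches
# torch.optim.lr_scheduler.ReduceLROnPlateau. The factor/patience values and the non-plateau
# fallback are illustrative assumptions, not the repository's actual implementation.
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR


def create_lr_scheduler_sketch(optimizer, configs):
    if configs.lr_type == 'plateau':
        # step(val_loss) lowers the LR once the validation loss stops improving
        return ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3)
    # fixed-interval decay for the non-plateau case (illustrative only)
    return StepLR(optimizer, step_size=20, gamma=0.1)
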
def main_worker(gpu_idx, configs):
    configs.gpu_idx = gpu_idx

    if configs.gpu_idx is not None:
        print("Use GPU: {} for training".format(configs.gpu_idx))
        configs.device = torch.device('cuda:{}'.format(configs.gpu_idx))

    if configs.distributed:
        if configs.dist_url == "env://" and configs.rank == -1:
            configs.rank = int(os.environ["RANK"])
        if configs.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            configs.rank = configs.rank * configs.ngpus_per_node + gpu_idx

        dist.init_process_group(backend=configs.dist_backend, init_method=configs.dist_url,
                                world_size=configs.world_size, rank=configs.rank)

    configs.is_master_node = (not configs.distributed) or (
        configs.distributed and (configs.rank % configs.ngpus_per_node == 0))

    if configs.is_master_node:
        logger = Logger(configs.logs_dir, configs.saved_fn)
        logger.info('>>> Created a new logger')
        logger.info('>>> configs: {}'.format(configs))
        tb_writer = SummaryWriter(log_dir=os.path.join(configs.logs_dir, 'tensorboard'))
    else:
        logger = None
        tb_writer = None

    # model
    model = create_model(configs)

    # load weight from a checkpoint
    if configs.pretrained_path is not None:
        assert os.path.isfile(configs.pretrained_path), \
            "=> no checkpoint found at '{}'".format(configs.pretrained_path)
        model.load_weights(weightfile=configs.pretrained_path)
        if logger is not None:
            logger.info('loaded pretrained model at {}'.format(configs.pretrained_path))

    # resume weights of model from a checkpoint
    if configs.resume_path is not None:
        assert os.path.isfile(configs.resume_path), \
            "=> no checkpoint found at '{}'".format(configs.resume_path)
        model.load_weights(weightfile=configs.resume_path)
        if logger is not None:
            logger.info('resume training model from checkpoint {}'.format(configs.resume_path))

    # Data Parallel
    model = make_data_parallel(model, configs)

    # Make sure to create optimizer after moving the model to cuda
    optimizer = create_optimizer(configs, model)
    lr_scheduler = create_lr_scheduler(optimizer, configs)

    # resume optimizer, lr_scheduler from a checkpoint
    if configs.resume_path is not None:
        utils_path = configs.resume_path.replace('Model_', 'Utils_')
        assert os.path.isfile(utils_path), "=> no checkpoint found at '{}'".format(utils_path)
        utils_state_dict = torch.load(utils_path, map_location='cuda:{}'.format(configs.gpu_idx))
        optimizer.load_state_dict(utils_state_dict['optimizer'])
        lr_scheduler.load_state_dict(utils_state_dict['lr_scheduler'])
        configs.start_epoch = utils_state_dict['epoch'] + 1

    if configs.is_master_node:
        num_parameters = get_num_parameters(model)
        logger.info('number of trained parameters of the model: {}'.format(num_parameters))

    if logger is not None:
        logger.info(">>> Loading dataset & getting dataloader...")

    # Create dataloader
    train_loader, val_loader, train_sampler = create_train_val_dataloader(configs)
    if logger is not None:
        logger.info('number of batches in train set: {}'.format(len(train_loader)))
        if val_loader is not None:
            logger.info('number of batches in val set: {}'.format(len(val_loader)))

    if configs.evaluate:
        assert val_loader is not None, "The validation set should not be None"
        eval_metrics = evaluate_one_epoch(val_loader, model, configs.start_epoch - 1, configs, logger)
        precision, recall, AP, f1, ap_class = eval_metrics
        print('Evaluate - precision: {}, recall: {}, AP: {}, f1: {}, ap_class: {}'.format(
            precision, recall, AP, f1, ap_class))
        return

    for epoch in range(configs.start_epoch, configs.num_epochs + 1):
        if logger is not None:
            logger.info('{}'.format('*-' * 40))
            logger.info('{} {}/{} {}'.format('=' * 35, epoch, configs.num_epochs, '=' * 35))
            logger.info('{}'.format('*-' * 40))
            logger.info('>>> Epoch: [{}/{}]'.format(epoch, configs.num_epochs))

        if configs.distributed:
            train_sampler.set_epoch(epoch)

        # train for one epoch
        train_one_epoch(train_loader, model, optimizer, lr_scheduler, epoch, configs, logger, tb_writer)

        if not configs.no_val:
            precision, recall, AP, f1, ap_class = evaluate_one_epoch(val_loader, model, epoch, configs, logger)
            val_metrics_dict = {
                'precision': precision,
                'recall': recall,
                'AP': AP,
                'f1': f1,
                'ap_class': ap_class
            }
            if tb_writer is not None:
                tb_writer.add_scalars('Validation', val_metrics_dict, epoch)

        # Save checkpoint
        if configs.is_master_node and ((epoch % configs.checkpoint_freq) == 0):
            model_state_dict, utils_state_dict = get_saved_state(model, optimizer, lr_scheduler, epoch, configs)
            save_checkpoint(configs.checkpoints_dir, configs.saved_fn, model_state_dict, utils_state_dict, epoch)

    if tb_writer is not None:
        tb_writer.close()
    if configs.distributed:
        cleanup()
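
# A minimal sketch, under assumptions, of what make_data_parallel(model, configs) could do:
# every variant above calls it after building/loading the model and before creating the
# optimizer. The common pattern is to move the model to the selected device and wrap it in
# DistributedDataParallel for multi-process runs or DataParallel otherwise; this sketch is
# not taken from the original repository.
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel


def make_data_parallel_sketch(model, configs):
    if configs.distributed:
        if configs.gpu_idx is not None:
            # one process per GPU: pin this process to its device
            torch.cuda.set_device(configs.gpu_idx)
            model.cuda(configs.gpu_idx)
            model = DistributedDataParallel(model, device_ids=[configs.gpu_idx])
        else:
            model.cuda()
            model = DistributedDataParallel(model)
    elif configs.gpu_idx is not None:
        # single GPU, single process
        torch.cuda.set_device(configs.gpu_idx)
        model = model.cuda(configs.gpu_idx)
    else:
        # single machine, all visible GPUs
        model = nn.DataParallel(model).cuda()
    return model
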
def main_worker(gpu_idx, configs):
    configs.gpu_idx = gpu_idx
    configs.device = torch.device('cpu' if configs.gpu_idx is None else 'cuda:{}'.format(configs.gpu_idx))

    if configs.distributed:
        if configs.dist_url == "env://" and configs.rank == -1:
            configs.rank = int(os.environ["RANK"])
        if configs.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            configs.rank = configs.rank * configs.ngpus_per_node + gpu_idx

        dist.init_process_group(backend=configs.dist_backend, init_method=configs.dist_url,
                                world_size=configs.world_size, rank=configs.rank)
        configs.subdivisions = int(64 / configs.batch_size / configs.ngpus_per_node)
    else:
        configs.subdivisions = int(64 / configs.batch_size)

    configs.is_master_node = (not configs.distributed) or (
        configs.distributed and (configs.rank % configs.ngpus_per_node == 0))

    if configs.is_master_node:
        logger = Logger(configs.logs_dir, configs.saved_fn)
        logger.info('>>> Created a new logger')
        logger.info('>>> configs: {}'.format(configs))
        tb_writer = SummaryWriter(log_dir=os.path.join(configs.logs_dir, 'tensorboard'))
    else:
        logger = None
        tb_writer = None

    # model
    model = create_model(configs)

    # load weight from a checkpoint
    if configs.pretrained_path is not None:
        assert os.path.isfile(configs.pretrained_path), \
            "=> no checkpoint found at '{}'".format(configs.pretrained_path)
        model.load_state_dict(torch.load(configs.pretrained_path, map_location='cpu'))
        if logger is not None:
            logger.info('loaded pretrained model at {}'.format(configs.pretrained_path))

    # resume weights of model from a checkpoint
    if configs.resume_path is not None:
        assert os.path.isfile(configs.resume_path), \
            "=> no checkpoint found at '{}'".format(configs.resume_path)
        model.load_state_dict(torch.load(configs.resume_path, map_location='cpu'))
        if logger is not None:
            logger.info('resume training model from checkpoint {}'.format(configs.resume_path))

    # Data Parallel
    model = make_data_parallel(model, configs)

    # Make sure to create optimizer after moving the model to cuda
    optimizer = create_optimizer(configs, model)
    lr_scheduler = create_lr_scheduler(optimizer, configs)
    configs.step_lr_in_epoch = False if configs.lr_type in ['multi_step', 'cosin', 'one_cycle'] else True

    # resume optimizer, lr_scheduler from a checkpoint
    if configs.resume_path is not None:
        utils_path = configs.resume_path.replace('Model_', 'Utils_')
        assert os.path.isfile(utils_path), "=> no checkpoint found at '{}'".format(utils_path)
        utils_state_dict = torch.load(utils_path, map_location='cuda:{}'.format(configs.gpu_idx))
        optimizer.load_state_dict(utils_state_dict['optimizer'])
        lr_scheduler.load_state_dict(utils_state_dict['lr_scheduler'])
        configs.start_epoch = utils_state_dict['epoch'] + 1

    if configs.is_master_node:
        num_parameters = get_num_parameters(model)
        logger.info('number of trained parameters of the model: {}'.format(num_parameters))

    if logger is not None:
        logger.info(">>> Loading dataset & getting dataloader...")

    # Create dataloader
    train_dataloader, train_sampler = create_train_dataloader(configs)
    if logger is not None:
        logger.info('number of batches in training set: {}'.format(len(train_dataloader)))

    if configs.evaluate:
        val_dataloader = create_val_dataloader(configs)
        val_loss = validate(val_dataloader, model, configs)
        print('val_loss: {:.4e}'.format(val_loss))
        return

    for epoch in range(configs.start_epoch, configs.num_epochs + 1):
        if logger is not None:
            logger.info('{}'.format('*-' * 40))
            logger.info('{} {}/{} {}'.format('=' * 35, epoch, configs.num_epochs, '=' * 35))
            logger.info('{}'.format('*-' * 40))
            logger.info('>>> Epoch: [{}/{}]'.format(epoch, configs.num_epochs))

        if configs.distributed:
            train_sampler.set_epoch(epoch)

        # train for one epoch
        train_one_epoch(train_dataloader, model, optimizer, lr_scheduler, epoch, configs, logger, tb_writer)

        if (not configs.no_val) and (epoch % configs.checkpoint_freq == 0):
            val_dataloader = create_val_dataloader(configs)
            print('number of batches in val_dataloader: {}'.format(len(val_dataloader)))
            val_loss = validate(val_dataloader, model, configs)
            print('val_loss: {:.4e}'.format(val_loss))
            if tb_writer is not None:
                tb_writer.add_scalar('Val_loss', val_loss, epoch)

        # Save checkpoint
        if configs.is_master_node and ((epoch % configs.checkpoint_freq) == 0):
            model_state_dict, utils_state_dict = get_saved_state(model, optimizer, lr_scheduler, epoch, configs)
            save_checkpoint(configs.checkpoints_dir, configs.saved_fn, model_state_dict, utils_state_dict, epoch)

        if not configs.step_lr_in_epoch:
            lr_scheduler.step()
            if tb_writer is not None:
                tb_writer.add_scalar('LR', lr_scheduler.get_lr()[0], epoch)

    if tb_writer is not None:
        tb_writer.close()
    if configs.distributed:
        cleanup()
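
# A minimal gradient-accumulation sketch, based on the assumption that
# configs.subdivisions = int(64 / batch_size / ngpus_per_node) is used to accumulate
# gradients until an effective batch size of 64 is reached. The original train_one_epoch()
# may differ in details (loss scaling, gradient clipping, per-iteration LR stepping).
def accumulate_step_sketch(batch_idx, loss, optimizer, subdivisions):
    # scale the loss so the summed gradients match one full effective batch
    (loss / subdivisions).backward()
    if (batch_idx + 1) % subdivisions == 0:
        optimizer.step()
        optimizer.zero_grad()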