def test_restore_checkpoint():
    """Round-trip a model's weights through a checkpoint file.

    Saves the freshly initialized weights, re-randomizes the model, reloads
    the checkpoint on GPU 0, broadcasts to all GPUs, and asserts that every
    restored blob equals the originally saved one.
    """
    # Create Model
    model = model_builder.create(cfg.MODEL.TYPE, train=True)
    add_momentum_init_ops(model)
    init_weights(model)
    # Fill input blobs
    roidb = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
    model_builder.add_training_inputs(model, roidb=roidb)
    workspace.CreateNet(model.net)
    # Bookkeeping for checkpoint creation
    iter_num = 0
    checkpoints = {}
    output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
    chk_file_path = os.path.join(output_dir, 'model_iter{}.pkl'.format(iter_num))
    checkpoints[iter_num] = chk_file_path
    # Save model weights
    nu.save_model_to_weights_file(checkpoints[iter_num], model)
    orig_gpu_0_params, orig_all_params = get_params(model)
    # Change the model weights (so a successful restore is distinguishable
    # from weights that were simply never touched)
    init_weights(model)
    # Reload the weights in the model
    nu.initialize_gpu_from_weights_file(model, chk_file_path, gpu_id=0)
    nu.broadcast_parameters(model)
    # Clean up the checkpoint directory before asserting, so a failed
    # assertion does not leave stale output behind
    shutil.rmtree(cfg.OUTPUT_DIR)
    _, restored_all_params = get_params(model)
    # Check if all params are loaded correctly
    for scoped_name, blob in orig_all_params.items():
        np.testing.assert_array_equal(blob, restored_all_params[scoped_name])
    # Check if broadcast_parameters works: every GPU's restored blob must
    # match the GPU-0 copy that was originally saved
    for scoped_name, blob in restored_all_params.items():
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        np.testing.assert_array_equal(blob, orig_gpu_0_params[unscoped_name])
def test_restore_checkpoint():
    """Verify that weights saved to a checkpoint can be restored exactly.

    The test snapshots initial weights, perturbs the model by re-running
    weight init, restores the snapshot on GPU 0, broadcasts it, and checks
    blob-for-blob equality against the snapshot.
    """
    # Build the training model and give it momentum/init ops and weights.
    model = model_builder.create(cfg.MODEL.TYPE, train=True)
    add_momentum_init_ops(model)
    init_weights(model)

    # Hook up training inputs so the net can actually be created.
    training_roidb = combined_roidb_for_training(
        cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES
    )
    model_builder.add_training_inputs(model, roidb=training_roidb)
    workspace.CreateNet(model.net)

    # Bookkeeping for checkpoint creation.
    checkpoint_iter = 0
    out_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
    weights_path = os.path.join(
        out_dir, 'model_iter{}.pkl'.format(checkpoint_iter)
    )
    saved_checkpoints = {checkpoint_iter: weights_path}

    # Snapshot the current weights to disk and in memory.
    nu.save_model_to_weights_file(saved_checkpoints[checkpoint_iter], model)
    orig_gpu_0_params, orig_all_params = get_params(model)

    # Perturb the weights, then restore them from the checkpoint.
    init_weights(model)
    nu.initialize_gpu_from_weights_file(model, weights_path, gpu_id=0)
    nu.broadcast_parameters(model)
    shutil.rmtree(cfg.OUTPUT_DIR)

    _, restored_all_params = get_params(model)

    # Every restored blob must equal its originally saved counterpart.
    for name, original_blob in orig_all_params.items():
        np.testing.assert_array_equal(original_blob, restored_all_params[name])

    # broadcast_parameters must have copied GPU-0 weights everywhere.
    for name, restored_blob in restored_all_params.items():
        plain_name = c2_utils.UnscopeName(name)
        np.testing.assert_array_equal(
            restored_blob, orig_gpu_0_params[plain_name]
        )
def add_model_training_inputs(model):
    """Load the training dataset and attach the training inputs to the model."""
    log = logging.getLogger(__name__)
    log.info('Loading dataset: {}'.format(cfg.TRAIN.DATASETS))

    # Build the combined roidb from the configured datasets/proposal files.
    roidb = combined_roidb_for_training(
        cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES
    )
    log.info('{:d} roidb entries'.format(len(roidb)))

    model_builder.add_training_inputs(model, roidb=roidb)
def add_model_training_inputs(model):
    """Load the training dataset and attach the training inputs to the model."""
    logger = logging.getLogger(__name__)
    logger.info('Loading dataset: {}'.format(cfg.TRAIN.DATASETS))
    # Combined roidb across all configured training datasets/proposal files.
    roidb = combined_roidb_for_training(
        cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES
    )
    logger.info('{:d} roidb entries'.format(len(roidb)))
    model_builder.add_training_inputs(model, roidb=roidb)
def add_model_training_inputs(model):
    """Load the training dataset and attach the training inputs to the model."""
    logger = logging.getLogger(__name__)
    logger.info('Loading dataset: {}'.format(cfg.TRAIN.DATASETS))
    roidb = combined_roidb_for_training(
        cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES
    )
    # NOTE: when horizontal-flip augmentation is enabled, the roidb contains
    # more entries than the raw dataset (flipped copies are appended).
    logger.info('{:d} roidb entries'.format(len(roidb)))
    model_builder.add_training_inputs(model, roidb=roidb)
def add_model_da_training_inputs(model):
    """Load the source and target training datasets and attach the training
    inputs to the model (domain-adaptation variant).

    The source and target roidbs are loaded separately and handed to the
    model builder as distinct keyword arguments; they are never merged.
    """
    logger = logging.getLogger(__name__)
    logger.info('Loading source dataset: {}'.format(cfg.TRAIN.SOURCE_DATASETS))
    source_roidb = combined_roidb_for_training(
        cfg.TRAIN.SOURCE_DATASETS, cfg.TRAIN.SOURCE_PROPOSAL_FILES, True
    )
    logger.info('{:d} source roidb entries'.format(len(source_roidb)))
    logger.info('Loading target dataset: {}'.format(cfg.TRAIN.TARGET_DATASETS))
    target_roidb = combined_roidb_for_training(
        cfg.TRAIN.TARGET_DATASETS, cfg.TRAIN.TARGET_PROPOSAL_FILES, False
    )
    logger.info('{:d} target roidb entries'.format(len(target_roidb)))
    # Removed: `roidb = source_roidb + target_roidb` — the concatenated list
    # was never used and needlessly duplicated a potentially large roidb.
    model_builder.add_training_inputs(
        model, source_roidb=source_roidb, target_roidb=target_roidb
    )
def add_model_da_training_inputs(model):
    """Load the source and target training datasets and attach the training
    inputs to the model (domain-adaptation variant).
    """
    logger = logging.getLogger(__name__)
    logger.info('Loading source dataset: {}'.format(cfg.TRAIN.SOURCE_DATASETS))
    source_roidb = combined_roidb_for_training(
        cfg.TRAIN.SOURCE_DATASETS, cfg.TRAIN.SOURCE_PROPOSAL_FILES, True
    )
    logger.info('{:d} source roidb entries'.format(len(source_roidb)))
    logger.info('Loading target dataset: {}'.format(cfg.TRAIN.TARGET_DATASETS))
    target_roidb = combined_roidb_for_training(
        cfg.TRAIN.TARGET_DATASETS, cfg.TRAIN.TARGET_PROPOSAL_FILES, False
    )
    if cfg.TRAIN.PADA:
        # add indices for the target images for updating their class weights
        for i, rois in enumerate(target_roidb):
            rois['im_idx'] = i
    logger.info('{:d} target roidb entries'.format(len(target_roidb)))
    model_builder.add_training_inputs(
        model, source_roidb=source_roidb, target_roidb=target_roidb
    )