def test_export_load_dict_save_config():
    c1 = Collection(
        "default",
        include_regex=["conv2d"],
        reduction_config=ReductionConfig(),
        save_config=SaveConfig(
            {
                ModeKeys.TRAIN: SaveConfigMode(save_interval=10),
                ModeKeys.EVAL: SaveConfigMode(start_step=1),
            }
        ),
    )
    c2 = Collection.from_json(c1.to_json())
    assert c1 == c2
    assert c1.to_json_dict() == c2.to_json_dict()
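# Companion sketch: the hook constructors later in this file accept either a
# SaveConfig or a plain dict mapping ModeKeys to SaveConfigMode and normalize
# it with SaveConfig.parse (see `self.save_config = SaveConfig.parse(save_config)`
# below). The function name here is illustrative, not part of the test suite.
def sketch_parse_mode_dict():
    per_mode = {
        ModeKeys.TRAIN: SaveConfigMode(save_interval=10),
        ModeKeys.EVAL: SaveConfigMode(start_step=1),
    }
    parsed = SaveConfig.parse(per_mode)  # equivalent to SaveConfig(per_mode)
    assert isinstance(parsed, SaveConfig)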
    )
    saved_scalars = simple_pt_model(hook, register_loss=register_loss, with_timestamp=with_timestamp)
    hook.close()
    verify_files(trial_dir, save_config, saved_scalars)
    if with_timestamp:
        check_tf_events(trial_dir, saved_scalars)


@pytest.mark.parametrize("collection", [("all", ".*"), ("scalars", "^scalar")])
@pytest.mark.parametrize(
    "save_config",
    [
        SaveConfig(save_steps=[0, 2, 4, 6, 8]),
        SaveConfig(
            {
                ModeKeys.TRAIN: SaveConfigMode(save_interval=2),
                ModeKeys.GLOBAL: SaveConfigMode(save_interval=3),
                ModeKeys.EVAL: SaveConfigMode(save_interval=1),
            }
        ),
    ],
)
@pytest.mark.parametrize("register_loss", [True, False])
@pytest.mark.parametrize("with_timestamp", [True, False])
def test_pytorch_save_scalar(collection, save_config, register_loss, with_timestamp):
    helper_pytorch_tests(collection, register_loss, save_config, with_timestamp)
    delete_local_trials([SMDEBUG_PT_HOOK_TESTS_DIR])
def __init__(
    self,
    collection_manager: CollectionManager,
    default_include_collections: List[str],
    profiler_config_parser: ProfilerConfigParser,
    init_step: int = 0,
    out_dir: Optional[str] = None,
    export_tensorboard: bool = False,
    tensorboard_dir: Optional[str] = None,
    dry_run: bool = False,
    reduction_config: Optional[ReductionConfig] = None,
    save_config: Optional[Union[SaveConfig, Dict[ModeKeys, SaveConfigMode]]] = None,
    include_regex: Optional[List[str]] = None,
    include_collections: Optional[List[str]] = None,
    save_all: bool = False,
    include_workers: str = "one",
):
    """
    A class used to represent the hook which gets attached to the training
    process. It takes the form appropriate for the framework, such as
    tf.train.SessionRunHook for TF, Callback for Keras...

    ...

    Attributes
    ----------
    out_dir : str
        Path into which outputs will be written. The hook raises an error if
        'out_dir' already exists, because merging tensors generated in the
        current job with tensors from a previous job is not supported. Ensure
        that 'out_dir' does not exist.
    dry_run : bool
        When dry run is set, behavior is only described in the log file;
        tensors are not actually saved.
    save_config : SaveConfig object
        Applied as the default for all included tensors. A collection can
        optionally have its own SaveConfig object, which overrides this for
        its tensors.
    reduction_config : ReductionConfig object
        If passed, used as the default for all included tensors. A collection
        can have its own ReductionConfig object, which overrides this for its
        tensors. If this is not passed, tensors are saved in full.
    include_regex : list of str
        List of regular expressions. Tensors whose names match these
        expressions will be saved and made available as part of the `default`
        collection.
    include_collections : list of str
        Names of the collections that should be saved. If empty, defaults to
        including all collections from code.
    save_all : bool
        A shortcut for saving all tensors in the model. They are all saved in
        the collection `all`.
    include_workers : str
        Makes the hook save data from all workers when set to "all".
    profiler_config_parser : ProfilerConfigParser object
        If passed, use this profiler configuration. By default, a new profiler
        configuration is set up here.
    """
    error_handling_agent.set_hook(self)  # This should be the first line in the constructor.
    self.out_dir = verify_and_get_out_dir(out_dir)
    self.tensorboard_dir = get_tensorboard_dir(
        export_tensorboard=export_tensorboard,
        tensorboard_dir=tensorboard_dir,
        out_dir=self.out_dir,
    )
    self.dry_run = dry_run
    self.worker = None
    # When smdebug is used during an unsupported distributed training process,
    # we write data only from the process that has self.first_process set to True.
    self.first_process = None
    self.save_all_workers = True if include_workers == "all" else False
    self.chief_worker = DEFAULT_WORKER_NAME

    if include_collections is None:
        include_collections = default_include_collections
    else:
        include_collections = flatten(include_collections)
    self.include_collections = list(
        set(include_collections).union(set(default_include_collections))
    )

    self.save_all = save_all
    self.save_config = SaveConfig.parse(save_config)
    if reduction_config is None:
        reduction_config = ReductionConfig(save_raw_tensor=True)
    self.reduction_config = reduction_config
    self.include_regex = include_regex
    self.collection_manager = collection_manager
    self.init_step = init_step

    # The written_tensor_name_for_step dictionary stores the names of each
    # tensor saved for every step. This is to detect name clashes. If a name
    # clash is detected, it is avoided by appending an index to the tensor name.
    self.written_tensor_name_for_step = defaultdict(int)

    self.logger = logger

    if self.tensorboard_dir is None:
        self.logger.info(
            "tensorboard_dir has not been set for the hook. "
            "SMDebug will not be exporting tensorboard summaries."
        )

    if include_regex is not None:
        collection_manager.get(CollectionKeys.DEFAULT).include(include_regex)
        if CollectionKeys.DEFAULT not in self.include_collections:
            self.include_collections.append(CollectionKeys.DEFAULT)

    self.save_all = save_all
    if self.save_all:
        collection_manager.get(CollectionKeys.ALL).include(".*")
        if CollectionKeys.ALL not in self.include_collections:
            self.include_collections.append(CollectionKeys.ALL)

    if (
        CollectionKeys.DEFAULT not in self.include_collections
        and collection_manager.get(CollectionKeys.DEFAULT).include_regex
    ):
        self.logger.warning(
            "The `default` collection was not passed to "
            "include_collections. So it is not being saved"
        )

    self._collections_to_save = set()
    self._collections_to_save_for_step = None
    self.prepared_collections = False
    self.tensor_to_collections = {}

    self.step = init_step
    self.last_saved_step = None
    self.mode = ModeKeys.GLOBAL
    self.mode_steps = {ModeKeys.GLOBAL: init_step}
    self.writer = None

    self.profiler_config_parser = profiler_config_parser
    self.profiler_config_parser.load_config()

    self.timeline_writer = TimelineFileWriter(profiler_config_parser=profiler_config_parser)
    self.hvd_reader = None
    self.is_smdataparallel_profiling = False

    if is_sagemaker_job() and SageMakerFileMetricsWriter is not None:
        self.metrics_writer = SageMakerFileMetricsWriter()
    else:
        self.metrics_writer = None

    # Maps ModeKeys to FileWriter objects
    self.tb_writers = {}

    # Cache scalars that are being saved through save_scalar() calls
    self.scalar_cache = []

    self.logger.info("Saving to {}".format(self.out_dir))
    atexit.register(self._cleanup)

    # Check if there is any last saved state. Initialize the hook based on the last saved state.
    self.training_run = 0
    self._initialize_to_last_saved_state()
    self.custom_tensors_to_save = dict()
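# A minimal sketch of how a framework-specific hook typically drives the
# constructor above, mirroring the pattern of the XGBoost Hook further below:
# the subclass builds a CollectionManager and forwards framework defaults to
# super().__init__. `MyFrameworkHook`, `BaseHook`, `MY_DEFAULT_COLLECTIONS`,
# and the no-argument ProfilerConfigParser() call are illustrative
# assumptions, not the actual API.
class MyFrameworkHook(BaseHook):  # hypothetical subclass and base class names
    def __init__(self, out_dir=None, save_config=None):
        super().__init__(
            collection_manager=CollectionManager(),
            default_include_collections=MY_DEFAULT_COLLECTIONS,  # hypothetical constant
            profiler_config_parser=ProfilerConfigParser(),  # assumed no-arg init
            out_dir=out_dir,
            save_config=save_config,
        )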
def __init__(
    self,
    collection_manager: CollectionManager,
    default_include_collections: List[str],
    init_step: int = 0,
    out_dir: Optional[str] = None,
    export_tensorboard: bool = False,
    tensorboard_dir: Optional[str] = None,
    dry_run: bool = False,
    reduction_config: Optional[ReductionConfig] = None,
    save_config: Optional[Union[SaveConfig, Dict[ModeKeys, SaveConfigMode]]] = None,
    include_regex: Optional[List[str]] = None,
    include_collections: Optional[List[str]] = None,
    save_all: bool = False,
    include_workers: str = "one",
):
    """
    A class used to represent the hook which gets attached to the training
    process. It takes the form appropriate for the framework, such as
    tf.train.SessionRunHook for TF, Callback for Keras...

    ...

    Attributes
    ----------
    out_dir : str
        Path into which outputs will be written.
    dry_run : bool
        When dry run is set, behavior is only described in the log file;
        tensors are not actually saved.
    save_config : SaveConfig object
        Applied as the default for all included tensors. A collection can
        optionally have its own SaveConfig object, which overrides this for
        its tensors.
    reduction_config : ReductionConfig object
        If passed, used as the default for all included tensors. A collection
        can have its own ReductionConfig object, which overrides this for its
        tensors. If this is not passed, tensors are saved in full.
    include_regex : list of str
        List of regular expressions. Tensors whose names match these
        expressions will be saved and made available as part of the `default`
        collection.
    include_collections : list of str
        Names of the collections that should be saved. If empty, defaults to
        including all collections from code.
    save_all : bool
        A shortcut for saving all tensors in the model. They are all saved in
        the collection `all`.
    include_workers : str
        Makes the hook save data from all workers when set to "all".
    """
    self.out_dir = verify_and_get_out_dir(out_dir)
    self.tensorboard_dir = get_tensorboard_dir(
        export_tensorboard=export_tensorboard,
        tensorboard_dir=tensorboard_dir,
        out_dir=self.out_dir,
    )
    self.dry_run = dry_run
    self.worker = None
    self.save_all_workers = True if include_workers == "all" else False
    self.chief_worker = CONFIG_DEFAULT_WORKER_NAME

    if include_collections is None:
        include_collections = default_include_collections
    else:
        include_collections = flatten(include_collections)
    self.include_collections = list(
        set(include_collections).union(set(default_include_collections))
    )

    self.save_all = save_all
    self.save_config = SaveConfig.parse(save_config)
    if reduction_config is None:
        reduction_config = ReductionConfig(save_raw_tensor=True)
    self.reduction_config = reduction_config
    self.include_regex = include_regex
    self.collection_manager = collection_manager
    self.collection_manager.set_num_workers(self._get_num_workers())
    self.init_step = init_step

    self.logger = logger

    if self.tensorboard_dir is None:
        self.logger.info(
            "tensorboard_dir has not been set for the hook. "
            "SMDebug will not be exporting tensorboard summaries."
        )

    if include_regex is not None:
        collection_manager.get(CollectionKeys.DEFAULT).include(include_regex)
        if CollectionKeys.DEFAULT not in self.include_collections:
            self.include_collections.append(CollectionKeys.DEFAULT)

    self.save_all = save_all
    if self.save_all:
        collection_manager.get(CollectionKeys.ALL).include(".*")
        if CollectionKeys.ALL not in self.include_collections:
            self.include_collections.append(CollectionKeys.ALL)

    if (
        CollectionKeys.DEFAULT not in self.include_collections
        and collection_manager.get(CollectionKeys.DEFAULT).include_regex
    ):
        self.logger.warning(
            "The `default` collection was not passed to "
            "include_collections. So it is not being saved"
        )

    self._collections_to_save = set()
    self._collections_to_save_for_step = None
    self.prepared_collections = False
    self.tensor_to_collections = {}

    self.step = init_step
    self.last_saved_step = None
    self.mode = ModeKeys.GLOBAL
    self.mode_steps = {ModeKeys.GLOBAL: init_step}
    self.writer = None

    if is_sagemaker_job() and SageMakerFileMetricsWriter is not None:
        self.metrics_writer = SageMakerFileMetricsWriter()
    else:
        self.metrics_writer = None

    # Maps ModeKeys to FileWriter objects
    self.tb_writers = {}

    # Cache scalars that are being saved through save_scalar() calls
    self.scalar_cache = []

    self.logger.info("Saving to {}".format(self.out_dir))
    atexit.register(self._cleanup)

    # Check if there is any last saved state. Initialize the hook based on the last saved state.
    self.training_run = 0
    self._initialize_to_last_saved_state()
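# Small illustration of the include_collections merge performed above:
# user-passed names are unioned with the framework defaults, so default
# collections are always retained. The collection names are arbitrary examples.
def sketch_include_collections_merge():
    user = ["weights"]
    defaults = ["losses", "metrics"]
    merged = list(set(user).union(set(defaults)))
    assert sorted(merged) == ["losses", "metrics", "weights"]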
def test_tf_save_scalar():
    save_config = SaveConfig(save_steps=[0, 2, 4, 6, 8])
    collection = ("sm_metrics", "loss")
    helper_tensorflow_tests(collection, save_config)
    delete_local_trials([SMDEBUG_TF_HOOK_TESTS_DIR])
def __init__(
    self,
    out_dir: Optional[str] = None,
    export_tensorboard: bool = False,
    tensorboard_dir: Optional[str] = None,
    dry_run: bool = False,
    reduction_config=None,
    save_config: Optional[SaveConfig] = None,
    include_regex: Optional[List[str]] = None,
    include_collections: Optional[List[str]] = None,
    save_all: bool = False,
    include_workers: str = "one",
    hyperparameters: Optional[Dict[str, Any]] = None,
    train_data: Union[None, Tuple[str, str], DMatrix] = None,
    validation_data: Union[None, Tuple[str, str], DMatrix] = None,
) -> None:
    """
    This class represents the hook which is meant to be used as a callback
    function in XGBoost.

    Example
    -------
    >>> from smdebug.xgboost import Hook
    >>> hook = Hook()
    >>> xgboost.train(params, dtrain, callbacks=[hook])

    Parameters
    ----------
    out_dir: A path into which outputs will be written.
    dry_run: When dry_run is True, behavior is only described in the log
        file, and evaluations are not actually saved.
    reduction_config: This parameter is not used. It is a placeholder to
        keep the API consistent with other hooks.
    save_config: A SaveConfig object.
    include_regex: Tensors matching these regular expressions will be
        available as part of the 'default' collection.
    include_collections: Tensors that should be saved. If not given, all
        known collections will be saved.
    save_all: If true, all evaluations are saved in the collection 'all'.
    hyperparameters: When this dictionary is given, the key-value pairs
        will be available in the 'hyperparameters' collection.
    train_data: When this parameter is a tuple (file path, content type) or
        an xgboost.DMatrix instance, the average feature contributions
        (SHAP values) will be calculated against the provided data set.
        The content type can be either 'csv' or 'libsvm', e.g.,
        train_data = ('/path/to/train/file', 'csv') or
        train_data = ('/path/to/validation/file', 'libsvm') or
        train_data = xgboost.DMatrix('train.svm.txt').
    validation_data: Same as train_data, but for validation data.
    """  # noqa: E501
    if save_config is None:
        save_config = SaveConfig(save_interval=DEFAULT_SAVE_CONFIG_INTERVAL)

    collection_manager = CollectionManager()

    super().__init__(
        collection_manager=collection_manager,
        default_include_collections=DEFAULT_INCLUDE_COLLECTIONS,
        data_type_name=None,
        out_dir=out_dir,
        export_tensorboard=export_tensorboard,
        tensorboard_dir=tensorboard_dir,
        dry_run=dry_run,
        reduction_config=None,
        save_config=save_config,
        include_regex=include_regex,
        include_collections=include_collections,
        save_all=save_all,
        include_workers=include_workers,
    )

    if reduction_config is not None:
        msg = "'reduction_config' is not supported and will be ignored."
        self.logger.warning(msg)

    self.hyperparameters = hyperparameters
    self.train_data = self._validate_data(train_data)
    self.validation_data = self._validate_data(validation_data)
    self.worker = self._get_worker_name()
    self._full_shap_values = None
    set_hook(self)
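# Usage sketch assembled from the docstring above; the output path, params,
# and dtrain are placeholders, and SaveConfig(save_interval=10) is an
# arbitrary choice. Passing train_data as a (file path, content type) tuple
# enables the average SHAP value computation described in the docstring.
def sketch_xgboost_hook_usage(params, dtrain):
    hook = Hook(
        out_dir="/tmp/smdebug_xgboost_demo",  # placeholder output path
        save_config=SaveConfig(save_interval=10),
        train_data=("/path/to/train/file", "csv"),  # placeholder data file
    )
    return xgboost.train(params, dtrain, callbacks=[hook])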
def test_export_load():
    r1 = SaveConfig(save_interval=11, start_step=10, save_steps=[50])
    r2 = SaveConfig.from_json(r1.to_json())
    assert r1.to_json() == r2.to_json()
    assert r1 == r2
def test_end_step():
    s = SaveConfig(end_step=0)
    assert s.should_save_step(modes.GLOBAL, 0) is False
    assert s.should_save_step(modes.GLOBAL, 19) is False
    assert s.should_save_step(modes.GLOBAL, 100) is False
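# Companion sketch to test_end_step, under the assumption (not shown in these
# tests) that start_step is inclusive while end_step is exclusive, and that
# save_interval gates steps at multiples of the interval.
def sketch_start_step():
    s = SaveConfig(start_step=10, save_interval=5)
    assert s.should_save_step(modes.GLOBAL, 5) is False  # before start_step
    assert s.should_save_step(modes.GLOBAL, 10) is True  # at start_step, 10 % 5 == 0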
def test_load_none():
    r1 = SaveConfig(start_step=100)
    assert r1 == SaveConfig.from_json(r1.to_json())
def test_load_empty():
    r1 = SaveConfig()
    assert r1 == SaveConfig.from_json(r1.to_json())
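# Round-trip sketch extending the tests above to the per-mode form used in
# test_export_load_dict_save_config; assumes the same to_json/from_json
# contract holds when the config is keyed by ModeKeys.
def sketch_export_load_modes():
    r1 = SaveConfig({ModeKeys.TRAIN: SaveConfigMode(save_interval=2)})
    r2 = SaveConfig.from_json(r1.to_json())
    assert r1 == r2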