def wrapper(self, *args, **kwargs):
    # `self` is an instance of the wrapped class; `f` is the function captured
    # from the enclosing scope. Build a fresh logger environment for this call
    # and delegate to an Actuator wrapping the original function.
    env = LoggerEnv(parameter=dict(),
                    experiment_id=get_experiment_id(),
                    run_id=get_run_id())
    return Actuator(fn=f)(self, *args, _pypads_env=env, **kwargs)
def _add_hook(self, hook, config, callback, call: Call, mappings, data=None):
    """
    Prepare execution of a hook defined in the mapping file for the given call.

    Returns an env setter for the hook, or None when the same hook was already
    attached to this call (duplicate hookings are ignored).
    """
    # Guard clause: skip hooks that are already tracked on this call.
    if call.has_hook(hook):
        logger.debug(
            f"{hook} defined hook with config {config} is tracked multiple times on {call}. Ignoring second hooking.")
        return None
    env = InjectionLoggerEnv(mappings, hook, callback, call, config.parameters,
                             get_experiment_id(), get_run_id(), data=data)
    return self._get_env_setter(_pypads_env=env)
def create_dummy_env(data=None) -> LoggerEnv:
    """
    Create a dummy environment to be used for programmatically called logging instead of hooks.

    :param data: Optional extra data to merge into the environment data.
    :return: A LoggerEnv marked as programmatically created.
    """
    # Build the payload explicitly; the input dict is copied, never mutated.
    payload = dict(data) if data is not None else {}
    payload["programmatic"] = True
    return LoggerEnv(parameter=dict(), experiment_id=get_experiment_id(),
                     run_id=get_run_id(), data=payload)
def log(self, obj: Union[BaseStorageModel]):
    """
    Persist an entry object as json meta data and mirror it to mlflow where applicable.

    :param obj: Entry object to be logged
    :return: The stored meta information
    """
    storage_type = obj.storage_type

    if storage_type == ResultType.metric:
        obj: MetricMetaModel
        meta = self.log_json(obj, obj.uid)
        mlflow.log_metric(obj.name, obj.data)
        return meta

    if storage_type == ResultType.parameter:
        obj: ParameterMetaModel
        meta = self.log_json(obj, obj.uid)
        mlflow.log_param(obj.name, obj.data)
        return meta

    if storage_type == ResultType.artifact:
        obj: Union[Artifact, ArtifactMetaModel]
        path = self._log_mem_artifact(path=obj.data, artifact=obj.content(),
                                      write_format=obj.file_format)
        # Todo maybe don't store filesize because of performance (querying for file after storing takes time)
        directory = os.path.dirname(path)
        filename = os.path.basename(path)
        for file_info in self.list_files(run_id=get_run_id(), path=directory):
            if file_info.path == filename:
                obj.file_size = file_info.file_size
                break
        # Replace the in-memory content with the stored artifact path before persisting meta.
        obj.data = path
        return self.log_json(obj, obj.uid)

    if storage_type == ResultType.tag:
        obj: TagMetaModel
        meta = self.log_json(obj, obj.uid)
        mlflow.set_tag(obj.name, obj.data)
        return meta

    # Fallback: anything else is stored as plain json meta data.
    return self.log_json(obj, obj.uid)
def _check_result_dependencies(self):
    """
    Resolve all declared result dependencies of this logger.

    :return: The tracked objects found for each dependency.
    :raises MissingDependencyError: if any dependency produced no tracked objects.
    """
    from pypads.app.pypads import get_current_pads
    pads = get_current_pads()

    unresolved = []
    resolved = []
    for dependency in self.result_dependencies:
        found = pads.results.get_tracked_objects(
            experiment_name=get_experiment_name(), run_id=get_run_id(), **dependency)
        if found:
            resolved.append(found)
        else:
            unresolved.append(dependency)

    if unresolved:
        raise MissingDependencyError(
            "Can't log " + str(self) + ". Missing results of other loggers: " + ", ".join(
                [str(d) for d in unresolved]))
    return resolved
class RunObjectModel(BackendObjectModel):
    # References default to the currently active experiment/run; they stay None
    # when no experiment/run context exists at construction time.
    experiment: Optional[ExperimentReference] = Field(
        default_factory=lambda: None if get_experiment_id() is None else get_reference(ExperimentModel()))
    run: Optional[RunReference] = Field(
        default_factory=lambda: None if get_run_id() is None else get_reference(RunModel()))
def __real_call__(self, *args, _pypads_env: LoggerEnv = None, **kwargs):
    """
    Execute the import logger, building a default LoggerEnv when none is provided.

    :param _pypads_env: Optional pre-built logger environment; a default one is
        created from the current experiment/run when missing.
    :return: Whatever the parent __real_call__ returns.
    """
    logger.debug("Called on Import function " + str(self))
    # Bug fix: the fallback `data` was written as {"category: ImportLogger"},
    # which is a *set* containing one string, not the intended mapping.
    _return = super().__real_call__(
        *args,
        _pypads_env=_pypads_env or LoggerEnv(parameter=dict(),
                                             experiment_id=get_experiment_id(),
                                             run_id=get_run_id(),
                                             data={"category": "ImportLogger"}),
        **kwargs)
    return _return