def write_one(writer, dset, **writer_args):
    """Run a single writer plugin on a dataset

    Args:
        writer (String):  Name of writer.
        dset (Dataset):   Model run data.
        writer_args:      Extra keyword arguments passed on to the writer.
    """
    call_args = dict(package_name=__name__, plugin_name=writer, dset=dset)
    plugins.call_one(**call_args, **writer_args)
def get_orbit(time, apriori_orbit=None, **kwargs):
    """Get an apriori orbit for the given time epochs

    The specification of the apriori orbit is matched with a filename in this
    orbit-directory. If it is not passed in as an argument, the apriori orbit
    to use is read from the configuration.

    Args:
        time (Time):            Time epochs at the satellite for which to calculate the apriori orbit.
        apriori_orbit (String): Optional specification of which apriori orbit to use (see above).

    Returns:
        AprioriOrbit: Apriori orbit object.
    """
    # The config value wins; the function argument only acts as a fallback default
    orbit_name = config.tech.get("apriori_orbit", apriori_orbit).str

    known_orbits = ("broadcast", "precise", "slr")
    if orbit_name not in known_orbits:
        log.fatal(
            "Configuration value '{}' for option 'apriori_orbit' is unknown. It should be either 'broadcast' and/or 'precise', or 'slr'. ",
            orbit_name,
        )

    return plugins.call_one(package_name=__name__, plugin_name=orbit_name, time=time, **kwargs)
def get(dset, **obs_args):
    """Construct a Dataset for the pipeline based on observations

    Delegates to the pipeline plugin named by the dataset's own variables.

    Args:
        dset: A Dataset that will be filled with observations and necessary fields
    """
    # TODO: These can probably be removed
    plugins.call_one(
        package_name=__name__,
        plugin_name=dset.vars["tech"],
        dset=dset,
        rundate=dset.rundate,
        session=dset.vars["session"],
        **obs_args,
    )
def write(default_stage):
    """Call all writers specified in the configuration

    The list of writers to use is taken from the config file of the given
    technique. Each writer is passed a :class:`~where.data.dataset.Dataset`
    with data for the modelrun and should write the relevant parts of the data
    to file.

    By default the last dataset for the default_stage is sent to the writer,
    but that is possible to override with the following notation:

        output = writer_1                 # Use last dataset of default_stage
        output = writer_1:calculate       # Use last dataset of "calculate" stage
        output = writer_1:calculate/2     # Use dataset 2 of "calculate" stage

    Args:
        default_stage (String): Name of stage to read dataset from by default.
    """
    prefix = config.analysis.get("analysis", default="").str
    rundate = config.analysis.rundate.date
    tech = config.analysis.tech.str
    session = config.analysis.session.str

    # Datasets are cached per dataset-specification so they are only read once
    dset_cache = dict()
    for output in config.tech.output.list:
        writer, _, dset_str = output.partition(":")
        if dset_str not in dset_cache:
            stage_spec, _, dset_id = dset_str.partition("/")
            stage, _, dset_name = stage_spec.partition(":")
            dset_cache[dset_str] = data.Dataset(
                rundate,
                tech=tech,
                stage=stage or default_stage,
                dataset_name=dset_name or session,
                dataset_id=int(dset_id) if dset_id else "last",
                session=session,
            )

        # Call the writer with its dataset
        plugins.call_one(package_name=__name__, plugin_name=writer, prefix=prefix, dset=dset_cache[dset_str])
def get(rundate, pipeline, session, **obs_args):
    """Construct a Dataset for the pipeline based on observations

    Args:
        rundate:   Start date of the observations.
        pipeline:  Which pipeline to construct the Dataset for.
        session:   Name of session.

    Returns:
        Dataset: A Dataset with observations and necessary fields
    """
    dataset = data.Dataset.anonymous()
    plugins.call_one(
        package_name=__name__, plugin_name=pipeline, dset=dataset, rundate=rundate, session=session, **obs_args
    )
    return dataset
def get_crf_factory(celestial_reference_frame):
    """Get a factory for a given celestial reference frame

    The factory knows how to create RadioSource objects for a given reference
    frame, for instance `icrf2`.

    Args:
        celestial_reference_frame (String): Specification of which reference frame to use (see `get_crf`).

    Returns:
        CrfFactory: Factory that knows how to create RadioSource objects.
    """
    factory = plugins.call_one(package_name=__name__, plugin_name=celestial_reference_frame)
    return factory
def file_vars():
    """Get a list of file variables for the current pipeline

    The active analysis variables are also made available, but may be
    overridden by the pipeline.
    """
    # Start from the analysis config variables merged with date-derived variables
    variables = dict(config.analysis.config.as_dict(), **config.date_vars(config.analysis.rundate.date))

    # Pipeline-specific variables take precedence over the analysis ones
    variables.update(
        plugins.call_one(package_name=__name__, plugin_name=config.analysis.tech.str, part="file_vars")
    )
    return variables
def get_crf_factory(time, celestial_reference_frame):
    """Get a factory for a given celestial reference frame

    The factory knows how to create RadioSource objects for a given reference
    frame, for instance `icrf2`.

    Args:
        time (Time):                        Time epochs for which to calculate the reference frame.
        celestial_reference_frame (String): Specification of which reference frame to use (see `get_crf`).
                                            An optional catalog may be given after a colon, e.g. `icrf2:catalog`.

    Returns:
        CrfFactory: Factory that knows how to create RadioSource objects.
    """
    name, _, catalog = celestial_reference_frame.partition(":")
    factory_args = {"catalog": catalog} if catalog else {}
    return plugins.call_one(package_name=__name__, plugin_name=name, time=time, **factory_args)
def apply_remover(remover: str, dset: "Dataset", **kwargs: Dict[Any, Any]) -> None:
    """Apply defined remover for a given session

    Args:
        remover:  The remover name.
        dset:     Dataset containing analysis data.
        kwargs:   Input arguments to the remover.
    """
    log.info(f"Apply remover {remover!r}")

    # The remover plugin returns a boolean index of observations to keep
    keep_idx = plugins.call_one(package_name=__name__, plugin_name=remover, dset=dset, **kwargs)
    num_kept = sum(keep_idx)
    log.info(f"Keeping {num_kept} of {dset.num_obs} observations")
    dset.subset(keep_idx)
def call(pipeline_, stage_, **stage_args):
    """Call one stage of the pipeline

    The underscore-postfix of `pipeline_` and `stage_` is used so that it does
    not interfere with the stage_args.

    Args:
        pipeline_ (String):  The pipeline.
        stage_ (String):     The stage.
        stage_args:          Arguments that will be passed to the stage-function.

    Returns:
        The return value of the stage-function.
    """
    result = plugins.call_one(package_name=__name__, plugin_name=pipeline_, part=stage_, **stage_args)
    return result
def get_trf_factory(time, reference_frame):
    """Get a factory for a given reference frame

    The factory knows how to create TrfSite objects for a given reference
    frame, for instance `itrf:2014`.

    Args:
        time (Time):              Time epochs for which to calculate the reference frame.
        reference_frame (String): Specification of which reference frame to use (see `get_trf`).

    Returns:
        TrfFactory: Factory that knows how to create TrfSite objects.
    """
    # An optional version can be given after a colon, e.g. `itrf:2014`
    name, _, version = reference_frame.partition(":")
    factory_args = {"version": version} if version else {}
    return plugins.call_one(package_name=__name__, plugin_name=name, time=time, **factory_args)
def options():
    """List the command line options for starting the different pipelines

    Returns:
        Dict: Command line options pointing to pipelines
    """
    opts = dict()
    for pipeline in plugins.list_all(package_name=__name__):
        try:
            pipeline_options = plugins.call_one(package_name=__name__, plugin_name=pipeline, part="options")
        except exceptions.UnknownPluginError:
            # Pipelines without an options-part are simply skipped
            continue
        opts.update(dict.fromkeys(pipeline_options, pipeline))
    return opts
def list_sessions(rundate, pipeline):
    """Get a list of sessions for a given rundate for a pipeline

    Args:
        rundate (Date):    The model run date.
        pipeline (String): Name of pipeline.

    Returns:
        List: Strings with the names of the sessions.
    """
    try:
        return plugins.call_one(package_name=__name__, plugin_name=pipeline, part="list_sessions", rundate=rundate)
    except exceptions.UnknownPluginError:
        # If sessions is not defined in the pipeline, return a list with one unnamed session
        return [""]
def get_satellite(satellite_name, **kwargs):
    """Get a satellite object by name

    Args:
        satellite_name (String): Name used to look up satellite.
        kwargs (Dict):           Arguments that will be passed to the satellite object.

    Returns:
        A satellite object describing the satellite.
    """
    lookup_key = satellite_name.lower()
    try:
        plugin, part = satellites()[lookup_key]
    except KeyError:
        log.fatal("Unknown satellite '{}'. Defined satellites are {}.", satellite_name, ", ".join(names()))

    return plugins.call_one(package_name=__name__, plugin_name=plugin, part=part, **kwargs)
def get_session(rundate, pipeline):
    """Read session from command line options

    The session is validated for the given pipeline. Uses the
    `validate_session`-plugin for validation.

    Args:
        rundate (Date):    The model run date.
        pipeline (String): Name of pipeline.

    Returns:
        String: Name of session.
    """
    session = util.read_option_value("--session", default="")
    try:
        return plugins.call_one(
            package_name=__name__, plugin_name=pipeline, part="validate_session", rundate=rundate, session=session
        )
    except exceptions.UnknownPluginError:
        # Simply return session if it can not be validated
        return session
def get(datasource_name, **kwargs):
    """Read data from the given data source

    Simple data sources that only return data directly from a parser does not
    need an explicit apriori-file. This is handled by looking in the
    parser-directory if a data source is not found in the apriori directory.

    The import of where.parsers is done locally to avoid circular imports.

    Args:
        datasource_name (String): Name of apriori data source
        kwargs:                   Input arguments to the data source

    Returns:
        The data from the data source (data type depends on source)
    """
    try:
        return plugins.call_one(package_name=__name__, plugin_name=datasource_name, **kwargs)
    except exceptions.UnknownPluginError as apriori_err:
        # Local import to avoid a circular import between apriori and parsers
        from where import parsers

        try:
            data = parsers.parse_key(file_key=datasource_name, **kwargs).as_dict()
            log.dev(f"Called parsers.parse_key({datasource_name}) in apriori.get()")
            return data
        except AttributeError:
            # parse_key did not provide as_dict(); fall back to the plain parser
            try:
                data = parsers.parse(datasource_name, **kwargs)
                log.dev(f"Called parsers.parse({datasource_name}) in apriori.get()")
                return data
            except exceptions.UnknownPluginError:
                # Neither apriori nor parser plugin exists: report the original error
                raise apriori_err from None