Example #1
    def go(self):
        """
        This is where the work of the recipe gets done.
        Subclasses should define their own go() method, but remember to call
        this one to perform necessary initialisation.
        """
        # Every recipe needs a job identifier
        if "job_name" not in self.inputs:
            raise PipelineException("Job undefined")

        if "start_time" not in self.inputs:
            import datetime
            self.inputs["start_time"] = datetime.datetime.utcnow().replace(microsecond=0).isoformat()

        # Config is passed in from the spawning recipe, but if this is the
        # start of a pipeline it won't have one.
        if not hasattr(self, "config"):
            self.config = self._read_config()

        # Ensure we have a runtime directory
        if 'runtime_directory' not in self.inputs:
            self.inputs["runtime_directory"] = self.config.get(
                "DEFAULT", "runtime_directory"
            )
        else:
            self.config.set('DEFAULT', 'runtime_directory', self.inputs['runtime_directory'])
        if not os.access(self.inputs['runtime_directory'], os.F_OK):
            raise IOError("Runtime directory doesn't exist")

        # ...and task files, if applicable
        if "task_files" not in self.inputs:
            try:
                self.inputs["task_files"] = utilities.string_to_list(
                    self.config.get('DEFAULT', "task_files")
                )
            except NoOptionError:
                self.inputs["task_files"] = []
        self.task_definitions = ConfigParser(self.config.defaults())
        print("Reading task definition file(s): %s" % \
                             ",".join(self.inputs["task_files"]), file=sys.stderr)
        self.task_definitions.read(self.inputs["task_files"])

        # Specify the working directory on the compute nodes
        if 'working_directory' not in self.inputs:
            self.inputs['working_directory'] = self.config.get(
                "DEFAULT", "working_directory"
            )
        else:
            self.config.set("DEFAULT", "working_directory", self.inputs['working_directory'])

        try:
            self.recipe_path = [
                os.path.join(root, 'master') for root in utilities.string_to_list(
                    self.config.get('DEFAULT', "recipe_directories")
                )
            ]
        except NoOptionError:
            self.recipe_path = []


        # At this point, the recipe inputs must be complete. If not, exit.
        if not self.inputs.complete():
            raise PipelineException(
                "Required inputs not available: %s" %
                " ".join(self.inputs.missing())
            )

        # Only configure handlers if our parent is the root logger.
        # Otherwise, our parent should have done it for us.
        if isinstance(self.logger.parent, logging.RootLogger):
            self._setup_logging()

        self.logger.debug("Pipeline start time: %s" % self.inputs['start_time'])
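
The go() method above pulls nearly all of its fallback values from the DEFAULT section of a ConfigParser-style configuration file. The following is a minimal, self-contained sketch of that lookup pattern; the option values and paths are invented for illustration and are not the framework's shipped configuration.

from configparser import ConfigParser
from io import StringIO

# Hypothetical configuration text; only the option names mirror what go() reads.
example_cfg = """
[DEFAULT]
runtime_directory = /data/scratch/pipeline_runtime
working_directory = /data/scratch/pipeline_work
task_files = /opt/pipeline/tasks.cfg
recipe_directories = /opt/pipeline/recipes
"""

config = ConfigParser()
config.read_file(StringIO(example_cfg))
print(config.get("DEFAULT", "runtime_directory"))   # /data/scratch/pipeline_runtime
print(config.get("DEFAULT", "recipe_directories"))  # /opt/pipeline/recipes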
Example #2
    def coerce(self, value):
        if isinstance(value, str):
            return string_to_list(value)
        else:
            return value
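
coerce() leans on string_to_list() from the framework's utilities module. The stand-in below is an assumption about its behaviour (splitting a comma-separated option value into stripped, non-empty items), intended only to show how coerce() is meant to normalise config values.

def string_to_list(value):
    # Assumed behaviour: split on commas, strip whitespace, drop empty items.
    return [item.strip() for item in value.split(",") if item.strip()]

def coerce(value):
    # Strings read from a config file get split; anything else passes through.
    if isinstance(value, str):
        return string_to_list(value)
    return value

print(coerce("tasks.cfg, extra_tasks.cfg"))  # ['tasks.cfg', 'extra_tasks.cfg']
print(coerce(["already", "a", "list"]))      # ['already', 'a', 'list']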
Example #3
# Extract runtime, working, results directories from input parset
runtime_directory = input_parset.getString(
    "ObsSW.Observation.ObservationControl.PythonControl.runtimeDirectory")
working_directory = input_parset.getString(
    "ObsSW.Observation.ObservationControl.PythonControl.workingDirectory")
results_directory = input_parset.getString(
    "ObsSW.Observation.ObservationControl.PythonControl.resultDirectory")

# Set up configuration for later processing stages
config = ConfigParser({
    "job_name": tree_id,
    "cwd": os.getcwd(),
    "start_time": start_time,
})
config.read(config_file)
config.set('DEFAULT', 'runtime_directory', runtime_directory)
config.set('DEFAULT', 'default_working_directory', working_directory)

# Extract input file list from parset
to_process = input_parset.getStringVector(
    'ObsSW.Observation.DataProducts.measurementSets')

# Read config file to establish location of parset directory to use
parset_directory = config.get("layout", "parset_directory")
create_directory(parset_directory)

# For each task (currently only ndppp), extract and write parset
tasks = ConfigParser(config.defaults())
tasks.read(string_to_list(config.get("DEFAULT", "task_files")))
ndppp_parset_location = tasks.get("ndppp", "parset")
input_parset.makeSubset(
    "ObsSW.Observation.ObservationControl.PythonControl.DPPP.").writeFile(
        ndppp_parset_location)

# Run pipeline & wait for result
subprocess.check_call([
    'python', pipeline_definition, '-j', tree_id, '-d', '--config',
    config_file, '--runtime-directory', runtime_directory,
    '--default-working-directory', working_directory, '--start-time',
    start_time
])
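
Both the recipe in Example #1 and the control script above seed a second ConfigParser with config.defaults() before reading the task definition files. The self-contained sketch below (file contents and path are invented) shows why: the defaults become visible in every task section and can be interpolated into options such as the ndppp parset location.

from configparser import ConfigParser
from io import StringIO

# Hypothetical task definition file; the [ndppp] parset option mirrors the lookup above.
task_file = """
[ndppp]
parset = %(runtime_directory)s/parsets/ndppp.parset
"""

tasks = ConfigParser({"runtime_directory": "/data/scratch/run_123"})
tasks.read_file(StringIO(task_file))
print(tasks.get("ndppp", "parset"))
# -> /data/scratch/run_123/parsets/ndppp.parset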