Example #1
 def _get_io_product_specs(self):
     """
     Get input- and output-data product specifications from the
     parset-file, and do some sanity checks.
     """
     dps = self.parset.makeSubset(
         self.parset.fullModuleName('DataProducts') + '.'
     )
     self.input_data = DataMap([
         tuple(os.path.join(location, filename).split(':')) + (skip,)
             for location, filename, skip in zip(
                 dps.getStringVector('Input_Correlated.locations'),
                 dps.getStringVector('Input_Correlated.filenames'),
                 dps.getBoolVector('Input_Correlated.skip'))
     ])
     self.logger.debug("%d Input_Correlated data products specified" %
                       len(self.input_data))
     self.output_data = DataMap([
         tuple(os.path.join(location, filename).split(':')) + (skip,)
             for location, filename, skip in zip(
                 dps.getStringVector('Output_Correlated.locations'),
                 dps.getStringVector('Output_Correlated.filenames'),
                 dps.getBoolVector('Output_Correlated.skip'))
     ])
     self.logger.debug("%d Output_Correlated data products specified" %
                       len(self.output_data))
     # Sanity checks on input- and output data product specifications
     if not validate_data_maps(self.input_data, self.output_data):
         raise PipelineException(
             "Validation of input/output data product specification failed!"
         )
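
A minimal, self-contained sketch of the host:path splitting this example relies on. The location and filename values below are hypothetical; the expression is the one used in the DataMap construction above:

    import os

    # Hypothetical parset vectors; locations embed the host as "host:directory"
    locations = ['locus001:/data/scratch', 'locus002:/data/scratch']
    filenames = ['L42_SB000.MS', 'L42_SB001.MS']
    skip = [False, True]

    entries = [tuple(os.path.join(loc, fn).split(':')) + (sk,)
               for loc, fn, sk in zip(locations, filenames, skip)]
    print(entries)
    # [('locus001', '/data/scratch/L42_SB000.MS', False),
    #  ('locus002', '/data/scratch/L42_SB001.MS', True)]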
Example #2
    def run_task(self, configblock, datafiles=[], **kwargs):
        """
        A task is a combination of a recipe and a set of parameters.
        Tasks can be predefined in the task file set in the pipeline
        configuration (default: tasks.cfg).

        Here, we load a task configuration and execute it.
        This is a "shorthand" version of
        :meth:`lofarpipe.cuisine.WSRTrecipe.WSRTrecipe.cook_recipe`.
        """
        self.logger.info("Running task: %s" % (configblock,))

        # Does the task definition exist?
        try:
            recipe = self.task_definitions.get(configblock, "recipe")
        except NoSectionError:
            raise PipelineException(
                "%s not found -- check your task definitions" % configblock
            )

        # Build inputs dict.
        # First, take details from caller.
        inputs = LOFARinput(self.inputs)
        inputs['args'] = datafiles

        # Add parameters from the task file.
        # Note that we neither need the recipe name nor any items from the
        # DEFAULT config.
        parameters = dict(self.task_definitions.items(configblock))
        del parameters['recipe']
        for key in list(dict(self.config.items("DEFAULT")).keys()):
            del parameters[key]
        inputs.update(parameters)

        # Update inputs with provided kwargs, if any.
        inputs.update(kwargs)

        # Default outputs dict.
        outputs = LOFARoutput()

        # Cook the recipe and return the results
        try:
            self.cook_recipe(recipe, inputs, outputs)
        except CookError:
            self.logger.warn(
                "%s reports failure (using %s recipe)" % (configblock, recipe)
            )
            raise PipelineRecipeFailed("%s failed" % configblock)

        # Get the (optional) node xml information
        if "return_xml" in outputs:
            return_node = xml.parseString(
                                outputs['return_xml']).documentElement
            # If no active stack, fail silently.
            add_child_to_active_stack_head(self, return_node)

        return outputs
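
The layering of inputs in run_task can be shown with plain dicts. The keys and values here are hypothetical; the precedence (caller inputs, then task-file parameters, then explicit kwargs) follows the update order above:

    # Hypothetical values illustrating the update order in run_task:
    inputs = {'job_name': 'demo', 'nproc': 1}             # from the caller
    task_parameters = {'nproc': 4, 'parset': 'x.parset'}  # from tasks.cfg
    kwargs = {'nproc': 8}                                 # from the call site

    effective = dict(inputs)
    effective.update(task_parameters)  # task file overrides caller defaults
    effective.update(kwargs)           # explicit kwargs override everything
    print(effective['nproc'])          # 8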
Example #3
 def _validate_io_product_specs(self):
     """
     Sanity checks on input- and output data product specifications
     """
     if not validate_data_maps(self.input_data['data'],
                               self.input_data['instrument'],
                               self.output_data['data']):
         raise PipelineException(
             "Validation of input/output data product specification failed!"
         )
Example #4
    def go(self):
        self.logger.info("Building sky model")
        super(skymodel, self).go()

        ra_min = self.inputs['ra'] - self.inputs['search_size']
        ra_max = self.inputs['ra'] + self.inputs['search_size']
        dec_min = self.inputs['dec'] - self.inputs['search_size']
        dec_max = self.inputs['dec'] + self.inputs['search_size']

        try:
            with closing(
                    db.connect(
                        hostname=self.inputs["db_host"],
                        port=int(self.inputs["db_port"]),
                        database=self.inputs["db_dbase"],
                        username=self.inputs["db_user"],
                        password=self.inputs["db_password"])) as db_connection:
                with closing(db_connection.cursor()) as db_cursor:
                    db_cursor.execute(query_central % (float(
                        self.inputs['ra']), float(self.inputs['dec']), "VLSS"))
                    central_source = db_cursor.fetchone()
                    if central_source:
                        self.outputs["source_name"], self.outputs[
                            "source_flux"] = central_source
                    else:
                        raise PipelineException(
                            "Error reading central source from database; got %s"
                            % str(central_source))
                    self.logger.info("Central source is %s; flux %f" %
                                     (self.outputs["source_name"],
                                      self.outputs["source_flux"]))
                    db_cursor.execute(query_skymodel % (
                        4,
                        4,  # Only using VLSS for now
                        float(ra_min),
                        float(ra_max),
                        float(dec_min),
                        float(dec_max),
                        float(self.inputs['min_flux'])))
                    results = db_cursor.fetchall()

        except db.Error as my_error:
            self.logger.warn("Failed to build sky model: %s " % (my_error))
            return 1

        try:
            with open(self.inputs['skymodel_file'], 'w') as file:
                file.write(header_line)
                file.writelines(", ".join(line) + ",\n" for line in results)
        except Exception as e:
            self.logger.warn("Failed to write skymodel file")
            self.logger.warn(str(e))
            return 1

        return 0
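
The final block writes one comma-separated row per query result. A toy sketch, assuming each row in results is a tuple of strings; the header text is hypothetical (the real header_line is defined elsewhere in the module):

    header_line = "# name, type, ra, dec, flux\n"   # hypothetical header
    results = [('3C196', 'POINT', '08:13:36', '+48.13.03', '83.1')]

    with open('demo.skymodel', 'w') as skymodel_file:
        skymodel_file.write(header_line)
        skymodel_file.writelines(", ".join(line) + ",\n" for line in results)
    # demo.skymodel now holds the header plus one trailing-comma row.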
Example #5
    def _bbs(self,
             timeslice_map_path,
             parmdbs_map_path,
             sourcedb_map_path,
             skip=False):
        """
        Perform a calibration step: first with a set of sources from the
        GSM, and in later iterations also with the sources found so far.
        """
        # create parset for bbs run
        parset = self.parset.makeSubset("BBS.")
        parset_path = self._write_parset_to_file(
            parset, "bbs", "Parset for calibration with a local sky model")

        # create the output file path
        output_mapfile = self._write_datamap_to_file(
            None, "bbs_output", "Mapfile with calibrated measurement sets.")

        converted_sourcedb_map_path = self._write_datamap_to_file(
            None, "source_db", "correctly shaped mapfile for input sourcedbs")

        if skip:
            return output_mapfile

        # The create db step produces a mapfile with a single sourcelist for
        # the different timeslices. Generate a mapfile with copies of the
        # sourcelist location: this allows validation of the maps in
        # combination. Get the original map data:
        sourcedb_map = DataMap.load(sourcedb_map_path)
        parmdbs_map = MultiDataMap.load(parmdbs_map_path)
        converted_sourcedb_map = []

        # sanity check for correct output from previous recipes
        if not validate_data_maps(sourcedb_map, parmdbs_map):
            self.logger.error("The input files for bbs do not contain "
                              "matching host names for each entry content:")
            self.logger.error(repr(sourcedb_map))
            self.logger.error(repr(parmdbs_map))
            raise PipelineException("Invalid input data for imager_bbs recipe")

        self.run_task("imager_bbs",
                      timeslice_map_path,
                      parset=parset_path,
                      instrument_mapfile=parmdbs_map_path,
                      sourcedb_mapfile=sourcedb_map_path,
                      mapfile=output_mapfile,
                      working_directory=self.scratch_directory)

        return output_mapfile
Example #6
    def go(self):
        super(StatefulRecipe, self).go()
        statefile = os.path.join(self.config.get('layout', 'job_directory'),
                                 'statefile')
        try:
            with open(statefile, 'rb') as state_file:
                inputs, self.state = pickle.load(state_file)

            # What's the correct thing to do if inputs differ from the saved
            # state? start_time will always change.
            for key, value in inputs.items():
                if key != "start_time" and self.inputs[key] != value:
                    raise PipelineException(
                        "Input %s (%s) differs from saved state (%s)" %
                        (key, str(self.inputs[key]), inputs[key]))

            self.completed = list(reversed(self.state))
        except (IOError, EOFError):
            # Couldn't load state
            self.completed = []
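
The statefile read here is a single pickled (inputs, state) pair. A hedged sketch of the matching writer, for reference only (the real _save_state may differ):

    import pickle

    def save_state(path, inputs, state):
        # Counterpart of the load above: one pickled (inputs, state) tuple.
        with open(path, 'wb') as state_file:
            pickle.dump((dict(inputs), state), state_file)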
Example #7
    def run(self, cmd_in, unsave=False, cwd=None):
        """
            Add the cmd as a subprocess to the current group: The process is
            started!
            cmd can be suplied as a single string (white space seperated)
            or as a list of strings
            """

        if isinstance(cmd_in, str):
            cmd = cmd_in.split()
        elif isinstance(cmd_in, list):
            cmd = cmd_in
        else:
            raise Exception(
                "SubProcessGroup.run() expects a string or "
                "list[string] as argument; supplied: {0}".format(type(cmd_in)))

        # Run subprocess
        process = subprocess.Popen(cmd,
                                   cwd=cwd,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        # save the process
        self.process_group.append((cmd, process))

        # TODO: SubProcessGroup could saturate a system with too many
        # concurrent calls: artificial limit of 20 subprocesses
        if not unsave and (len(self.process_group) > 20):
            self.logger.error(
                "SubProcessGroup could hang with more "
                "than 20 concurrent calls; call with unsave = True to run "
                "with more than 20 subprocesses")
            raise PipelineException("SubProcessGroup could hang with more "
                                    "than 20 concurrent calls. Aborting")

        if self.logger is None:
            print("Subprocess started: {0}".format(cmd))
        else:
            self.logger.info("Subprocess started: {0}".format(cmd))
Example #8
    def wrapper(self, configblock, datafiles=[], **kwargs):
        try:
            my_state = self.completed.pop()
        except (AttributeError, IndexError):
            my_state = ('', '')

        if configblock == my_state[0]:
            # We have already run this task and stored its state, or...
            self.logger.info(
                "Task %s already exists in saved state; skipping" %
                configblock)

            # Get the (optional) node xml information, normally added
            # in remotecommand.py
            if "return_xml" in my_state[1]:
                return_node = xml.parseString(
                    my_state[1]['return_xml']).documentElement

                add_child_to_active_stack_head(self, return_node)
                # If no active stack, do nothing

            return my_state[1]

        elif my_state[0] != '':
            # There is a stored task, but it doesn't match this one, or...
            self.logger.error(
                "Stored state does not match pipeline definition; bailing out")
            raise PipelineException(
                "Stored state does not match pipeline definition")
        else:
            # We need to run this task now.
            outputs = run_task(self, configblock, datafiles, **kwargs)
            self.state.append((configblock, outputs))
            self._save_state()
            return outputs
Example #9
    def _read_config(self):
        # If a config file hasn't been specified, use the default
        if "config" not in self.inputs:
            # Possible config files, in order of preference:
            conf_locations = (
                os.path.join(sys.path[0], 'pipeline.cfg'),
                os.path.join(os.path.expanduser('~'), '.pipeline.cfg')
            )
            for path in conf_locations:
                if os.access(path, os.R_OK):
                    self.inputs["config"] = path
                    break
            if "config" not in self.inputs:
                raise PipelineException("Configuration file not found")

        config = ConfigParser({
            "job_name": self.inputs["job_name"],
            "start_time": self.inputs["start_time"],
            "cwd": os.getcwd()
        })
        print("Reading configuration file: %s" % \
                              self.inputs["config"], file=sys.stderr)
        config.read(self.inputs["config"])
        return config
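
The search order used when no config file is supplied can be exercised on its own; this sketch only repeats the two candidate paths from the method above:

    import os
    import sys

    conf_locations = (
        os.path.join(sys.path[0], 'pipeline.cfg'),
        os.path.join(os.path.expanduser('~'), '.pipeline.cfg'),
    )
    # The first readable candidate wins; None would mean
    # "Configuration file not found".
    found = next((p for p in conf_locations if os.access(p, os.R_OK)), None)
    print(found)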
Example #10
    def _long_baseline(self, input_ms_map_path, target_mapfile,
                       add_beam_tables, output_ms_mapfile):
        """
        Copy ms to correct location, combine the ms in slices and combine
        the time slices into a large virtual measurement set
        """
        # Create the dir where found and processed ms are placed.
        # ms_per_image_map_path contains all the original ms locations;
        # this list may contain missing files.
        processed_ms_dir = os.path.join(self.scratch_directory, "subbands")

        # get the parameters, create a subset for ndppp, save
        ndppp_parset = self.parset.makeSubset("DPPP.")
        ndppp_parset_path = self._write_parset_to_file(
            ndppp_parset, "prepare_imager_ndppp", "parset for ndppp recipe")

        # create the output file paths
        # [1] output -> prepare_output
        output_mapfile = self._write_datamap_to_file(None, "prepare_output")
        time_slices_mapfile = self._write_datamap_to_file(
            None, "prepare_time_slices")
        ms_per_image_mapfile = self._write_datamap_to_file(
            None, "ms_per_image")

        # get some parameters from the imaging pipeline parset:
        subbandgroups_per_ms = self.parset.getInt(
            "LongBaseline.subbandgroups_per_ms")
        subbands_per_subbandgroup = self.parset.getInt(
            "LongBaseline.subbands_per_subbandgroup")

        outputs = self.run_task(
            "long_baseline",
            input_ms_map_path,
            parset=ndppp_parset_path,
            target_mapfile=target_mapfile,
            subbandgroups_per_ms=subbandgroups_per_ms,
            subbands_per_subbandgroup=subbands_per_subbandgroup,
            mapfile=output_mapfile,
            slices_mapfile=time_slices_mapfile,
            ms_per_image_mapfile=ms_per_image_mapfile,
            working_directory=self.scratch_directory,
            processed_ms_dir=processed_ms_dir,
            add_beam_tables=add_beam_tables,
            output_ms_mapfile=output_ms_mapfile)

        # validate that the prepare phase produced the correct data
        for key in ('mapfile', 'slices_mapfile', 'ms_per_image_mapfile'):
            if key not in outputs:
                error_msg = ("The imager_prepare master script did not "
                             "return correct data; missing: {0}".format(key))
                self.logger.error(error_msg)
                raise PipelineException(error_msg)

        # Return the mapfiles paths with processed data
        return output_mapfile, outputs["slices_mapfile"], ms_per_image_mapfile, \
            processed_ms_dir
Example #11
    def pipeline_logic(self):
        """
        Define the individual tasks that comprise the current pipeline.
        This method will be invoked by the base-class's `go()` method.
        """
        # *********************************************************************
        # 1. Prepare phase, collect data from parset and input mapfiles
        # Create a parameter-subset containing only python-control stuff.
        py_parset = self.parset.makeSubset(
            'ObsSW.Observation.ObservationControl.PythonControl.')

        # Get input/output-data products specifications.
        self._get_io_product_specs()

        # Create some needed directories
        job_dir = self.config.get("layout", "job_directory")
        mapfile_dir = os.path.join(job_dir, "mapfiles")
        create_directory(mapfile_dir)
        parset_dir = os.path.join(job_dir, "parsets")
        create_directory(parset_dir)

        # *********************************************************************
        # 2. Copy the instrument files to the correct node
        # The instrument files are currently located on the wrong nodes.
        # Copy them to the correct nodes and assign the instrument table
        # the now-correct data.

        # Copy the instrument files to the correct nodes: failures might
        # happen; update both instrument- and datamap to contain only
        # successes!
        self._copy_instrument_files(mapfile_dir)

        # Write input- and output data map-files.
        data_mapfile = os.path.join(mapfile_dir, "data.mapfile")
        self.input_data['data'].save(data_mapfile)
        copied_instrument_mapfile = os.path.join(mapfile_dir,
                                                 "copied_instrument.mapfile")
        self.input_data['instrument'].save(copied_instrument_mapfile)
        self.logger.debug("Wrote input data mapfile: %s" % data_mapfile)

        # Save copied files to a new mapfile
        corrected_mapfile = os.path.join(mapfile_dir, "corrected_data.mapfile")
        self.output_data['data'].save(corrected_mapfile)
        self.logger.debug("Wrote output corrected data mapfile: %s" %
                          corrected_mapfile)

        # Validate number of copied files, abort on zero files copied
        if len(self.input_data['data']) == 0:
            self.logger.warn("No input data files to process. Bailing out!")
            return 0

        self.logger.debug("Processing: %s" %
                          ', '.join(str(f) for f in self.input_data['data']))

        # *********************************************************************
        # 3. Create database needed for performing work:
        #    - GVDS, describing data on the compute nodes
        #    - SourceDB, for skymodel (A-team)
        #    - ParmDB for outputting solutions
        with duration(self, "vdsmaker"):
            gvds_file = self.run_task("vdsmaker", data_mapfile)['gvds']

        # Read metadata (e.g., start- and end-time) from the GVDS file.
        with duration(self, "vdsreader"):
            vdsinfo = self.run_task("vdsreader", gvds=gvds_file)

        # Create an empty parmdb for DPPP
        with duration(self, "setupparmdb"):
            parmdb_mapfile = self.run_task("setupparmdb",
                                           data_mapfile)['mapfile']

        # Create a sourcedb to be used by the demixing phase of DPPP
        # The user-supplied sky model can either be a name, in which case the
        # pipeline will search for a file <name>.skymodel in the default search
        # path $LOFARROOT/share/pipeline/skymodels; or a full path.
        # It is an error if the file does not exist.
        skymodel = py_parset.getString('PreProcessing.SkyModel')
        if not os.path.isabs(skymodel):
            skymodel = os.path.join(
                # This should really become os.environ['LOFARROOT']
                self.config.get('DEFAULT', 'lofarroot'),
                'share',
                'pipeline',
                'skymodels',
                skymodel + '.skymodel')
        if not os.path.isfile(skymodel):
            raise PipelineException("Skymodel %s does not exist" % skymodel)
        with duration(self, "setupsourcedb"):
            sourcedb_mapfile = self.run_task("setupsourcedb",
                                             data_mapfile,
                                             skymodel=skymodel,
                                             suffix='.dppp.sourcedb',
                                             type='blob')['mapfile']

        # *********************************************************************
        # 4. Run NDPPP to demix the A-Team sources
        # Create a parameter-subset for DPPP and write it to file.
        ndppp_parset = os.path.join(parset_dir, "NDPPP.parset")
        py_parset.makeSubset('DPPP.').writeFile(ndppp_parset)

        # Run the Default Pre-Processing Pipeline (DPPP);
        with duration(self, "ndppp"):
            dppp_mapfile = self.run_task(
                "ndppp",
                data_mapfile,
                data_start_time=vdsinfo['start_time'],
                data_end_time=vdsinfo['end_time'],
                demix_always=py_parset.getStringVector(
                    'PreProcessing.demix_always'),
                demix_if_needed=py_parset.getStringVector(
                    'PreProcessing.demix_if_needed'),
                parset=ndppp_parset,
                parmdb_mapfile=parmdb_mapfile,
                sourcedb_mapfile=sourcedb_mapfile,
                mapfile=os.path.join(mapfile_dir, 'dppp.mapfile'))['mapfile']

        # ********************************************************************
        # 5. Run BBS using the instrument file from the target observation
        # Create an empty sourcedb for BBS
        with duration(self, "setupsourcedb"):
            sourcedb_mapfile = self.run_task("setupsourcedb",
                                             data_mapfile)['mapfile']

        # Create a parameter-subset for BBS and write it to file.
        bbs_parset = os.path.join(parset_dir, "BBS.parset")
        py_parset.makeSubset('BBS.').writeFile(bbs_parset)

        # Run BBS to calibrate the target source(s).
        with duration(self, "bbs_reducer"):
            bbs_mapfile = self.run_task(
                "bbs_reducer",
                dppp_mapfile,
                parset=bbs_parset,
                instrument_mapfile=copied_instrument_mapfile,
                sky_mapfile=sourcedb_mapfile)['data_mapfile']

        # *********************************************************************
        # 6. Copy the MS's to their final output destination.
        # When the copier recipe has run, the map-file named in
        # corrected_mapfile will contain an updated map of output files.
        with duration(self, "copier"):
            self.run_task("copier",
                          mapfile_source=bbs_mapfile,
                          mapfile_target=corrected_mapfile,
                          mapfiles_dir=mapfile_dir,
                          mapfile=corrected_mapfile)

        # *********************************************************************
        # 7. Create feedback for further processing by the LOFAR framework
        metadata_file = "%s_feedback_Correlated" % (self.parset_file, )
        with duration(self, "get_metadata"):
            self.run_task(
                "get_metadata",
                corrected_mapfile,
                parset_prefix=(self.parset.getString('prefix') +
                               self.parset.fullModuleName('DataProducts')),
                product_type="Correlated",
                metadata_file=metadata_file)

        self.send_feedback_processing(parameterset())
        self.send_feedback_dataproducts(parameterset(metadata_file))

        return 0
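
The duration(self, "...") blocks above wrap each task in a timing context. A minimal hypothetical equivalent, assuming only that the recipe exposes a logger (the real LOFAR helper may record timings differently):

    import time
    from contextlib import contextmanager

    @contextmanager
    def duration(recipe, task_name):
        # Log how long the wrapped task takes, even if it raises.
        start = time.time()
        try:
            yield
        finally:
            recipe.logger.info(
                "Task %s took %.1f s" % (task_name, time.time() - start))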
Example #12
    def pipeline_logic(self):
        """
        Define the individual tasks that comprise the current pipeline.
        This method will be invoked by the base-class's `go()` method.
        """
        self.logger.info("Starting imager pipeline")

        # Define scratch directory to be used by the compute nodes.
        self.scratch_directory = os.path.join(
            self.inputs['working_directory'], self.inputs['job_name'])
        # Get input/output-data products specifications.
        self._get_io_product_specs()

        # Remove the leading parset identifiers; keep only the PythonControl subset
        full_parset = self.parset
        self.parset = self.parset.makeSubset(
            self.parset.fullModuleName('PythonControl') + '.')  # remove this

        # Create directories to store communication and data files

        job_dir = self.config.get("layout", "job_directory")

        self.parset_dir = os.path.join(job_dir, "parsets")
        create_directory(self.parset_dir)
        self.mapfile_dir = os.path.join(job_dir, "mapfiles")
        create_directory(self.mapfile_dir)

        # *********************************************************************
        # (INPUT) Get the input from external sources and create pipeline types
        # Input measurement sets
        input_mapfile = os.path.join(self.mapfile_dir, "uvdata.mapfile")
        self.input_data.save(input_mapfile)
        # storedata_map(input_mapfile, self.input_data)
        self.logger.debug(
            "Wrote input UV-data mapfile: {0}".format(input_mapfile))

        # Provides location for the scratch directory and concat.ms location
        target_mapfile = os.path.join(self.mapfile_dir, "target.mapfile")
        self.target_data.save(target_mapfile)
        self.logger.debug(
            "Wrote target mapfile: {0}".format(target_mapfile))

        # images datafiles
        output_image_mapfile = os.path.join(self.mapfile_dir, "images.mapfile")
        self.output_data.save(output_image_mapfile)
        self.logger.debug(
            "Wrote output sky-image mapfile: {0}".format(output_image_mapfile))

        # Location of the output measurement set
        output_correlated_mapfile = os.path.join(self.mapfile_dir, 
                                                 "correlated.mapfile")
        self.output_correlated_data.save(output_correlated_mapfile)
        self.logger.debug(
            "Wrote output correlated mapfile: {0}".format(output_correlated_mapfile))

        # Get pipeline parameters from the toplevel recipe
        # TODO: This is a backdoor option to manually add beamtables when these
        # are missing on the provided ms. There is NO use case for users of the
        # pipeline
        add_beam_tables = self.parset.getBool(
                                    "Imaging.addBeamTables", False)


        number_of_major_cycles = self.parset.getInt(
                                    "Imaging.number_of_major_cycles")

        # Almost always a user prefers a partial success over a failed pipeline
        output_result_of_last_succesfull_cycle = self.parset.getBool(
                            "Imaging.output_on_error", True)


        if number_of_major_cycles < 3:
            self.logger.error(
                "The number of major cycles must be 3 or higher, correct"
                " the key: Imaging.number_of_major_cycles")
            raise PipelineException(
                     "Incorrect number_of_major_cycles in the parset")


        # ******************************************************************
        # (1) prepare phase: copy and collect the ms
        concat_ms_map_path, timeslice_map_path, ms_per_image_map_path, \
            processed_ms_dir = self._prepare_phase(input_mapfile,
                                    target_mapfile, add_beam_tables)

        # We start with an empty source_list map. It should contain n_output
        # entries all set to empty strings
        source_list_map_path = os.path.join(self.mapfile_dir,
                                        "initial_sourcelist.mapfile")
        source_list_map = DataMap.load(target_mapfile) # copy the output map
        for item in source_list_map:
            item.file = ""             # set all to empty string
        source_list_map.save(source_list_map_path)

        succesfull_cycle_mapfiles_dict = None
        for idx_cycle in range(number_of_major_cycles):
            try:
                # *****************************************************************
                # (2) Create dbs and sky model
                parmdbs_path, sourcedb_map_path = self._create_dbs(
                            concat_ms_map_path, timeslice_map_path, idx_cycle,
                            source_list_map_path = source_list_map_path,
                            skip_create_dbs = False)


                # *****************************************************************
                # (3)  bbs_imager recipe.
                bbs_output = self._bbs(concat_ms_map_path, timeslice_map_path, 
                        parmdbs_path, sourcedb_map_path, idx_cycle, skip = False)

            
                # TODO: Extra recipe: concat timeslices using pyrap.concatms
                # (see prepare) redmine issue #6021
                # Done in imager_bbs.p at the node level after calibration 

                # *****************************************************************
                # (4) Get parameters awimager from the prepare_parset and inputs
                aw_image_mapfile, maxbaseline = self._aw_imager(concat_ms_map_path,
                            idx_cycle, sourcedb_map_path, number_of_major_cycles,
                            skip = False)

                # *****************************************************************
                # (5) Source finding
                source_list_map_path, found_sourcedb_path = self._source_finding(
                        aw_image_mapfile, idx_cycle, skip = False)
                # should the output be a sourcedb? instead of a sourcelist

                # save the active mapfiles: locations and content
                # Used to output the last successful cycle on error
                mapfiles_to_save = {'aw_image_mapfile':aw_image_mapfile,
                                    'source_list_map_path':source_list_map_path,
                                    'found_sourcedb_path':found_sourcedb_path,
                                    'concat_ms_map_path':concat_ms_map_path}
                succesfull_cycle_mapfiles_dict = self._save_active_mapfiles(idx_cycle, 
                                      self.mapfile_dir, mapfiles_to_save)

            # On exception there is the option to output the results of the 
            # last cycle without errors
            except KeyboardInterrupt as ex:
                raise ex

            except Exception as ex:
                self.logger.error("Encountered an fatal exception during self"
                                  "calibration. Aborting processing and return"
                                  " the last succesfull cycle results")
                self.logger.error(str(ex))

                # if we are in the first cycle always exit with exception
                if idx_cycle == 0:
                    raise ex

                if not output_result_of_last_succesfull_cycle:
                    raise ex
                
                # restore the mapfile variables
                aw_image_mapfile = succesfull_cycle_mapfiles_dict['aw_image_mapfile']
                source_list_map_path = succesfull_cycle_mapfiles_dict['source_list_map_path']
                found_sourcedb_path = succesfull_cycle_mapfiles_dict['found_sourcedb_path']
                concat_ms_map_path = succesfull_cycle_mapfiles_dict['concat_ms_map_path']

                # set the number_of_major_cycles to the correct number
                number_of_major_cycles = idx_cycle - 1
                max_cycles_reached = False
                break
            else:
                max_cycles_reached = True


        # TODO: minbaseline should be a parset value, as maxbaseline is.
        minbaseline = 0

        # *********************************************************************
        # (6) Finalize:
        placed_data_image_map, placed_correlated_map =  \
                                        self._finalize(aw_image_mapfile, 
            processed_ms_dir, ms_per_image_map_path, source_list_map_path,
            minbaseline, maxbaseline, target_mapfile, output_image_mapfile,
            found_sourcedb_path, concat_ms_map_path, output_correlated_mapfile)

        # *********************************************************************
        # (7) Get metadata
        # create a parset with information that is available on the toplevel

        self._get_meta_data(number_of_major_cycles, placed_data_image_map,
                       placed_correlated_map, full_parset, 
                       max_cycles_reached)


        return 0
Example #13
    def pipeline_logic(self):
        """
        Define the individual tasks that comprise the current pipeline.
        This method will be invoked by the base-class's `go()` method.
        """
        # *********************************************************************
        # 1. Prepare phase, collect data from parset and input mapfiles.
        py_parset = self.parset.makeSubset(
            self.parset.fullModuleName('PythonControl') + '.')

        # Get input/output-data products specifications.
        self._get_io_product_specs()

        job_dir = self.config.get("layout", "job_directory")
        parset_dir = os.path.join(job_dir, "parsets")
        mapfile_dir = os.path.join(job_dir, "mapfiles")

        # Create directories for temporary parset- and map files
        create_directory(parset_dir)
        create_directory(mapfile_dir)

        # Write input- and output data map-files
        input_data_mapfile = os.path.join(mapfile_dir, "input_data.mapfile")
        self.input_data.save(input_data_mapfile)
        output_data_mapfile = os.path.join(mapfile_dir, "output_data.mapfile")
        self.output_data.save(output_data_mapfile)

        if len(self.input_data) == 0:
            self.logger.warn("No input data files to process. Bailing out!")
            return 0

        self.logger.debug("Processing: %s" %
            ', '.join(str(f) for f in self.input_data))

        # *********************************************************************
        # 2. Create VDS-file and databases. The latter are needed when doing
        #    demixing within DPPP.
        with duration(self, "vdsmaker"):
            gvds_file = self.run_task("vdsmaker", input_data_mapfile)['gvds']

        # Read metadata (start, end times, pointing direction) from GVDS.
        with duration(self, "vdsreader"):
            vdsinfo = self.run_task("vdsreader", gvds=gvds_file)

        # Create a parameter database that will be used by the NDPPP demixing
        with duration(self, "setupparmdb"):
            parmdb_mapfile = self.run_task(
                "setupparmdb", input_data_mapfile,
                mapfile=os.path.join(mapfile_dir, 'dppp.parmdb.mapfile'),
                suffix='.dppp.parmdb'
            )['mapfile']
                
        # Create a source database from a user-supplied sky model
        # The user-supplied sky model can either be a name, in which case the
        # pipeline will search for a file <name>.skymodel in the default search
        # path $LOFARROOT/share/pipeline/skymodels; or a full path.
        # It is an error if the file does not exist.
        skymodel = py_parset.getString('PreProcessing.SkyModel')
        if not os.path.isabs(skymodel):
            skymodel = os.path.join(
                # This should really become os.environ['LOFARROOT']
                self.config.get('DEFAULT', 'lofarroot'),
                'share', 'pipeline', 'skymodels', skymodel + '.skymodel'
            )
        if not os.path.isfile(skymodel):
            raise PipelineException("Skymodel %s does not exist" % skymodel)
        with duration(self, "setupsourcedb"):
            sourcedb_mapfile = self.run_task(
                "setupsourcedb", input_data_mapfile,
                mapfile=os.path.join(mapfile_dir, 'dppp.sourcedb.mapfile'),
                skymodel=skymodel,
                suffix='.dppp.sourcedb',
                type='blob'
            )['mapfile']


        # *********************************************************************
        # 3. Average and flag data, using NDPPP.

        ndppp_parset = os.path.join(parset_dir, "NDPPP.parset")
        py_parset.makeSubset('DPPP.').writeFile(ndppp_parset)

        # Run the Default Pre-Processing Pipeline (DPPP);
        with duration(self, "ndppp"):
            output_data_mapfile = self.run_task("ndppp",
                (input_data_mapfile, output_data_mapfile),
                data_start_time=vdsinfo['start_time'],
                data_end_time=vdsinfo['end_time'],
                demix_always=
                    py_parset.getStringVector('PreProcessing.demix_always'),
                demix_if_needed=
                    py_parset.getStringVector('PreProcessing.demix_if_needed'),
                parset=ndppp_parset,
                parmdb_mapfile=parmdb_mapfile,
                sourcedb_mapfile=sourcedb_mapfile
            )['mapfile']

        # *********************************************************************
        # 6. Create feedback file for further processing by the LOFAR framework
        # Create a parset containing the metadata
        metadata_file = "%s_feedback_Correlated" % (self.parset_file,)
        with duration(self, "get_metadata"):
            self.run_task("get_metadata", output_data_mapfile,
                parset_prefix=(
                    self.parset.getString('prefix') +
                    self.parset.fullModuleName('DataProducts')),
                product_type="Correlated",
                metadata_file=metadata_file)

        self.send_feedback_processing(parameterset({'feedback_version': feedback_version}))
        self.send_feedback_dataproducts(parameterset(metadata_file))

        return 0
Example #14
    def _get_fov_and_station_diameter(self, measurement_set):
        """
        Calculate the fov, which is dependent on the station type,
        location and mode. For details see:
        (1) http://www.astron.nl/radio-observatory/astronomers/lofar-imaging-capabilities-sensitivity/lofar-imaging-capabilities/lofar
        """
        # Open the ms
        table_ms = pt.table(measurement_set)

        # Get antenna name and observation mode
        antenna = pt.table(table_ms.getkeyword("ANTENNA"))
        antenna_name = antenna.getcell('NAME', 0)
        antenna.close()

        observation = pt.table(table_ms.getkeyword("OBSERVATION"))
        antenna_set = observation.getcell('LOFAR_ANTENNA_SET', 0)
        observation.close()

        # static parameters for the station diameters ref (1)
        hba_core_diameter = 30.8
        hba_remote_diameter = 41.1
        lba_inner = 32.3
        lba_outer = 81.3

        # use measurement set information to ascertain antenna diameter
        station_diameter = None
        if antenna_name.count('HBA'):
            if antenna_name.count('CS'):
                station_diameter = hba_core_diameter
            elif antenna_name.count('RS'):
                station_diameter = hba_remote_diameter
        elif antenna_name.count('LBA'):
            if antenna_set.count('INNER'):
                station_diameter = lba_inner
            elif antenna_set.count('OUTER'):
                station_diameter = lba_outer

        # raise exception if the antenna is not of a supported type
        if station_diameter is None:
            self.logger.error(
                    'Unknown antenna type for antenna: {0} , {1}'.format(\
                              antenna_name, antenna_set))
            raise PipelineException(
                    "Unknown antenna type encountered in Measurement set")

        # Get the wavelength
        spectral_window_table = pt.table(table_ms.getkeyword(
                                                            "SPECTRAL_WINDOW"))
        freq = float(spectral_window_table.getcell("REF_FREQUENCY", 0))
        wave_length = pt.taql('CALC C()') / freq
        spectral_window_table.close()

        # Now calculate the FOV see ref (1)
        # alpha_one is a magic parameter: The value 1.3 is representative for a
        # WSRT dish, where it depends on the dish illumination
        alpha_one = 1.3

        # The ratio wave_length / station_diameter is in radians, so
        # convert to degrees for output
        fwhm = alpha_one * (wave_length / station_diameter) * (180 / math.pi)
        fov = fwhm / 2.0
        table_ms.close()

        return fov, station_diameter
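
A quick numeric check of the FWHM formula above, using the HBA core diameter and a hypothetical 150 MHz reference frequency (the method itself reads the frequency from the SPECTRAL_WINDOW table and the speed of light from pt.taql('CALC C()')):

    import math

    c = 299792458.0          # m/s
    freq = 150e6             # Hz, hypothetical REF_FREQUENCY
    station_diameter = 30.8  # m, hba_core_diameter from above
    alpha_one = 1.3

    wave_length = c / freq   # ~2.0 m
    fwhm = alpha_one * (wave_length / station_diameter) * (180 / math.pi)
    fov = fwhm / 2.0
    print(round(fwhm, 2), round(fov, 2))  # 4.83 2.42 (degrees)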
Example #15
    def _get_imaging_parameters(self, measurement_set, parset,
                autogenerate_parameters, specify_fov, fov):
        """
        Calculate and format some parameters that are determined at runtime,
        based on values in the measurement set and the input parameter set:

        a. <string> The cellsize
        b. <int> The npix in each of the two dimensions of the image
        c. <string> The largest baseline in the ms smaller than the maxbaseline
        d. <string> The number of projection planes

        The calculation of these parameters is done in two steps:

        1. Calculate intermediate results based on the ms.
        2. Calculate the actual target values using the intermediate results.
        """
        # *********************************************************************
        # 1. Get partial solutions from the parameter set
        # Get the parset and a number of raw parameters from this parset
        parset_object = get_parset(parset)
        baseline_limit = parset_object.getInt('maxbaseline')

        # Get the longest baseline
        max_baseline = pt.taql(
            'CALC sqrt(max([select sumsqr(UVW[:2]) from '
            '{0} where sumsqr(UVW[:2]) <{1} giving as memory]))'.format(
                measurement_set, baseline_limit * baseline_limit)
        )[0]  # ask Ger van Diepen for details if necessary
        # Calculate the wave_length
        table_ms = pt.table(measurement_set)
        table_spectral_window = pt.table(
                                        table_ms.getkeyword("SPECTRAL_WINDOW"))
        freq = table_spectral_window.getcell("REF_FREQUENCY", 0)

        table_spectral_window.close()
        wave_length = pt.taql('CALC C()') / freq
        wave_length = wave_length[0]

        # Calculate the cell_size from the ms
        arc_sec_in_degree = 3600
        arc_sec_in_rad = (180.0 / math.pi) * arc_sec_in_degree
        cell_size = (1.0 / 3) * (wave_length / float(max_baseline))\
             * arc_sec_in_rad

        # Calculate the number of pixels in x and y dim
        #    fov and diameter depending on the antenna name
        fov_from_ms, station_diameter = self._get_fov_and_station_diameter(
                                                            measurement_set)

        # Use the fov to calculate a semi user-specified npix and cellsize.
        # The npix thus depends on the ms cellsize and fov.
        # Do not use the user-supplied fov if autogenerating parameters.
        if not autogenerate_parameters and specify_fov:
            if fov == 0.0:
                raise PipelineException("fov set to 0.0: invalid value.")

        # else use full resolution (calculate the fov)
        else:
            self.logger.info("Using fov calculated on measurement data: " +
                             str(fov_from_ms))
            fov = fov_from_ms

        # ********************************************************************
        # 2. Calculate the ms based output variables
        # 'optimal' npix based on measurement set calculations or user specified
        npix = (arc_sec_in_degree * fov) / cell_size

        # Get the closest power of two larger than the calculated pixel size
        npix = self._nearest_ceiled_power2(npix)

        # Get the max w with baseline < baseline_limit
        w_max = pt.taql('CALC max([select UVW[2] from ' + \
            '{0} where sumsqr(UVW[:2]) <{1} giving as memory])'.format(
            measurement_set, baseline_limit * baseline_limit))[0]

        # Calculate number of projection planes
        w_proj_planes = min(257, math.floor((max_baseline * wave_length) /
                                             (station_diameter ** 2)))
        w_proj_planes = int(round(w_proj_planes))

        # Maximum number of proj planes is capped at max(1024, npix); contact
        # George Heald or Ger van Diepen if this exception occurs
        maxsupport = max(1024, npix)
        if w_proj_planes > maxsupport:
            raise Exception("The number of projections planes for the current"
                            + "measurement set is to large.")

        # *********************************************************************
        # 3. if the npix from the parset differs from the ms calculations,
        # calculate a sizeconverter value (to be applied to the cellsize)
        if npix < 256:
            self.logger.warn("Using an image size smaller than 256x256: "
                "this leads to problematic imaging in some instances!")

        # If we are not autocalculating based on ms or fov, use the npix
        # and cell_size specified in the parset
        # keep the wmax and w_proj_planes
        if (not autogenerate_parameters and not specify_fov):
            npix = parset_object.getString('npix')
            cell_size_formatted = parset_object.getString('cellsize')
        else:
            cell_size_formatted = str(
                        int(round(cell_size))) + 'arcsec'

        self.logger.info("Using the following awimager parameters:"
            " cell_size: {0}, npix: {1},".format(
                        cell_size_formatted, npix) +
             " w_max: {0}, w_proj_planes: {1}".format(w_max, w_proj_planes))

        return cell_size_formatted, str(npix), str(w_max), str(w_proj_planes)
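
An illustrative pass through the cell-size and npix arithmetic above, with hypothetical numbers (150 MHz, a 6 km longest baseline, a 5-degree fov):

    import math

    arc_sec_in_degree = 3600
    arc_sec_in_rad = (180.0 / math.pi) * arc_sec_in_degree

    wave_length = 299792458.0 / 150e6   # ~2.0 m
    max_baseline = 6000.0               # m, hypothetical longest baseline
    cell_size = (1.0 / 3) * (wave_length / max_baseline) * arc_sec_in_rad

    fov = 5.0                           # degrees, hypothetical
    npix = (arc_sec_in_degree * fov) / cell_size
    print(round(cell_size, 1), int(npix))  # 22.9 785
    # _nearest_ceiled_power2 would then round npix up to 1024.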
Example #16
    def pipeline_logic(self):
        """
        Define the individual tasks that comprise the current pipeline.
        This method will be invoked by the base-class's `go()` method.
        """
        # *********************************************************************
        # 1. Get input from parset, validate and cast to pipeline 'data types'
        #    Only perform work on existing files
        #    Create needed directories
        # Create a parameter-subset containing only python-control stuff.
        py_parset = self.parset.makeSubset(
            self.parset.fullModuleName('PythonControl') + '.')

        # Get input/output-data products specifications.
        self._get_io_product_specs()

        job_dir = self.config.get("layout", "job_directory")
        parset_dir = os.path.join(job_dir, "parsets")
        mapfile_dir = os.path.join(job_dir, "mapfiles")

        # Create directories for temporary parset- and map files
        create_directory(parset_dir)
        create_directory(mapfile_dir)

        # Write input- and output data map-files
        input_correlated_mapfile = os.path.join(mapfile_dir,
                                                "input_correlated.mapfile")
        output_correlated_mapfile = os.path.join(mapfile_dir,
                                                 "output_correlated.mapfile")
        output_instrument_mapfile = os.path.join(mapfile_dir,
                                                 "output_instrument.mapfile")
        self.input_data['correlated'].save(input_correlated_mapfile)
        self.output_data['correlated'].save(output_correlated_mapfile)
        self.output_data['instrument'].save(output_instrument_mapfile)

        if len(self.input_data['correlated']) == 0:
            self.logger.warn("No input data files to process. Bailing out!")
            return 0

        self.logger.debug(
            "Processing: %s" %
            ', '.join(str(f) for f in self.input_data['correlated']))

        # *********************************************************************
        # 2. Create database needed for performing work:
        #    VDS, describing data on the nodes
        #    SourceDB, for skymodel (A-team)
        #    ParmDB for outputting solutions
        # Produce a GVDS file describing the data on the compute nodes.
        with duration(self, "vdsmaker"):
            gvds_file = self.run_task("vdsmaker",
                                      input_correlated_mapfile)['gvds']

        # Read metadata (start, end times, pointing direction) from GVDS.
        with duration(self, "vdsreader"):
            vdsinfo = self.run_task("vdsreader", gvds=gvds_file)

        # Create an empty parmdb for DPPP
        with duration(self, "setupparmdb"):
            parmdb_mapfile = self.run_task("setupparmdb",
                                           input_correlated_mapfile,
                                           mapfile=os.path.join(
                                               mapfile_dir,
                                               'dppp.parmdb.mapfile'),
                                           suffix='.dppp.parmdb')['mapfile']

        # Create a sourcedb to be used by the demixing phase of DPPP
        # The user-supplied sky model can either be a name, in which case the
        # pipeline will search for a file <name>.skymodel in the default search
        # path $LOFARROOT/share/pipeline/skymodels; or a full path.
        # It is an error if the file does not exist.
        skymodel = py_parset.getString('PreProcessing.SkyModel')
        if not os.path.isabs(skymodel):
            skymodel = os.path.join(
                # This should really become os.environ['LOFARROOT']
                self.config.get('DEFAULT', 'lofarroot'),
                'share',
                'pipeline',
                'skymodels',
                skymodel + '.skymodel')
        if not os.path.isfile(skymodel):
            raise PipelineException("Skymodel %s does not exist" % skymodel)
        with duration(self, "setupsourcedb"):
            sourcedb_mapfile = self.run_task("setupsourcedb",
                                             input_correlated_mapfile,
                                             mapfile=os.path.join(
                                                 mapfile_dir,
                                                 'dppp.sourcedb.mapfile'),
                                             skymodel=skymodel,
                                             suffix='.dppp.sourcedb',
                                             type='blob')['mapfile']

        # *********************************************************************
        # 3. Run NDPPP to demix the A-Team sources
        #    TODO: Do flagging?
        # Create a parameter-subset for DPPP and write it to file.
        ndppp_parset = os.path.join(parset_dir, "NDPPP.parset")
        py_parset.makeSubset('DPPP.').writeFile(ndppp_parset)

        # Run the Default Pre-Processing Pipeline (DPPP);
        with duration(self, "ndppp"):
            dppp_mapfile = self.run_task(
                "ndppp",
                input_correlated_mapfile,
                data_start_time=vdsinfo['start_time'],
                data_end_time=vdsinfo['end_time'],
                demix_always=py_parset.getStringVector(
                    'PreProcessing.demix_always'),
                demix_if_needed=py_parset.getStringVector(
                    'PreProcessing.demix_if_needed'),
                parset=ndppp_parset,
                parmdb_mapfile=parmdb_mapfile,
                sourcedb_mapfile=sourcedb_mapfile)['mapfile']

        # *********************************************************************
        # 4. Run BBS with a model of the calibrator
        #    Create a parmdb for calibration solutions
        #    Create sourcedb with known calibration solutions
        #    Run bbs with both
        # Create an empty parmdb for BBS
        with duration(self, "setupparmdb"):
            parmdb_mapfile = self.run_task("setupparmdb",
                                           dppp_mapfile,
                                           mapfile=os.path.join(
                                               mapfile_dir,
                                               'bbs.parmdb.mapfile'),
                                           suffix='.bbs.parmdb')['mapfile']

        # Create a sourcedb based on sourcedb's input argument "skymodel"
        with duration(self, "setupsourcedb"):
            sourcedb_mapfile = self.run_task(
                "setupsourcedb",
                input_correlated_mapfile,
                skymodel=os.path.join(
                    self.config.get('DEFAULT', 'lofarroot'), 'share',
                    'pipeline', 'skymodels',
                    py_parset.getString('Calibration.SkyModel') + '.skymodel'),
                mapfile=os.path.join(mapfile_dir, 'bbs.sourcedb.mapfile'),
                suffix='.bbs.sourcedb')['mapfile']

        # Create a parameter-subset for BBS and write it to file.
        bbs_parset = os.path.join(parset_dir, "BBS.parset")
        py_parset.makeSubset('BBS.').writeFile(bbs_parset)

        # Run BBS to calibrate the calibrator source(s).
        with duration(self, "bbs_reducer"):
            bbs_mapfile = self.run_task(
                "bbs_reducer",
                dppp_mapfile,
                parset=bbs_parset,
                instrument_mapfile=parmdb_mapfile,
                sky_mapfile=sourcedb_mapfile)['data_mapfile']

        # *********************************************************************
        # 5. Perform gain outlier correction on the found calibration solutions
        #    Swapping outliers in the gains with the median
        # Export the calibration solutions using gainoutliercorrection and store
        # the results in the files specified in the instrument mapfile.
        export_instrument_model = py_parset.getBool(
            'Calibration.exportCalibrationParameters', False)

        with duration(self, "gainoutliercorrection"):
            self.run_task("gainoutliercorrection",
                          (parmdb_mapfile, output_instrument_mapfile),
                          sigma=1.0,
                          export_instrument_model=export_instrument_model
                          )  # TODO: Parset parameter

        # *********************************************************************
        # 6. Copy corrected MS's to their final output destination.
        with duration(self, "copier"):
            self.run_task("copier",
                          mapfile_source=bbs_mapfile,
                          mapfile_target=output_correlated_mapfile,
                          mapfiles_dir=mapfile_dir,
                          mapfile=output_correlated_mapfile)

        # *********************************************************************
        # 7. Create feedback file for further processing by the LOFAR framework
        #    a. get metadata of the measurement sets
        #    b. get metadata of the instrument models
        #    c. join the two files and write the final feedback file
        correlated_metadata = os.path.join(parset_dir, "correlated.metadata")
        instrument_metadata = os.path.join(parset_dir, "instrument.metadata")
        with duration(self, "get_metadata"):
            self.run_task(
                "get_metadata",
                output_correlated_mapfile,
                parset_file=correlated_metadata,
                parset_prefix=(self.parset.getString('prefix') +
                               self.parset.fullModuleName('DataProducts')),
                product_type="Correlated")

        with duration(self, "get_metadata"):
            self.run_task(
                "get_metadata",
                output_instrument_mapfile,
                parset_file=instrument_metadata,
                parset_prefix=(self.parset.getString('prefix') +
                               self.parset.fullModuleName('DataProducts')),
                product_type="InstrumentModel")

        parset = parameterset(correlated_metadata)
        parset.adoptFile(instrument_metadata)
        parset.writeFile(self.parset_feedback_file)

        return 0
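
A note on the `duration(self, "...")` calls in these snippets: the context
manager itself is not shown. A minimal sketch of such a timing helper
(assumed implementation; the real one lives in the lofarpipe support
modules and may differ):

    import time
    from contextlib import contextmanager

    @contextmanager
    def duration(recipe, task_name):
        """Log how long the enclosed pipeline step took."""
        start = time.time()
        try:
            yield
        finally:
            recipe.logger.info(
                "Task %s took %.2f s" % (task_name, time.time() - start))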
Example no. 17
0
    def go(self):
        """
        This is where the work of the recipe gets done.
        Subclasses should define their own go() method, but remember to call
        this one to perform necessary initialisation.
        """
        # Every recipe needs a job identifier
        if "job_name" not in self.inputs:
            raise PipelineException("Job undefined")

        if "start_time" not in self.inputs:
            import datetime
            self.inputs["start_time"] = datetime.datetime.utcnow().replace(microsecond=0).isoformat()

        # The config is passed in from the spawning recipe; if this is the
        # start of a pipeline, there is none yet, so read it from file.
        if not hasattr(self, "config"):
            self.config = self._read_config()

        # Ensure we have a runtime directory
        if 'runtime_directory' not in self.inputs:
            self.inputs["runtime_directory"] = self.config.get(
                "DEFAULT", "runtime_directory"
            )
        else:
            self.config.set('DEFAULT', 'runtime_directory', self.inputs['runtime_directory'])
        if not os.access(self.inputs['runtime_directory'], os.F_OK):
            raise IOError("Runtime directory doesn't exist")

        # ...and task files, if applicable
        if "task_files" not in self.inputs:
            try:
                self.inputs["task_files"] = utilities.string_to_list(
                    self.config.get('DEFAULT', "task_files")
                )
            except NoOptionError:
                self.inputs["task_files"] = []
        self.task_definitions = ConfigParser(self.config.defaults())
        print("Reading task definition file(s): %s" % \
                             ",".join(self.inputs["task_files"]), file=sys.stderr)
        self.task_definitions.read(self.inputs["task_files"])
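        # Task definition files use standard ConfigParser syntax; each
        # section names a task and must provide at least a 'recipe' key.
        # Illustrative sketch of an entry (keys other than 'recipe' are
        # hypothetical):
        #
        #     [ndppp]
        #     recipe = dppp
        #     nproc = 8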

        # Specify the working directory on the compute nodes
        if 'working_directory' not in self.inputs:
            self.inputs['working_directory'] = self.config.get(
                "DEFAULT", "working_directory"
            )
        else:
            self.config.set("DEFAULT", "working_directory", self.inputs['working_directory'])

        try:
            self.recipe_path = [
                os.path.join(root, 'master') for root in utilities.string_to_list(
                    self.config.get('DEFAULT', "recipe_directories")
                )
            ]
        except NoOptionError:
            self.recipe_path = []

        # At this point, the recipe inputs must be complete. If not, exit.
        if not self.inputs.complete():
            raise PipelineException(
                "Required inputs not available: %s" %
                " ".join(self.inputs.missing())
            )

        # Only configure handlers if our parent is the root logger.
        # Otherwise, our parent should have done it for us.
        if isinstance(self.logger.parent, logging.RootLogger):
            self._setup_logging()

        self.logger.debug("Pipeline start time: %s" % self.inputs['start_time'])
    def pipeline_logic(self):
        """
        Define the individual tasks that comprise the current pipeline.
        This method will be invoked by the base class's `go()` method.
        """
        # *********************************************************************
        # 1. Prepare phase, collect data from parset and input mapfiles.
        py_parset = self.parset.makeSubset(
            self.parset.fullModuleName('PythonControl') + '.')

        # Get input/output-data products specifications.
        self._get_io_product_specs()

        job_dir = self.config.get("layout", "job_directory")
        parset_dir = os.path.join(job_dir, "parsets")
        mapfile_dir = os.path.join(job_dir, "mapfiles")

        # Create directories for temporary parset- and map files
        create_directory(parset_dir)
        create_directory(mapfile_dir)

        # Write input- and output data map-files
        input_correlated_mapfile = os.path.join(
            mapfile_dir, "input_correlated.mapfile"
        )
        output_correlated_mapfile = os.path.join(
            mapfile_dir, "output_correlated.mapfile"
        )
        output_instrument_mapfile = os.path.join(
            mapfile_dir, "output_instrument.mapfile"
        )
        self.input_data['correlated'].save(input_correlated_mapfile)
        self.output_data['correlated'].save(output_correlated_mapfile)
        self.output_data['instrument'].save(output_instrument_mapfile)
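        # Each saved mapfile is a serialised DataMap: a list of
        # (host, filepath, skip) entries that downstream recipes read back.
        # Illustrative sketch (hostname and path are made up):
        #
        #     from lofarpipe.support.data_map import DataMap
        #     dm = DataMap([('locus001', '/data/L12345_SB000.MS', False)])
        #     dm.save('/tmp/example.mapfile')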

        if len(self.input_data['correlated']) == 0:
            self.logger.warn("No input data files to process. Bailing out!")
            return 0

        self.logger.debug("Processing: %s" %
            ', '.join(str(f) for f in self.input_data['correlated']))

        # *********************************************************************
        # 2. Create a VDS file and databases. The latter are needed when
        #    doing demixing within DPPP.
        with duration(self, "vdsmaker"):
            gvds_file = self.run_task(
                "vdsmaker", input_correlated_mapfile
            )['gvds']

        # Read metadata (start, end times, pointing direction) from GVDS.
        with duration(self, "vdsreader"):
            vdsinfo = self.run_task("vdsreader", gvds=gvds_file)
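        # vdsinfo is a dict of observation metadata; the keys consumed
        # below are 'start_time' and 'end_time' (see the ndppp call in
        # step 3).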

        # Create a parameter database that will be used by NDPPP for demixing.
        with duration(self, "setupparmdb"):
            parmdb_mapfile = self.run_task(
                "setupparmdb", input_correlated_mapfile,
                mapfile=os.path.join(mapfile_dir, 'dppp.parmdb.mapfile'),
                suffix='.dppp.parmdb'
            )['mapfile']

        # Create a source database from a user-supplied sky model.
        # The user-supplied sky model can either be a name, in which case the
        # pipeline will search for a file <name>.skymodel in the default search
        # path $LOFARROOT/share/pipeline/skymodels; or a full path.
        # It is an error if the file does not exist.
        skymodel = py_parset.getString('PreProcessing.SkyModel')
        if not os.path.isabs(skymodel):
            skymodel = os.path.join(
                # This should really become os.environ['LOFARROOT']
                self.config.get('DEFAULT', 'lofarroot'),
                'share', 'pipeline', 'skymodels', skymodel + '.skymodel'
            )
        if not os.path.isfile(skymodel):
            raise PipelineException("Skymodel %s does not exist" % skymodel)
        with duration(self, "setupsourcedb"):
            sourcedb_mapfile = self.run_task(
                "setupsourcedb", input_correlated_mapfile,
                mapfile=os.path.join(mapfile_dir, 'dppp.sourcedb.mapfile'),
                skymodel=skymodel,
                suffix='.dppp.sourcedb',
                type='blob'
            )['mapfile']

        # *********************************************************************
        # 3. Average and flag data, using NDPPP.
        ndppp_parset = os.path.join(parset_dir, "NDPPP.parset")
        py_parset.makeSubset('DPPP.').writeFile(ndppp_parset)
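        # The DPPP. subset written here typically lists the processing
        # steps and their options, e.g. (illustrative only; the actual
        # contents come from the user-supplied parset):
        #
        #     steps = [preflagger, aoflagger, averager]
        #     averager.freqstep = 16
        #     averager.timestep = 2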

        # Run the Default Pre-Processing Pipeline (DPPP).
        with duration(self, "ndppp"):
            dppp_mapfile = self.run_task(
                "ndppp", input_correlated_mapfile,
                data_start_time=vdsinfo['start_time'],
                data_end_time=vdsinfo['end_time'],
                demix_always=py_parset.getStringVector(
                    'PreProcessing.demix_always'),
                demix_if_needed=py_parset.getStringVector(
                    'PreProcessing.demix_if_needed'),
                parset=ndppp_parset,
                parmdb_mapfile=parmdb_mapfile,
                sourcedb_mapfile=sourcedb_mapfile
            )['mapfile']

        # *********************************************************************
        # 4. Create a sourcedb from the user-supplied sky model,
        #    and an empty parmdb.
        skymodel = py_parset.getString('Calibration.SkyModel')

        # The user-supplied sky model can either be a name, in which case the
        # pipeline will search for a file <name>.skymodel in the default search
        # path $LOFARROOT/share/pipeline/skymodels; or a full path.
        # It is an error if the file does not exist.
        if not os.path.isabs(skymodel):
            skymodel = os.path.join(
                # This should really become os.environ['LOFARROOT']
                self.config.get('DEFAULT', 'lofarroot'),
                'share', 'pipeline', 'skymodels', skymodel + '.skymodel'
            )
        if not os.path.isfile(skymodel):
            raise PipelineException("Skymodel %s does not exist" % skymodel)
        with duration(self, "setupsourcedb"):
            sourcedb_mapfile = self.run_task(
                "setupsourcedb", dppp_mapfile,
                skymodel=skymodel,
                suffix='.bbs.sourcedb'
            )['mapfile']

        with duration(self, "setupparmdb"):
            parmdb_mapfile = self.run_task(
                "setupparmdb", dppp_mapfile,
                suffix='.bbs.parmdb'
            )['mapfile']

        # *********************************************************************
        # 5. Run BBS to calibrate the data.

        # Create a parameter subset for BBS
        bbs_parset = os.path.join(parset_dir, "BBS.parset")
        py_parset.makeSubset('BBS.').writeFile(bbs_parset)
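        # The BBS. subset typically defines the solve/correct strategy,
        # e.g. (illustrative only):
        #
        #     Strategy.Steps = [solve, correct]
        #     Step.solve.Operation = SOLVE
        #     Step.correct.Operation = CORRECT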
        with duration(self, "bbs_reducer"):
            bbs_mapfile = self.run_task(
                "bbs_reducer", dppp_mapfile,
                parset=bbs_parset,
                instrument_mapfile=parmdb_mapfile,
                sky_mapfile=sourcedb_mapfile
            )['data_mapfile']

        # *********************************************************************
        # 6. Copy output products to their final destination.
        #    a. copy the measurement sets
        #    b. copy the calculated instrument models
        #  When the copier recipe has run, the map-files named in
        #  output_correlated_mapfile and output_instrument_mapfile will
        #  contain an updated map of output files.
        with duration(self, "copier"):
            self.run_task("copier",
                mapfile_source=bbs_mapfile,
                mapfile_target=output_correlated_mapfile,
                mapfiles_dir=mapfile_dir,
                mapfile=output_correlated_mapfile
            )

        with duration(self, "copier"):
            self.run_task("copier",
                mapfile_source=parmdb_mapfile,
                mapfile_target=output_instrument_mapfile,
                mapfiles_dir=mapfile_dir,
                mapfile=output_instrument_mapfile
            )

        # *********************************************************************
        # 7. Create feedback file for further processing by the LOFAR framework
        #    a. get metadata of the measurement sets
        #    b. get metadata of the instrument models
        #    c. join the two files and write the final feedback file
        correlated_metadata = os.path.join(parset_dir, "correlated.metadata")
        instrument_metadata = os.path.join(parset_dir, "instrument.metadata")
        with duration(self, "get_metadata"):
            self.run_task("get_metadata", output_correlated_mapfile,
                parset_file=correlated_metadata,
                parset_prefix=(
                    self.parset.getString('prefix') +
                    self.parset.fullModuleName('DataProducts')),
                product_type="Correlated")

        with duration(self, "get_metadata"):
            self.run_task("get_metadata", output_instrument_mapfile,
                parset_file=instrument_metadata,
                parset_prefix=(
                    self.parset.getString('prefix') +
                    self.parset.fullModuleName('DataProducts')),
                product_type="InstrumentModel")

        parset = parameterset(correlated_metadata)
        parset.adoptFile(instrument_metadata)
        parset.writeFile(self.parset_feedback_file)

        return 0
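
A standalone sketch of the feedback-merging pattern used at the end of both
pipelines above, assuming the lofar.parameterset Python bindings are
available (file names are made up):

    from lofar.parameterset import parameterset

    ps = parameterset('correlated.metadata')  # load first metadata file
    ps.adoptFile('instrument.metadata')       # merge in the second
    ps.writeFile('feedback.parset')           # write the combined feedback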