Example 1
    def run(self, executable, nthreads, strategy, indirect, skip_flagged, wd, *infiles):
        with log_time(self.logger):
            self.logger.info("Processing %s" % " ".join(infiles))

            try:
                if not os.access(executable, os.X_OK):
                    raise ExecutableMissing(executable)

                working_dir = tempfile.mkdtemp(dir=wd, suffix=".%s" % (os.path.basename(__file__),))
                cmd = [executable, "-j", str(nthreads)]
                if strategy:
                    if os.path.exists(strategy):
                        cmd.extend(["-strategy", strategy])
                    else:
                        raise Exception("Strategy definition not available")
                if indirect:
                    cmd.extend(["-indirect-read"])
                if skip_flagged:
                    cmd.extend(["-skip-flagged"])
                cmd.extend(infiles)
                with CatchLog4CPlus(
                    working_dir,
                    self.logger.name,
                    os.path.basename(executable)
                ) as logger:
                    catch_segfaults(cmd, working_dir, None, logger)
            except ExecutableMissing, e:
                self.logger.error("%s not found" % (e.args[0]))
                return 1
            except CalledProcessError, e:
                self.logger.error(str(e))
                return 1
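Nearly every snippet in this listing follows the same wrapping pattern shown above: make a scratch directory, run the external binary through CatchLog4CPlus so its log4cplus output is forwarded to the pipeline logger, and hand the command to catch_segfaults so segfaulting runs are retried and failures surface as the CalledProcessError caught above. The sketch below distils that pattern; the lofarpipe.support import locations are assumptions based on the usual LOFAR pipeline layout, not taken from the snippets themselves.

import os
import shutil
import tempfile
from subprocess import CalledProcessError

from lofarpipe.support.pipelinelogging import CatchLog4CPlus  # assumed location
from lofarpipe.support.utilities import catch_segfaults       # assumed location

def run_wrapped(executable, args, environment, logger):
    """Sketch: run an external executable with log capture and segfault retries."""
    scratch_dir = tempfile.mkdtemp(suffix=".%s" % os.path.basename(__file__))
    try:
        cmd = [executable] + list(args)
        with CatchLog4CPlus(
            scratch_dir,                      # working directory for the log4cplus config
            logger.name,                      # logger that receives the captured output
            os.path.basename(executable),
        ) as wrapped_logger:
            catch_segfaults(cmd, scratch_dir, environment, wrapped_logger)
    except CalledProcessError as err:
        logger.error(str(err))
        return 1
    finally:
        shutil.rmtree(scratch_dir, ignore_errors=True)
    return 0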
Example 2
    def run(self, executable, file_pattern, input_dir, output_file, clobber):
        if not os.path.exists(input_dir):
            self.logger.error("Input directory (%s) not found" % input_dir)
            return 1

        self.logger.info("Processing %s" % input_dir)

        if os.path.exists(output_file):
            if clobber:
                self.logger.warn(
                    "Deleting previous version of results: %s" % output_file
                )
                os.unlink(output_file)
            else:
                self.logger.error(
                    "Refusing to overwrite existing file %s" % output_file
                )
                return 1

        input_files = glob.glob(os.path.join(input_dir, file_pattern))

        command_line = [executable] + input_files + [output_file]
        try:
            catch_segfaults(command_line, None, None, self.logger)
        except Exception, e:
            self.logger.error(str(e))
            return 1
Example 3
    def _create_mask(self, npix, cell_size, output_image,
                     concatenated_measurement_set, executable,
                     working_directory, log4_cplus_name, sourcedb_path,
                     mask_patch_size, image_path_directory):
        """
        (3) Create a casa image containing a mask blocking out the
        sources in the provided sourcedb.

        It expects:

        a. the MS for which the mask will be created; it is used to
           determine some image details (e.g. pointing)
        b. parameters for running within the catch_segfaults framework
        c. the size of the mask patch.
           To create a mask, an empty measurement set is first created using
           awimager, ready to be filled with mask data.

        This function is a wrapper around some functionality written by:
        [email protected]

        Steps:
        1. Create a parset with the image parameters, used by
        2. the awimager run, creating an empty casa image.
        3. Fill the casa image with mask data.
        """
        # ********************************************************************
        # 1. Create the parset used to make a mask
        mask_file_path = output_image + ".mask"

        mask_patch_dictionary = {
            "npix": str(npix),
            "cellsize": str(cell_size),
            "image": str(mask_file_path),
            "ms": str(concatenated_measurement_set),
            "operation": "empty",
            "stokes": "'I'"
        }
        mask_parset = Parset.fromDict(mask_patch_dictionary)
        mask_parset_path = os.path.join(image_path_directory, "mask.par")
        mask_parset.writeFile(mask_parset_path)
        self.logger.debug(
            "Write parset for awimager mask creation: {0}".format(
                mask_parset_path))

        # *********************************************************************
        # 2. Create an empty mask using awimager
        cmd = [executable, mask_parset_path]
        self.logger.info(" ".join(cmd))
        try:
            with CatchLog4CPlus(
                    working_directory,
                    self.logger.name + "." + os.path.basename(log4_cplus_name),
                    os.path.basename(executable)) as logger:
                catch_segfaults(cmd, working_directory, self.environment,
                                logger)
        # Thrown by catch_segfaults
        except CalledProcessError, exception:
            self.logger.error(str(exception))
            return 1
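For reference, Parset.writeFile stores the dictionary above as plain key=value lines, so the mask.par handed to awimager would look roughly like the following (values are invented for illustration; the exact formatting may differ slightly):

# Approximate content of mask.par (illustrative values only)
npix=1024
cellsize=30arcsec
image=/data/images/target.mask
ms=/data/scratch/concat.MS
operation=empty
stokes='I'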
Example 4
    def _create_mask(self, npix, cell_size, output_image,
                     concatenated_measurement_set, executable,
                     working_directory, log4_cplus_name, sourcedb_path,
                     mask_patch_size, image_path_directory):
        """
        (3) Create a casa image containing a mask blocking out the
        sources in the provided sourcedb.

        It expects:

        a. the MS for which the mask will be created; it is used to
           determine some image details (e.g. pointing)
        b. parameters for running within the catch_segfaults framework
        c. the size of the mask patch.
           To create a mask, an empty measurement set is first created using
           awimager, ready to be filled with mask data.

        This function is a wrapper around some functionality written by:
        [email protected]

        Steps:
        1. Create a parset with the image parameters, used by
        2. the awimager run, creating an empty casa image.
        3. Fill the casa image with mask data.
        """
        # ********************************************************************
        # 1. Create the parset used to make a mask
        mask_file_path = output_image + ".mask"

        mask_patch_dictionary = {"npix": str(npix),
                                 "cellsize": str(cell_size),
                                 "image": str(mask_file_path),
                                 "ms": str(concatenated_measurement_set),
                                 "operation": "empty",
                                 "stokes": "'I'"
                                 }
        mask_parset = Parset.fromDict(mask_patch_dictionary)
        mask_parset_path = os.path.join(image_path_directory, "mask.par")
        mask_parset.writeFile(mask_parset_path)
        self.logger.debug(
            "Write parset for awimager mask creation: {0}".format(
                mask_parset_path))

        # *********************************************************************
        # 2. Create an empty mask using awimager
        cmd = [executable, mask_parset_path]
        self.logger.info(" ".join(cmd))
        try:
            with CatchLog4CPlus(working_directory,
                    self.logger.name + "." + os.path.basename(log4_cplus_name),
                    os.path.basename(executable)
            ) as logger:
                catch_segfaults(cmd, working_directory, self.environment,
                                        logger)
        # Thrown by catch_segfaults
        except CalledProcessError, exception:
            self.logger.error(str(exception))
            return 1
Example 5
    def run(self, infile, outfile, executable, environment, sigma,
            use_parmexportcal):
        self.environment.update(environment)
        if os.path.exists(infile):
            self.logger.info("Processing {0}".format(infile))
        else:
            self.logger.error(
                "Instrument model file %s does not exist" % infile
                )
            return 1

        # Create output directory (if it doesn't already exist)
        create_directory(os.path.dirname(outfile))

        # Remove the target outfile if present: parmexportcal fails otherwise
        if os.path.exists(outfile):
            shutil.rmtree(outfile)

        # ********************************************************************
        # 1. Select correction method
        if not use_parmexportcal:
            # ****************************************************************
            # 3. use gainoutliercorrect from Swinbank
            self.logger.info(
                "Using the gainoutlier correction based on editparmdb")
            self._filter_stations_parmdb(infile, outfile, sigma)
            return 0

        # else:
        if not os.access(executable, os.X_OK):
            self.logger.error(
                "Could not find parmexportcal executable at: {0}".format(
                    executable))
            self.logger.error("bailing out!")
            return 1

        # ********************************************************************
        # 2. Call parmexportcal for gain correction
        self.logger.info(
            "Using the gainoutlier correction based on parmexportcal")
        try:
            temp_dir = tempfile.mkdtemp()
            with CatchLog4CPlus(
                temp_dir,
                self.logger.name + '.' + os.path.basename(infile),
                os.path.basename(executable)
            ) as logger:
                cmd = [executable, '-in', infile, '-out', outfile]
                self.logger.debug(
                    "Parmexportcal call: {0} ".format(" ".join(cmd)))
                catch_segfaults(
                    cmd,
                    temp_dir,
                    self.environment,
                    logger
                )
        except Exception, excp:
            self.logger.error(str(excp))
            return 1
Example 6
    def run(self, infile, outfile, executable, environment, sigma,
            use_parmexportcal):
        self.environment.update(environment)
        if os.path.exists(infile):
            self.logger.info("Processing {0}".format(infile))
        else:
            self.logger.error(
                "Instrument model file %s does not exist" % infile
                )
            return 1

        # Create output directory (if it doesn't already exist)
        create_directory(os.path.dirname(outfile))

        # Remove the target outfile if present: parmexportcal fails otherwise
        if os.path.exists(outfile):
            shutil.rmtree(outfile)

        # ********************************************************************
        # 1. Select correction method
        if not use_parmexportcal:
            # ****************************************************************
            # 3. use gainoutliercorrect from Swinbank
            self.logger.info(
                "Using the gainoutlier correction based on editparmdb")
            self._filter_stations_parmdb(infile, outfile, sigma)
            return 0

        # else:
        if not os.access(executable, os.X_OK):
            self.logger.error(
                "Could not find parmexportcal executable at: {0}".format(
                    executable))
            self.logger.error("bailing out!")
            return 1

        # ********************************************************************
        # 2. Call parmexportcal for gain correction
        self.logger.info(
            "Using the gainoutlier correction based on parmexportcal")
        try:
            temp_dir = tempfile.mkdtemp(suffix=".%s" % (os.path.basename(__file__),))
            with CatchLog4CPlus(
                temp_dir,
                self.logger.name + '.' + os.path.basename(infile),
                os.path.basename(executable)
            ) as logger:
                cmd = [executable, '-in', infile, '-out', outfile]
                self.logger.debug(
                    "Parmexportcal call: {0} ".format(" ".join(cmd)))
                catch_segfaults(
                    cmd,
                    temp_dir,
                    self.environment,
                    logger
                )
        except Exception, excp:
            self.logger.error(str(excp))
            return 1
Example 7
 def _execute(self, cmd):
     try:
         temp_dir = tempfile.mkdtemp()
         with CatchLog4CPlus(temp_dir, self.logger.name,
                             os.path.basename(cmd[0])) as logger:
             catch_segfaults(cmd, temp_dir, self.environment, self.logger)
     except Exception, e:
         self.logger.error(str(e))
         return False
Example 8
 def _dppp_call(self, working_dir, ndppp, cmd, environment):
     """
     Muckable function running the dppp executable.
     Wraps dppp with catchLog4CPLus and catch_segfaults
     """
     with CatchLog4CPlus(working_dir, self.logger.name +
          "." + os.path.basename("long_baseline_ndppp"),
               os.path.basename(ndppp)) as logger:
         catch_segfaults(cmd, working_dir, environment,
                               logger, cleanup = None)
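"Muckable" in the docstring presumably means the call is isolated in its own method so unit tests can replace (mock) it rather than launch the real NDPPP binary. A minimal sketch of that idea, using a placeholder recipe class name that is not part of the listing:

# Sketch: "mucking" _dppp_call in a test so no real NDPPP process is started.
recorded_cmds = []

def fake_dppp_call(self, working_dir, ndppp, cmd, environment):
    recorded_cmds.append(cmd)   # record the command line instead of executing it

LongBaselineNode._dppp_call = fake_dppp_call   # LongBaselineNode is a placeholder name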
Example 9
 def _execute(self, cmd):
     try:
         temp_dir = tempfile.mkdtemp()
         with CatchLog4CPlus(temp_dir,
                             self.logger.name,
                             os.path.basename(cmd[0])
         ) as logger:
             catch_segfaults(cmd, temp_dir, self.environment, self.logger)
     except Exception, e:
         self.logger.error(str(e))
         return False
Example 10
 def _dppp_call(self, working_dir, ndppp, cmd, environment):
     """
     Muckable function running the dppp executable.
     Wraps dppp with catchLog4CPLus and catch_segfaults
     """
     # TODO: cpu limited is static at this location
     environment['OMP_NUM_THREADS'] = str(8)
     self.logger.debug("Using %s threads for ndppp" % 8)
     with CatchLog4CPlus(working_dir, self.logger.name +
          "." + os.path.basename("imager_prepare_ndppp"),
               os.path.basename(ndppp)) as logger:
         catch_segfaults(cmd, working_dir, environment,
                logger, cleanup = None, usageStats=self.resourceMonitor)
Example 11
 def _execute(self, cmd):
     try:
         temp_dir = tempfile.mkdtemp(suffix=".%s" % (os.path.basename(__file__),))
         with CatchLog4CPlus(temp_dir,
                             self.logger.name,
                             os.path.basename(cmd[0])
         ) as logger:
             catch_segfaults(cmd, temp_dir, self.environment, self.logger)
     except Exception as e:
         self.logger.error(str(e))
         return False
     finally:
         shutil.rmtree(temp_dir)
     return True
Example 12
 def _dppp_call(self, working_dir, ndppp, cmd, environment):
     """
     Muckable function running the dppp executable.
     Wraps dppp with catchLog4CPLus and catch_segfaults
     """
     with CatchLog4CPlus(
             working_dir, self.logger.name + "." +
             os.path.basename("imager_prepare_ndppp"),
             os.path.basename(ndppp)) as logger:
         catch_segfaults(cmd,
                         working_dir,
                         environment,
                         logger,
                         cleanup=None)
Example 13
    def _create_source_db(self,
                          source_list,
                          sourcedb_target_path,
                          working_directory,
                          create_sourcdb_exec,
                          append=False):
        """
        Convert a sourcelist to a sourcedb:
        
        1. Remove the existing sourcedb if not appending (sourcedb fails otherwise)
        2. Call the sourcedb executable with the supplied parameters
         
        """
        # *********************************************************************
        # 1. remove existing sourcedb if not appending
        if (append == False) and os.path.isdir(sourcedb_target_path):
            shutil.rmtree(sourcedb_target_path)
            self.logger.debug(
                "Removed existing sky model: {0}".format(sourcedb_target_path))

        # *********************************************************************
        # 2. The command and parameters to be run
        cmd = [
            create_sourcdb_exec,
            "in={0}".format(source_list),
            "out={0}".format(sourcedb_target_path),
            "format=<",  # format according to Ger van Diepen
            "append=true"
        ]  # Always set append: it has no effect on a non-existent db
        self.logger.info(' '.join(cmd))

        try:
            with CatchLog4CPlus(
                    working_directory,
                    self.logger.name + "." + os.path.basename("makesourcedb"),
                    os.path.basename(create_sourcdb_exec)) as logger:
                catch_segfaults(cmd,
                                working_directory,
                                self.environment,
                                logger,
                                cleanup=None)

        except Exception as exception:
            self.logger.error("Execution of external failed:")
            self.logger.error(" ".join(cmd))
            self.logger.error("exception details:")
            self.logger.error(str(exception))
            return 1

        return 0
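With invented paths, the command assembled above and logged by self.logger.info(' '.join(cmd)) expands to something like the sketch below. Because the list is executed directly (no shell), the '<' in format=< is passed to makesourcedb literally rather than being treated as a redirect.

# Illustrative only: hypothetical expansion of cmd
cmd = [
    "/opt/lofar/bin/makesourcedb",   # hypothetical create_sourcdb_exec
    "in=/data/target.skymodel",      # hypothetical source_list
    "out=/data/target.sourcedb",     # hypothetical sourcedb_target_path
    "format=<",                      # passed literally; no shell is involved
    "append=true",
]
print(" ".join(cmd))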
Example 14
    def _create_source_db(self,
                          source_list,
                          sourcedb_target_path,
                          working_directory,
                          executable,
                          append=False):
        """
        _create_source_db consumes a source list text file and produces a
        source db (a pyrap table).
        If the append parameter is set to true, it expects an already existing
        sourcedb at the supplied path and appends the sources in the list to
        it. This is typically used over multiple iterations of the imaging
        pipeline, with self-calibration.
        """
        #remove existing sourcedb if not appending
        if (append == False) and os.path.isdir(sourcedb_target_path):
            shutil.rmtree(sourcedb_target_path)
            self.logger.debug(
                "Removed existing sky model: {0}".format(sourcedb_target_path))

        # The command and parameters to be run
        cmd = [
            executable,
            "in={0}".format(source_list),
            "out={0}".format(sourcedb_target_path),
            "format=<",  # format according to Ger van Diepen
            "append=true"
        ]  # Always set the append flag: it has no effect on a non-existent db

        try:
            with CatchLog4CPlus(
                    working_directory,
                    self.logger.name + "." + os.path.basename("makesourcedb"),
                    os.path.basename(executable)) as logger:
                catch_segfaults(cmd,
                                working_directory,
                                self.environment,
                                logger,
                                cleanup=None)

        except subprocess.CalledProcessError as called_proc_error:
            self.logger.error("Execution of external failed:")
            self.logger.error(" ".join(cmd))
            self.logger.error("exception details:")
            self.logger.error(str(called_proc_error))
            return 1

        return 0
Example 15
 def execute(self, executable, args):
     try:
         # ****************************************************************
         # Run
         cmd = [executable] + args
         with CatchLog4CPlus(
                 self.work_dir,
                 self.logger.name + "." + os.path.basename(self.infile),
                 os.path.basename(self.executable),
         ) as logger:
             # Catch segfaults and retry
             catch_segfaults(cmd, self.work_dir, self.environment, logger)
     except CalledProcessError, err:
         # CalledProcessError isn't properly propagated by IPython
         self.logger.error(str(err))
         return 1
Example 16
    def run(self, files, executable, parset, environment):
        """
        Run the bbs-reducer executable. 
        *Arguments*
        - `files`: argument is a tuple of (MS-file, parmdb-file, sourcedb-file)
        - `executable`: full path to the bbs-reducer executable
        - `parset`: full path to the parset-file
        - `environment`: environment variables to use
        """
        self.logger.debug("files       = %s" % str(files))
        self.logger.debug("executable  = %s" % executable)
        self.logger.debug("parset      = %s" % parset)
        self.logger.debug("environment = %s" % environment)

        self.environment.update(environment)
        ms, parmdb, sourcedb = files

        # Time execution of this job
        with log_time(self.logger):
            if os.path.exists(ms):
                self.logger.info("Processing %s" % ms)
            else:
                self.logger.error("Measurement Set %s does not exist" % ms)
                return 1

            # Run bbs-reducer. Catch log output from bbs-reducer and stdout.
            scratch_dir = mkdtemp(suffix=".%s" %
                                  (os.path.basename(__file__), ))
            try:
                cmd = [
                    executable,
                    "--parmdb=%s" % parmdb,
                    "--sourcedb=%s" % sourcedb, ms, parset
                ]
                with CatchLog4CPlus(
                        scratch_dir,
                        self.logger.name + "." + os.path.basename(ms),
                        os.path.basename(executable),
                ) as logger:
                    catch_segfaults(cmd, scratch_dir, self.environment, logger)
            except CalledProcessError as err:
                self.logger.error(str(err))
                return 1
            finally:
                shutil.rmtree(scratch_dir)

        return 0
Example 17
    def run(self, executable, catalogue, skydb, dbtype):
        """
        Contains all functionality
        """
        with log_time(self.logger):
            # ****************************************************************
            # 1. Create output directory if it does not yet exist.
            skydb_dir = os.path.dirname(skydb)
            try:
                os.makedirs(skydb_dir)
                self.logger.debug("Created output directory %s" % skydb_dir)
            except FileExistsError:
                pass

            # ****************************************************************
            # 2 Remove any old sky database
            #   Create the sourcedb
            shutil.rmtree(skydb, ignore_errors=True)

            self.logger.info("Creating skymodel: %s" % (skydb))
            scratch_dir = tempfile.mkdtemp(suffix=".%s" %
                                           (os.path.basename(__file__), ))
            try:
                cmd = [
                    executable,
                    "in=%s" % catalogue,
                    "out=%s" % skydb,
                    "outtype=%s" % dbtype, "format=<", "append=false"
                ]
                with CatchLog4CPlus(
                        scratch_dir,
                        self.logger.name + "." + os.path.basename(skydb),
                        os.path.basename(executable)) as logger:
                    catch_segfaults(cmd, scratch_dir, None, logger)

            # *****************************************************************
            # 3. Validate performance and cleanup temp files
            except CalledProcessError as err:
                # For CalledProcessError isn't properly propagated by IPython
                # Temporary workaround...
                self.logger.error(str(err))
                return 1
            finally:
                shutil.rmtree(scratch_dir)

        return 0
Example 18
    def run(self, executable, nthreads, strategy, indirect, skip_flagged, wd,
            *infiles):
        with log_time(self.logger):
            self.logger.info("Processing %s" % " ".join(infiles))

            try:
                if not os.access(executable, os.X_OK):
                    raise ExecutableMissing(executable)

                working_dir = tempfile.mkdtemp(dir=wd,
                                               suffix=".%s" %
                                               (os.path.basename(__file__), ))
                cmd = [executable, "-j", str(nthreads)]
                if strategy:
                    if os.path.exists(strategy):
                        cmd.extend(["-strategy", strategy])
                    else:
                        raise Exception("Strategy definition not available")
                if indirect:
                    cmd.extend(["-indirect-read"])
                if skip_flagged:
                    cmd.extend(["-skip-flagged"])
                cmd.extend(infiles)
                with CatchLog4CPlus(working_dir, self.logger.name,
                                    os.path.basename(executable)) as logger:
                    catch_segfaults(cmd, working_dir, None, logger)
            except ExecutableMissing as e:
                self.logger.error("%s not found" % (e.args[0]))
                return 1
            except CalledProcessError as e:
                self.logger.error(str(e))
                return 1
            except Exception as e:
                self.logger.exception(e)
                return 1
            finally:
                # Try and clean up the working directory, but don't worry if
                # it fails -- it might not have been created before throwing
                # the exception
                try:
                    shutil.rmtree(working_dir)
                except:
                    pass

            return 0
Example 19
    def run(self, executable, catalogue, skydb, dbtype):
        """
        Contains all functionality
        """
        with log_time(self.logger):
            # ****************************************************************
            # 1. Create output directory if it does not yet exist.
            skydb_dir = os.path.dirname(skydb)
            try:
                os.makedirs(skydb_dir)
                self.logger.debug("Created output directory %s" % skydb_dir)
            except OSError, err:
                # Ignore error if directory already exists, otherwise re-raise
                if err[0] != errno.EEXIST:
                    raise

            # ****************************************************************
            # 2 Remove any old sky database
            #   Create the sourcedb
            shutil.rmtree(skydb, ignore_errors=True)

            self.logger.info("Creating skymodel: %s" % (skydb))
            scratch_dir = tempfile.mkdtemp()
            try:
                cmd = [executable,
                       "in=%s" % catalogue,
                       "out=%s" % skydb,
                       "outtype=%s" % dbtype,
                       "format=<",
                       "append=false"
                      ]
                with CatchLog4CPlus(
                    scratch_dir,
                    self.logger.name + "." + os.path.basename(skydb),
                    os.path.basename(executable)
                ) as logger:
                    catch_segfaults(cmd, scratch_dir, None, logger)

            # *****************************************************************
            # 3. Validate performance and cleanup temp files
            except CalledProcessError, err:
                # For CalledProcessError isn't properly propagated by IPython
                # Temporary workaround...
                self.logger.error(str(err))
                return 1
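On Python 3 the OSError/EEXIST guard used in this example can be collapsed into a single call; a minimal equivalent, assuming only the standard library and a hypothetical skydb path:

import os

skydb_dir = os.path.dirname("/data/sky.skydb")   # hypothetical skydb path
os.makedirs(skydb_dir, exist_ok=True)            # no error if it already exists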
Example 20
 def execute(self, executable, args):
     try:
     # ****************************************************************
     # Run
         cmd = [executable] + args
         with CatchLog4CPlus(
             self.work_dir,
             self.logger.name + "." + os.path.basename(self.infile),
             os.path.basename(self.executable),
         ) as logger:
             # Catch segfaults and retry
             catch_segfaults(
                 cmd, self.work_dir, self.environment, logger
             )
     except CalledProcessError, err:
         # CalledProcessError isn't properly propagated by IPython
         self.logger.error(str(err))
         return 1
Example 21
    def run(self, files, executable, parset, environment):
        """
        Run the bbs-reducer executable. 
        *Arguments*
        - `files`: argument is a tuple of (MS-file, parmdb-file, sourcedb-file)
        - `executable`: full path to the bbs-reducer executable
        - `parset`: full path to the parset-file
        - `environment`: environment variables to use
        """
        self.logger.debug("files       = %s" % str(files))
        self.logger.debug("executable  = %s" % executable)
        self.logger.debug("parset      = %s" % parset)
        self.logger.debug("environment = %s" % environment)
        
        self.environment.update(environment)
        ms, parmdb, sourcedb = files
        
        # Time execution of this job
        with log_time(self.logger):
            if os.path.exists(ms):
                self.logger.info("Processing %s" % ms)
            else:
                self.logger.error("Measurement Set %s does not exist" % ms)
                return 1

            # Run bbs-reducer. Catch log output from bbs-reducer and stdout.
            scratch_dir = mkdtemp(suffix=".%s" % (os.path.basename(__file__),))
            try:
                cmd = [executable,
                       "--parmdb=%s" % parmdb, 
                       "--sourcedb=%s" % sourcedb,
                       ms, parset
                      ]
                with CatchLog4CPlus(
                    scratch_dir,
                    self.logger.name + "." + os.path.basename(ms),
                    os.path.basename(executable),
                ) as logger:
                    catch_segfaults(cmd, scratch_dir, self.environment, logger)
            except CalledProcessError, err:
                self.logger.error(str(err))
                return 1
            finally:
                shutil.rmtree(scratch_dir)

        return 0
Example 22
    def _create_source_db(self, source_list, sourcedb_target_path,
                          working_directory, create_sourcdb_exec, append = False):
        """
        Convert a sourcelist to a sourcedb:
        
        1. Remove the existing sourcedb if not appending (sourcedb fails otherwise)
        2. Call the sourcedb executable with the supplied parameters
         
        """
        # *********************************************************************
        # 1. remove existing sourcedb if not appending
        if (append == False) and os.path.isdir(sourcedb_target_path):
            shutil.rmtree(sourcedb_target_path)
            self.logger.debug("Removed existing sky model: {0}".format(
                                            sourcedb_target_path))

        # *********************************************************************
        # 2. The command and parameters to be run
        cmd = [create_sourcdb_exec, "in={0}".format(source_list),
               "out={0}".format(sourcedb_target_path),
               "format=<", # format according to Ger van Diepen
               "append=true"] # Always set append: no effect on non exist db
        self.logger.info(' '.join(cmd))

        try:
            with CatchLog4CPlus(working_directory,
                 self.logger.name + "." + os.path.basename("makesourcedb"),
                 os.path.basename(create_sourcdb_exec)
            ) as logger:
                catch_segfaults(cmd, working_directory, self.environment,
                                            logger, cleanup = None)

        except Exception, exception:
            self.logger.error("Execution of external failed:")
            self.logger.error(" ".join(cmd))
            self.logger.error("exception details:")
            self.logger.error(str(exception))
            return 1
Example 23
    def run(self, executable, file_pattern, input_dir, output_file, clobber):
        if not os.path.exists(input_dir):
            self.logger.error("Input directory (%s) not found" % input_dir)
            return 1

        self.logger.info("Processing %s" % input_dir)

        if os.path.exists(output_file):
            if clobber:
                self.logger.warn("Deleting previous version of results: %s" % output_file)
                os.unlink(output_file)
            else:
                self.logger.error("Refusing to overwrite existing file %s" % output_file)
                return 1

        input_files = glob.glob(os.path.join(input_dir, file_pattern))

        command_line = [executable] + input_files + [output_file]
        try:
            catch_segfaults(command_line, None, None, self.logger)
        except Exception, e:
            self.logger.error(str(e))
            return 1
Example 24
    def _create_source_db(self, source_list, sourcedb_target_path,
                          working_directory, executable, append=False):
        """
        _create_source_db consumes a source list text file and produces a
        source db (a pyrap table).
        If the append parameter is set to true, it expects an already existing
        sourcedb at the supplied path and appends the sources in the list to
        it. This is typically used over multiple iterations of the imaging
        pipeline, with self-calibration.
        """
        #remove existing sourcedb if not appending
        if (append == False) and os.path.isdir(sourcedb_target_path):
            shutil.rmtree(sourcedb_target_path)
            self.logger.debug("Removed existing sky model: {0}".format(
                                            sourcedb_target_path))

        # The command and parameters to be run
        cmd = [executable, "in={0}".format(source_list),
               "out={0}".format(sourcedb_target_path),
               "format=<", # format according to Ger van Diepen
               "append=true"] # Always set append flag: no effect on non exist
                              # db

        try:
            with CatchLog4CPlus(working_directory,
                 self.logger.name + "." + os.path.basename("makesourcedb"),
                 os.path.basename(executable)
            ) as logger:
                catch_segfaults(cmd, working_directory, self.environment,
                                            logger, cleanup=None)

        except subprocess.CalledProcessError, called_proc_error:
            self.logger.error("Execution of external failed:")
            self.logger.error(" ".join(cmd))
            self.logger.error("exception details:")
            self.logger.error(str(called_proc_error))
            return 1
Example 25
 def run(self, infile, clusterdesc, outfile, executable):
     with log_time(self.logger):
         if os.path.exists(infile):
             self.logger.info("Processing %s" % (infile))
         else:
             self.logger.error("Dataset %s does not exist" % (infile))
             return 1
         try:
             if not os.access(executable, os.X_OK):
                 raise ExecutableMissing(executable)
             cmd = [executable, clusterdesc, infile, outfile]
             return catch_segfaults(cmd, None, None, self.logger).returncode
         except ExecutableMissing, e:
             self.logger.error("%s not found" % (e.args[0]))
             return 1
         except CalledProcessError, e:
             # For CalledProcessError isn't properly propagated by IPython
             # Temporary workaround...
             self.logger.error(str(e))
             self.logger.info("A common cause for this failure is the usage"
                    "of an incorrect cluster.desc file in the pipeline.cfg")
             return 1
Example 26
 def run(self, infile, clusterdesc, outfile, executable):
     with log_time(self.logger):
         if os.path.exists(infile):
             self.logger.info("Processing %s" % (infile))
         else:
             self.logger.error("Dataset %s does not exist" % (infile))
             return 1
         try:
             if not os.access(executable, os.X_OK):
                 raise ExecutableMissing(executable)
             cmd = [executable, clusterdesc, infile, outfile]
             return catch_segfaults(cmd, None, None, self.logger).returncode
         except ExecutableMissing, e:
             self.logger.error("%s not found" % (e.args[0]))
             return 1
         except CalledProcessError, e:
             # For CalledProcessError isn't properly propagated by IPython
             # Temporary workaround...
             self.logger.error(str(e))
             self.logger.info(
                 "A common cause for this failure is the usage"
                 "of an incorrect cluster.desc file in the pipeline.cfg")
             return 1
Example 27
    def run(self, infile, executable, args, kwargs, work_dir='/tmp', parsetasfile=True, args_format='', environment=''):
        """
        This method contains all the needed functionality
        """

        # Debugging info
        self.logger.debug("infile            = %s" % infile)
        self.logger.debug("executable        = %s" % executable)
        self.logger.debug("working directory = %s" % work_dir)
        self.logger.debug("arguments         = %s" % args)
        self.logger.debug("arg dictionary    = %s" % kwargs)
        self.logger.debug("environment       = %s" % environment)

        self.environment.update(environment)

        self.work_dir = work_dir
        self.infile = infile
        self.executable = executable

        self.msout_original = kwargs['msout'].rstrip('/')
        kwargs.pop('msout')
        self.msout_destination_dir = os.path.dirname(self.msout_original)

        # Set up scratch paths
        scratch_dir = kwargs['local_scratch_dir']
        kwargs.pop('local_scratch_dir')
        try:
            os.mkdir(scratch_dir)
        except OSError:
            pass
        self.scratch_dir = tempfile.mkdtemp(dir=scratch_dir)
        self.logger.info('Using {} as scratch directory'.format(self.scratch_dir))
        self.msout_scratch = os.path.join(self.scratch_dir, os.path.basename(self.msout_original))
        args.append('msout=' + self.msout_scratch)

        # Time execution of this job
        with log_time(self.logger):
            #if os.path.exists(infile):
            self.logger.info("Processing %s" % infile)

            # Check if script is present
            if not os.path.isfile(executable):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # hurray! race condition when running with more than one process on one filesystem
            if not os.path.isdir(work_dir):
                try:
                    os.mkdir(work_dir, )
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(work_dir):
                        pass
                    else:
                        raise

            argsformat = args_format['args_format']
            if not parsetasfile:
                if argsformat == 'gnu':
                    for k, v in kwargs.items():
                        args.append('--' + k + '=' + v)
                if argsformat == 'lofar':
                    for k, v in kwargs.items():
                        args.append(k + '=' + v)
                if argsformat == 'argparse':
                    for k, v in kwargs.items():
                        args.append('--' + k + ' ' + v)
                if argsformat == 'wsclean':
                    for k, v in kwargs.items():
                        multargs = v.split(' ')
                        if multargs:
                            multargs.reverse()
                            for item in multargs:
                                args.insert(0, item)
                        else:
                            args.insert(0, v)
                        args.insert(0, '-'+ k)

            else:
                nodeparset = Parset()
                parsetname = os.path.join(work_dir, os.path.basename(infile) + '.parset')
                for k, v in kwargs.items():
                    nodeparset.add(k, v)
                nodeparset.writeFile(parsetname)
                args.insert(0, parsetname)

            try:
            # ****************************************************************
            #Run
                cmd = [executable] + args
                with CatchLog4CPlus(
                    work_dir,
                    self.logger.name + "." + os.path.basename(infile),
                    os.path.basename(executable),
                ) as logger:
                    # Catch segfaults and retry
                    catch_segfaults(
                        cmd, work_dir, self.environment, logger
                    )
            except CalledProcessError, err:
                # CalledProcessError isn't properly propagated by IPython
                self.logger.error(str(err))
                self.cleanup()
                return 1
            except Exception, err:
                self.logger.error(str(err))
                self.cleanup()
                return 1
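The argsformat branches above turn the kwargs dictionary into command-line arguments in one of four styles. A standalone sketch of the same mapping (not part of the recipe, kept separate here purely for illustration):

def format_args(kwargs, argsformat):
    """Sketch of the kwargs-to-arguments mapping used in the run() above."""
    args = []
    if argsformat == 'gnu':                      # --key=value
        args = ['--%s=%s' % (k, v) for k, v in kwargs.items()]
    elif argsformat == 'lofar':                  # key=value (parset style)
        args = ['%s=%s' % (k, v) for k, v in kwargs.items()]
    elif argsformat == 'argparse':               # '--key value' as one string
        args = ['--%s %s' % (k, v) for k, v in kwargs.items()]
    elif argsformat == 'wsclean':                # -key value [value ...], prepended
        for k, v in kwargs.items():
            args = ['-' + k] + v.split(' ') + args
    return args

# e.g. format_args({'msin': 'L12345.MS'}, 'gnu') -> ['--msin=L12345.MS']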
Example 28
    def run(self,
            infile,
            executable,
            args,
            kwargs,
            work_dir='/tmp',
            parsetasfile=True,
            args_format='',
            environment=''):
        """
        This method contains all the needed functionality
        """

        # Debugging info
        self.logger.debug("infile            = %s" % infile)
        self.logger.debug("executable        = %s" % executable)
        self.logger.debug("working directory = %s" % work_dir)
        self.logger.debug("arguments         = %s" % args)
        self.logger.debug("arg dictionary    = %s" % kwargs)
        self.logger.debug("environment       = %s" % environment)

        self.environment.update(environment)

        self.work_dir = work_dir
        self.infile = infile
        self.executable = executable

        if 'replace-sourcedb' in kwargs:
            self.replace_sourcedb = kwargs['replace-sourcedb']
            kwargs.pop('replace-sourcedb')
        if 'replace-parmdb' in kwargs:
            self.replace_parmdb = kwargs['replace-parmdb']
            kwargs.pop('replace-parmdb')
        if 'dry-run' in kwargs:
            self.dry_run = kwargs['dry-run']
            kwargs.pop('dry-run')
        if 'sourcedb' in kwargs:
            self.sourcedb = kwargs['sourcedb']
            kwargs.pop('sourcedb')
        if 'parmdb' in kwargs:
            self.parmdb = kwargs['parmdb']
            kwargs.pop('parmdb')
        if 'sourcedb-name' in kwargs:
            self.sourcedb_basename = kwargs['sourcedb-name']
            self.replace_sourcedb = True
            kwargs.pop('sourcedb-name')
        if 'parmdb-name' in kwargs:
            self.parmdb_basename = kwargs['parmdb-name']
            self.replace_parmdb = True
            kwargs.pop('parmdb-name')
        if 'force' in kwargs:
            self.replace_parmdb = True
            self.replace_sourcedb = True
            kwargs.pop('force')
        numthreads = 1
        if 'numthreads' in kwargs:
            numthreads = kwargs['numthreads']
            kwargs.pop('numthreads')
        args.append('--numthreads=' + str(numthreads))
        if 'observation' in kwargs:
            self.observation = kwargs.pop('observation')
        if 'catalog' in kwargs:
            self.catalog = kwargs.pop('catalog')

        self.createsourcedb()
        self.createparmdb()
        if not 'no-columns' in kwargs:
            #if not kwargs['no-columns']:
            self.addcolumns()
        else:
            kwargs.pop('no-columns')

        args.append('--sourcedb=' + self.sourcedb_path)
        args.append('--parmdb=' + self.parmdb_path)

        args.append(self.observation)
        #catalog = None

        # Time execution of this job
        with log_time(self.logger):
            #if os.path.exists(infile):
            self.logger.info("Processing %s" % infile)

            # Check if script is present
            if not os.path.isfile(executable):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # hurray! race condition when running with more than one process on one filesystem
            if not os.path.isdir(work_dir):
                try:
                    os.mkdir(work_dir, )
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(work_dir):
                        pass
                    else:
                        raise

            if parsetasfile:
                nodeparset = Parset()
                parsetname = os.path.join(work_dir,
                                          os.path.basename(infile) + '.parset')
                for k, v in list(kwargs.items()):
                    nodeparset.add(k, v)
                nodeparset.writeFile(parsetname)
                #args.insert(0, parsetname)
                args.append(parsetname)

            #if catalog is not None:
            #    args.append(catalog)

            try:
                # ****************************************************************
                #Run
                cmd = [executable] + args
                with CatchLog4CPlus(
                        work_dir,
                        self.logger.name + "." + os.path.basename(infile),
                        os.path.basename(executable),
                ) as logger:
                    # Catch segfaults and retry
                    catch_segfaults(cmd, work_dir, self.environment, logger)
            except CalledProcessError as err:
                # CalledProcessError isn't properly propagated by IPython
                self.logger.error(str(err))
                return 1
            except Exception as err:
                self.logger.error(str(err))
                return 1
        # We need some signal to the master script that the script ran ok.
        self.outputs['ok'] = True
        return 0
Example 29
    def run(self,
            infile,
            executable,
            args,
            kwargs,
            work_dir='/tmp',
            parsetasfile=True,
            args_format='',
            environment=''):
        """
        This method contains all the needed functionality
        """

        # Debugging info
        self.logger.debug("infile            = %s" % infile)
        self.logger.debug("executable        = %s" % executable)
        self.logger.debug("working directory = %s" % work_dir)
        self.logger.debug("arguments         = %s" % args)
        self.logger.debug("arg dictionary    = %s" % kwargs)
        self.logger.debug("environment       = %s" % environment)

        self.environment.update(environment)

        self.work_dir = work_dir
        self.infile = infile
        self.executable = executable

        self.msout_original = kwargs['msout'].rstrip('/')
        kwargs.pop('msout')
        self.msout_destination_dir = os.path.dirname(self.msout_original)
        self.scratch_dir = tempfile.mkdtemp(dir=kwargs['local_scratch_dir'])
        kwargs.pop('local_scratch_dir')
        self.logger.info('Using {} as scratch directory'.format(
            self.scratch_dir))

        # Set up scratch paths
        self.msout_scratch = os.path.join(
            self.scratch_dir, os.path.basename(self.msout_original))
        args.append('msout=' + self.msout_scratch)

        # Time execution of this job
        with log_time(self.logger):
            #if os.path.exists(infile):
            self.logger.info("Processing %s" % infile)

            # Check if script is present
            if not os.path.isfile(executable):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # hurray! race condition when running with more than one process on one filesystem
            if not os.path.isdir(work_dir):
                try:
                    os.mkdir(work_dir, )
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(work_dir):
                        pass
                    else:
                        raise

            argsformat = args_format['args_format']
            if not parsetasfile:
                if argsformat == 'gnu':
                    for k, v in kwargs.items():
                        args.append('--' + k + '=' + v)
                if argsformat == 'lofar':
                    for k, v in kwargs.items():
                        args.append(k + '=' + v)
                if argsformat == 'argparse':
                    for k, v in kwargs.items():
                        args.append('--' + k + ' ' + v)
                if argsformat == 'wsclean':
                    for k, v in kwargs.items():
                        multargs = v.split(' ')
                        if multargs:
                            multargs.reverse()
                            for item in multargs:
                                args.insert(0, item)
                        else:
                            args.insert(0, v)
                        args.insert(0, '-' + k)

            else:
                nodeparset = Parset()
                parsetname = os.path.join(work_dir,
                                          os.path.basename(infile) + '.parset')
                for k, v in kwargs.items():
                    nodeparset.add(k, v)
                nodeparset.writeFile(parsetname)
                args.insert(0, parsetname)

            try:
                # ****************************************************************
                #Run
                cmd = [executable] + args
                with CatchLog4CPlus(
                        work_dir,
                        self.logger.name + "." + os.path.basename(infile),
                        os.path.basename(executable),
                ) as logger:
                    # Catch segfaults and retry
                    catch_segfaults(cmd, work_dir, self.environment, logger)
            except CalledProcessError, err:
                # CalledProcessError isn't properly propagated by IPython
                self.logger.error(str(err))
                self.cleanup()
                return 1
            except Exception, err:
                self.logger.error(str(err))
                self.cleanup()
                return 1
Example 30
    def run(self, infile, executable, args, kwargs, work_dir='/tmp', parsetasfile=False, args_format='', environment=''):
        """
        This method contains all the needed functionality
        """
        # Debugging info
        self.logger.debug("infile            = %s" % infile)
        self.logger.debug("executable        = %s" % executable)
        self.logger.debug("working directory = %s" % work_dir)
        self.logger.debug("arguments         = %s" % args)
        self.logger.debug("arg dictionary    = %s" % kwargs)
        self.logger.debug("environment       = %s" % environment)

        self.environment.update(environment)

        # Time execution of this job
        with log_time(self.logger):
            #if os.path.exists(infile):
            self.logger.info("Processing %s" % infile)
           # else:
           #     self.logger.error("Dataset %s does not exist" % infile)
           #     return 1

            # Check if executable is present
            if not os.access(executable, os.X_OK):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # hurray! race condition when running with more than one process on one filesystem
            if not os.path.isdir(work_dir):
                try:
                    os.mkdir(work_dir, )
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(work_dir):
                        pass
                    else:
                        raise

            argsformat = args_format['args_format']
            # deal with multiple input files for wsclean
            if argsformat == 'wsclean':
                for i in reversed(xrange(len(args))):
                    if str(args[i]).startswith('[') and str(args[i]).endswith(']'):
                        tmplist = args.pop(i).lstrip('[').rstrip(']').split(',')
                        for val in reversed(tmplist):
                            args.insert(i, val.strip(' \'\"'))
            if not parsetasfile:
                if argsformat == 'gnu':
                    for k, v in kwargs.items():
                        args.append('--' + k + '=' + v)
                if argsformat == 'lofar':
                    for k, v in kwargs.items():
                        args.append(k + '=' + v)
                if argsformat == 'argparse':
                    for k, v in kwargs.items():
                        args.append('--' + k + ' ' + v)
                if argsformat == 'wsclean':
                    for k, v in kwargs.items():
                        if str(v).startswith('[') and str(v).endswith(']'):
                            v = v.lstrip('[').rstrip(']').replace(' ', '')
                            multargs = v.split(',')
                        else:
                            multargs = v.split(' ')
                        if multargs:
                            multargs.reverse()
                            for item in multargs:
                                args.insert(0, item)
                        else:
                            args.insert(0, v)
                        args.insert(0, '-'+ k)

            else:
                nodeparset = Parset()
                parsetname = os.path.join(work_dir, os.path.basename(infile) + '.parset')
                for k, v in kwargs.items():
                    nodeparset.add(k, v)
                nodeparset.writeFile(parsetname)
                if argsformat == 'losoto':
                    args.append(parsetname)
                else: 
                    args.insert(0,parsetname)

            try:
            # ****************************************************************
            # Run
                cmd = [executable] + args
                with CatchLog4CPlus(
                    work_dir,
                    self.logger.name + "." + os.path.basename(infile),
                    os.path.basename(executable),
                ) as logger:
                    # Catch segfaults and retry
                    catch_segfaults(
                        cmd, work_dir, self.environment, logger
                    )
            except CalledProcessError, err:
                # CalledProcessError isn't properly propagated by IPython
                self.logger.error(str(err))
                return 1
            except Exception, err:
                self.logger.error(str(err))
                return 1
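This variant additionally expands bracketed wsclean values into separate arguments before they reach the command line; a standalone illustration of that splitting (input invented for illustration):

value = "[LBA1.MS, LBA2.MS]"                     # hypothetical kwargs value
if value.startswith('[') and value.endswith(']'):
    parts = value.lstrip('[').rstrip(']').replace(' ', '').split(',')
else:
    parts = value.split(' ')
print(parts)   # ['LBA1.MS', 'LBA2.MS']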
Example 31
    def run(self, executable, environment, parset, working_directory,
            output_image, concatenated_measurement_set, sourcedb_path,
             mask_patch_size, autogenerate_parameters, specify_fov, fov, 
             major_cycle, nr_cycles, perform_self_cal):
        """
        :param executable: Path to the awimager executable
        :param environment: environment for catch_segfaults (the executable
            runner)
        :param parset: parameters for the awimager
        :param working_directory: directory in which to place temporary files
        :param output_image: location and filename to store the output images;
            the multiple images are appended with type extensions
        :param concatenated_measurement_set: Input measurement set
        :param sourcedb_path: Path to the sourcedb used to create the image
            mask
        :param mask_patch_size: Scaling of the patch around the source in the
            mask
        :param autogenerate_parameters: Turns on the autogeneration of:
            cellsize, npix, wprojplanes, wmax, fov
        :param fov: if autogenerate_parameters is false, calculate the image
            parameters (cellsize, npix, wprojplanes, wmax) relative to this
            fov
        :param major_cycle: number of the self-calibration cycle, used to
            determine the imaging parameters: cellsize, npix, wprojplanes,
            wmax, fov
        :param nr_cycles: The requested number of self-cal cycles
        :param perform_self_cal: Bool used to control the selfcal
            functionality or the old semi-automatic functionality
        :rtype: self.outputs["image"]: the path to the output image
        """
        self.logger.info("Start selfcal_awimager node run:")
        log4_cplus_name = "selfcal_awimager"
        self.environment.update(environment)

        with log_time(self.logger):
            # Read the parameters as specified in the parset
            parset_object = get_parset(parset)

            # *************************************************************
            # 1. Calculate awimager parameters that depend on measurement set
            # and the parset
            if perform_self_cal:
                # Calculate awimager parameters that depend on measurement set
                # and the parset              
                self.logger.info(
                   "Calculating selfcalibration parameters  ")
                cell_size, npix, w_max, w_proj_planes, \
                   UVmin, UVmax, robust, threshold =\
                        self._get_selfcal_parameters(
                            concatenated_measurement_set,
                            parset, major_cycle, nr_cycles) 

                self._save_selfcal_info(concatenated_measurement_set, 
                                        major_cycle, npix, UVmin, UVmax)

            else:
                self.logger.info(
                   "Calculating parameters.. ( NOT selfcalibration)")
                cell_size, npix, w_max, w_proj_planes = \
                    self._get_imaging_parameters(
                            concatenated_measurement_set,
                            parset,
                            autogenerate_parameters,
                            specify_fov,
                            fov)

            self.logger.info("Using autogenerated parameters; ")
            self.logger.info(
                 "Calculated parameters: cell_size: {0}, npix: {1}".format(
                     cell_size, npix))

            self.logger.info("w_max: {0}, w_proj_planes: {1} ".format(
                        w_max, w_proj_planes))

            # ****************************************************************
            # 2. Get the target image location from the mapfile for the parset.
            # Create target dir if it not exists
            image_path_head = os.path.dirname(output_image)
            create_directory(image_path_head)
            self.logger.debug("Created directory to place awimager output"
                              " files: {0}".format(image_path_head))

            # ****************************************************************
            # 3. Create the mask
            #mask_file_path = self._create_mask(npix, cell_size, output_image,
            #             concatenated_measurement_set, executable,
            #             working_directory, log4_cplus_name, sourcedb_path,
            #              mask_patch_size, image_path_head)
            # *****************************************************************
            # 4. Update the parset with calculated parameters, and output image
            patch_dictionary = {'uselogger': 'True',  # enables log4cplus logging
                               'ms': str(concatenated_measurement_set),
                               'cellsize': str(cell_size),
                               'npix': str(npix),
                               'wmax': str(w_max),
                               'wprojplanes': str(w_proj_planes),
                               'image': str(output_image),
                               'maxsupport': str(npix)
                               # 'mask':str(mask_file_path),  #TODO REINTRODUCE
                               # MASK, excluded to speed up in this debug stage                               
                               }

            # Add some additional keys from the self-calibration method
            if perform_self_cal:
                self_cal_patch_dict = {
                               'weight': 'briggs', 
                               'padding': str(1.18),
                               'niter' : str(1000000), 
                               'operation' : 'mfclark',
                               'timewindow' : '300',
                               'fits' : '',
                               'threshold' : str(threshold),
                               'robust' : str(robust),
                               'UVmin' : str(UVmin), 
                               'UVmax' : str(UVmax),
                               'maxbaseline' : str(10000000),
                               'select' : str("sumsqr(UVW[:2])<1e12"), 
                               }
                patch_dictionary.update(self_cal_patch_dict)

            # save the parset at the target dir for the image
            calculated_parset_path = os.path.join(image_path_head,
                                                       "parset.par")

            try:
                temp_parset_filename = patch_parset(parset, patch_dictionary)
                # Copy tmp file to the final location
                shutil.copyfile(temp_parset_filename, calculated_parset_path)
                self.logger.debug("Wrote parset for awimager run: {0}".format(
                                                    calculated_parset_path))
            finally:
                # remove temp file
                os.remove(temp_parset_filename)

            # *****************************************************************
            # 5. Run the awimager with the parameterset

            cmd = [executable, calculated_parset_path]
            self.logger.debug("Parset used for awimager run:")
            self.logger.debug(cmd)
            try:
                with CatchLog4CPlus(working_directory,
                        self.logger.name + "." +
                        os.path.basename(log4_cplus_name),
                        os.path.basename(executable)
                ) as logger:
                    catch_segfaults(cmd, working_directory, self.environment,
                                            logger, usageStats=self.resourceMonitor)

            # Thrown by catch_segfault
            except CalledProcessError as exception:
                self.logger.error(str(exception))
                return 1

            except Exception as exception:
                self.logger.error(str(exception))
                return 1
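
Steps 4 and 5 above follow a patch-then-install pattern: a temporary patched parset is written, copied to its final location, and the temporary file is removed no matter what. A sketch of the same pattern using only the standard library; patch_parset itself belongs to the LOFAR pipeline framework and is replaced here by a plain dictionary merge, and install_patched_parset is an illustrative name:

import os
import shutil
import tempfile

def install_patched_parset(base_keys, patch_dictionary, target_path):
    # Illustrative helper: merge patch_dictionary over base_keys, write the
    # result to a temporary parset, copy it to target_path, and always remove
    # the temporary file, mirroring the try/finally block above.
    merged = dict(base_keys)
    merged.update(patch_dictionary)
    fd, temp_parset_filename = tempfile.mkstemp(suffix=".parset")
    try:
        with os.fdopen(fd, 'w') as parset_file:
            for key, value in merged.items():
                parset_file.write("%s=%s\n" % (key, value))
        shutil.copyfile(temp_parset_filename, target_path)
    finally:
        os.remove(temp_parset_filename)
    return target_path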
Esempio n. 32
0
    def run(self, infile, executable, args, kwargs, work_dir='/tmp', parsetasfile=False, args_format='', environment=''):
        """
        This function contains all the needed functionality
        """
        # Debugging info
        self.logger.debug("infile            = %s" % infile)
        self.logger.debug("executable        = %s" % executable)
        self.logger.debug("working directory = %s" % work_dir)
        self.logger.debug("arguments         = %s" % args)
        self.logger.debug("arg dictionary    = %s" % kwargs)
        self.logger.debug("environment       = %s" % environment)

        self.environment.update(environment)

        # Time execution of this job
        with log_time(self.logger):
            if infile[0] == '[':
                infiles = [ms.strip(" []\'\"") for ms in infile.split(',')]
                reffile = infiles[0]
            else:
                reffile = infile

            if os.path.exists(reffile):
                self.logger.info("Processing %s" % reffile)
            else:
                self.logger.error("Dataset %s does not exist" % reffile)
                return 1

            # Check if executable is present
            if not os.access(executable, os.X_OK):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # race condition when running with more than one process on one filesystem
            if not os.path.isdir(work_dir):
                try:
                    os.mkdir(work_dir, )
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(work_dir):
                        pass
                    else:
                        raise

            if not parsetasfile:
                self.logger.error("Nodescript \"executable_casa.py\" requires \"parsetasfile\" to be True!")
                return 1
            else:
                nodeparset = Parset()
                sublist = []
                for k, v in kwargs.items():
                    nodeparset.add(k, v)
                    if '.' in str(k):
                        if str(k).split('.')[0] not in sublist:
                            sublist.append(str(k).split('.')[0])

                #quick hacks below. for proof of concept.
                casastring = ''
                for sub in sublist:
                    subpar = nodeparset.makeSubset(nodeparset.fullModuleName(sub) + '.')
                    casastring = sub + '('
                    for k in subpar.keys():
                        if str(subpar[k]).find('/') == 0:
                            casastring += str(k) + '=' + "'" + str(subpar[k]) + "'" + ','
                        elif str(subpar[k]).find('casastr/') == 0:
                            casastring += str(k) + '=' + "'" + str(subpar[k])[len('casastr/'):] + "'" + ','
                        elif str(subpar[k]).lower() == 'false' or str(subpar[k]).lower() == 'true':
                            casastring += str(k) + '=' + str(subpar[k]) + ','
                        else:
                            # Test if int/float or list of int/float
                            try:
                                self.logger.info('value: {}'.format(subpar[k]))
                                test = float(str(subpar[k]))
                                is_int_float = True
                            except ValueError:
                                is_int_float = False
                            if is_int_float:
                                casastring += str(k) + '=' + str(subpar[k]) + ','
                            else:
                                if '[' in str(subpar[k]) or '(' in str(subpar[k]):
                                    # Check if list of int/float or strings
                                    list_vals = [f.strip() for f in str(subpar[k]).strip('[]()').split(',')]
                                    is_int_float = True
                                    for list_val in list_vals:
                                        try:
                                            test = float(list_val)
                                        except ValueError:
                                            is_int_float = False
                                            break
                                    if is_int_float:
                                        casastring += str(k) + '=' + str(subpar[k]) + ','
                                    else:
                                        casastring += str(k) + '=' + '[{}]'.format(','.join(["'"+list_val+"'" for list_val in list_vals])) + ','
                                else:
                                    # Simple string
                                    casastring += str(k) + '=' + "'" + str(subpar[k]) + "'" + ','

                    casastring = casastring.rstrip(',')
                    casastring += ')\n'

                # 1) The return code of casapy is not properly recognized by the
                #    pipeline; wrapping it in a shell script works for successful
                #    runs, but failed runs seem to hang the pipeline.
                # 2) casapy cannot have two instances running from the same
                #    directory, so create temporary directories.
                casapydir = tempfile.mkdtemp(dir=work_dir)
                if casastring != '':
                    casafilename = os.path.join(work_dir, os.path.basename(reffile) + '.casacommand.py')
                    with open(casafilename, 'w') as casacommandfile:
                        casacommandfile.write(casastring)
                    args.append(casafilename)

                somename = os.path.join(work_dir, os.path.basename(reffile) + '.casashell.sh')
                commandstring = ''
                commandstring += executable
                for item in args:
                    if str(item).find(' ') > -1 or str(item).find('[') > -1:
                        commandstring += ' "' + item + '"'
                    else:
                        commandstring += ' ' + item

                with open(somename, 'w') as wrapper:
                    wrapper.write('#!/bin/bash \n')
                    wrapper.write('echo "Trying CASAPY command" \n')
                    wrapper.write(commandstring + ' >& casa.log\n')

                # file permissions
                st = os.stat(somename)
                os.chmod(somename, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

            try:
                # ****************************************************************
                # Run
                cmd = [somename]
                with CatchLog4CPlus(
                    casapydir,
                    self.logger.name + "." + os.path.basename(reffile),
                    os.path.basename(executable),
                ) as logger:
                    # Catch segfaults and retry
                    catch_segfaults(
                        cmd, casapydir, self.environment, logger
                    )
            except CalledProcessError as err:
                # CalledProcessError isn't properly propagated by IPython
                self.logger.error(str(err))
                return 1
            except Exception as err:
                self.logger.error(str(err))
                return 1
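
The loop above translates dotted parset keys into a single casapy task call such as clean(vis='in.ms', niter=1000). A reduced sketch of the same quoting rules, detached from the Parset class (the function name and the sample keys are illustrative only):

def build_casa_call(task, params):
    # Illustrative reimplementation of the quoting rules above: booleans and
    # numbers are passed bare, everything else is wrapped in single quotes.
    parts = []
    for key, value in params.items():
        text = str(value)
        if text.lower() in ('true', 'false'):
            parts.append("%s=%s" % (key, text))
        else:
            try:
                float(text)
                parts.append("%s=%s" % (key, text))
            except ValueError:
                parts.append("%s='%s'" % (key, text))
    return "%s(%s)\n" % (task, ','.join(parts))

# build_casa_call('clean', {'niter': 1000}) -> "clean(niter=1000)\n"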
Esempio n. 33
0
    def run(self, executable, environment, parset, working_directory,
            output_image, concatenated_measurement_set, sourcedb_path,
             mask_patch_size, autogenerate_parameters, specify_fov, fov, 
             major_cycle, nr_cycles, perform_self_cal):
        """
        :param executable: Path to the awimager executable
        :param environment: Environment for catch_segfaults (the executable runner)
        :param parset: Parameters for the awimager
        :param working_directory: Directory in which to place temporary files
        :param output_image: Location and filename to store the output images;
          the multiple images are appended with type extensions
        :param concatenated_measurement_set: Input measurement set
        :param sourcedb_path: Path to the sourcedb used to create the image
          mask
        :param mask_patch_size: Scaling of the patch around the source in the
          mask
        :param autogenerate_parameters: Turns on the autogeneration of:
           cellsize, npix, wprojplanes, wmax, fov
        :param fov: If autogenerate_parameters is false, calculate the image
           parameters (cellsize, npix, wprojplanes, wmax) relative to this
           fov
        :param major_cycle: Number of the self-calibration cycle, used to
            determine the imaging parameters: cellsize, npix, wprojplanes,
            wmax, fov
        :param nr_cycles: The requested number of self-calibration cycles
        :param perform_self_cal: Bool that selects the self-calibration
            functionality instead of the old semi-automatic functionality
        :rtype: self.outputs["image"] The path to the output image
        """
        self.logger.info("Start selfcal_awimager node run:")
        log4_cplus_name = "selfcal_awimager"
        self.environment.update(environment)

        with log_time(self.logger):
            # Read the parameters as specified in the parset
            parset_object = get_parset(parset)

            # *************************************************************
            # 1. Calculate awimager parameters that depend on measurement set
            # and the parset
            if perform_self_cal:
                # Calculate awimager parameters that depend on measurement set
                # and the parset              
                self.logger.info(
                   "Calculating selfcalibration parameters  ")
                cell_size, npix, w_max, w_proj_planes, \
                   UVmin, UVmax, robust, threshold =\
                        self._get_selfcal_parameters(
                            concatenated_measurement_set,
                            parset, major_cycle, nr_cycles) 

                self._save_selfcal_info(concatenated_measurement_set, 
                                        major_cycle, npix, UVmin, UVmax)

            else:
                self.logger.info(
                   "Calculating parameters.. ( NOT selfcalibration)")
                cell_size, npix, w_max, w_proj_planes = \
                    self._get_imaging_parameters(
                            concatenated_measurement_set,
                            parset,
                            autogenerate_parameters,
                            specify_fov,
                            fov)

            self.logger.info("Using autogenerated parameters; ")
            self.logger.info(
                 "Calculated parameters: cell_size: {0}, npix: {1}".format(
                     cell_size, npix))

            self.logger.info("w_max: {0}, w_proj_planes: {1} ".format(
                        w_max, w_proj_planes))

            # ****************************************************************
            # 2. Get the target image location from the mapfile for the parset.
            # Create target dir if it not exists
            image_path_head = os.path.dirname(output_image)
            create_directory(image_path_head)
            self.logger.debug("Created directory to place awimager output"
                              " files: {0}".format(image_path_head))

            # ****************************************************************
            # 3. Create the mask
            #mask_file_path = self._create_mask(npix, cell_size, output_image,
            #             concatenated_measurement_set, executable,
            #             working_directory, log4_cplus_name, sourcedb_path,
            #              mask_patch_size, image_path_head)
            # *****************************************************************
            # 4. Update the parset with calculated parameters, and output image
            patch_dictionary = {'uselogger': 'True',  # enables log4cplus logging
                               'ms': str(concatenated_measurement_set),
                               'cellsize': str(cell_size),
                               'npix': str(npix),
                               'wmax': str(w_max),
                               'wprojplanes': str(w_proj_planes),
                               'image': str(output_image),
                               'maxsupport': str(npix)
                               # 'mask':str(mask_file_path),  #TODO REINTRODUCE
                               # MASK, excluded to speed up in this debug stage                               
                               }

            # Add some additional keys from the self-calibration method
            if perform_self_cal:
                self_cal_patch_dict = {
                               'weight': 'briggs', 
                               'padding': str(1.18),
                               'niter' : str(1000000), 
                               'operation' : 'mfclark',
                               'timewindow' : '300',
                               'fits' : '',
                               'threshold' : str(threshold),
                               'robust' : str(robust),
                               'UVmin' : str(UVmin), 
                               'UVmax' : str(UVmax),
                               'maxbaseline' : str(10000000),
                               'select' : str("sumsqr(UVW[:2])<1e12"), 
                               }
                patch_dictionary.update(self_cal_patch_dict)

            # save the parset at the target dir for the image
            calculated_parset_path = os.path.join(image_path_head,
                                                       "parset.par")

            try:
                temp_parset_filename = patch_parset(parset, patch_dictionary)
                # Copy tmp file to the final location
                shutil.copyfile(temp_parset_filename, calculated_parset_path)
                self.logger.debug("Wrote parset for awimager run: {0}".format(
                                                    calculated_parset_path))
            finally:
                # remove temp file
                os.remove(temp_parset_filename)

            # *****************************************************************
            # 5. Run the awimager with the parameterset

            cmd = [executable, calculated_parset_path]
            self.logger.debug("Parset used for awimager run:")
            self.logger.debug(cmd)
            try:
                with CatchLog4CPlus(working_directory,
                        self.logger.name + "." +
                        os.path.basename(log4_cplus_name),
                        os.path.basename(executable)
                ) as logger:
                    catch_segfaults(cmd, working_directory, self.environment,
                                            logger, usageStats=self.resourceMonitor)

            # Thrown by catch_segfault
            except CalledProcessError as exception:
                self.logger.error(str(exception))
                return 1

            except Exception as exception:
                self.logger.error(str(exception))
                return 1

        # *********************************************************************
        # 6. Return output
        # Append the static ".restored" extension: this might change, but
        # probably not. The actual output image always carries this extension
        # by default in awimager.
        self.outputs["image"] = output_image + ".restored"
        return 0
Esempio n. 34
0
    def run(self, executable, environment, parset, working_directory,
            output_image, concatenated_measurement_set, sourcedb_path,
            mask_patch_size, autogenerate_parameters, specify_fov, fov):
        """
        :param executable: Path to the awimager executable
        :param environment: Environment for catch_segfaults (the executable runner)
        :param parset: Parameters for the awimager
        :param working_directory: Directory in which to place temporary files
        :param output_image: Location and filename to store the output images;
          the multiple images are appended with type extensions
        :param concatenated_measurement_set: Input measurement set
        :param sourcedb_path: Path to the sourcedb used to create the image
          mask
        :param mask_patch_size: Scaling of the patch around the source in the
          mask
        :param autogenerate_parameters: Turns on the autogeneration of:
           cellsize, npix, wprojplanes, wmax, fov
        :param fov: If autogenerate_parameters is false, calculate the image
           parameters (cellsize, npix, wprojplanes, wmax) relative to this
           fov
        :rtype: self.outputs["image"] The path to the output image
        """
        self.logger.info("Start imager_awimager node run:")
        log4_cplus_name = "imager_awimager"
        self.environment.update(environment)

        with log_time(self.logger):
            # Read the parameters as specified in the parset
            parset_object = get_parset(parset)

            #******************************************************************
            # 0. Create the directories used in this recipe
            create_directory(working_directory)

            # *************************************************************
            # 1. Calculate awimager parameters that depend on measurement set
            # and the parset

            cell_size, npix, w_max, w_proj_planes = \
                    self._get_imaging_parameters(
                            concatenated_measurement_set,
                            parset,
                            autogenerate_parameters,
                            specify_fov,
                            fov)

            self.logger.info("Using autogenerated parameters; ")
            self.logger.info(
                "Calculated parameters: cell_size: {0}, npix: {1}".format(
                    cell_size, npix))

            self.logger.info("w_max: {0}, w_proj_planes: {1} ".format(
                w_max, w_proj_planes))

            # ****************************************************************
            # 2. Get the target image location from the mapfile for the parset.
            # Create target dir if it not exists
            image_path_head = os.path.dirname(output_image)
            create_directory(image_path_head)
            self.logger.debug("Created directory to place awimager output"
                              " files: {0}".format(image_path_head))

            # ****************************************************************
            # 3. Create the mask
            mask_file_path = self._create_mask(npix, cell_size, output_image,
                                               concatenated_measurement_set,
                                               executable, working_directory,
                                               log4_cplus_name, sourcedb_path,
                                               mask_patch_size,
                                               image_path_head)

            # *****************************************************************
            # 4. Update the parset with calculated parameters, and output image
            patch_dictionary = {
                'uselogger': 'True',  # enables log4cpluscd log
                'ms': str(concatenated_measurement_set),
                'cellsize': str(cell_size),
                'npix': str(npix),
                'wmax': str(w_max),
                'wprojplanes': str(w_proj_planes),
                'image': str(output_image),
                'maxsupport': str(npix),
                # 'mask':str(mask_file_path),  #TODO REINTRODUCE
                # MASK, excluded to speed up in this debug stage
            }

            # save the parset at the target dir for the image
            calculated_parset_path = os.path.join(image_path_head,
                                                  "parset.par")

            try:
                temp_parset_filename = patch_parset(parset, patch_dictionary)
                # Copy tmp file to the final location
                shutil.copyfile(temp_parset_filename, calculated_parset_path)
                self.logger.debug("Wrote parset for awimager run: {0}".format(
                    calculated_parset_path))
            finally:
                # remove temp file
                os.remove(temp_parset_filename)

            # *****************************************************************
            # 5. Run the awimager with the updated parameterset
            cmd = [executable, calculated_parset_path]
            try:
                with CatchLog4CPlus(
                        working_directory, self.logger.name + "." +
                        os.path.basename(log4_cplus_name),
                        os.path.basename(executable)) as logger:
                    catch_segfaults(cmd,
                                    working_directory,
                                    self.environment,
                                    logger,
                                    usageStats=self.resourceMonitor)

            # Thrown by catch_segfault
            except CalledProcessError as exception:
                self.logger.error(str(exception))
                return 1

            except Exception as exception:
                self.logger.error(str(exception))
                return 1
Esempio n. 35
0
    def run(self, infile, executable, args, kwargs, work_dir='/tmp', parsetasfile=True, args_format='', environment=''):
        """
        This method contains all the needed functionality
        """

        # Debugging info
        self.logger.debug("infile            = %s" % infile)
        self.logger.debug("executable        = %s" % executable)
        self.logger.debug("working directory = %s" % work_dir)
        self.logger.debug("arguments         = %s" % args)
        self.logger.debug("arg dictionary    = %s" % kwargs)
        self.logger.debug("environment       = %s" % environment)

        self.environment.update(environment)

        self.work_dir = work_dir
        self.infile = infile
        self.executable = executable

        if 'replace-sourcedb' in kwargs:
            self.replace_sourcedb = kwargs['replace-sourcedb']
            kwargs.pop('replace-sourcedb')
        if 'replace-parmdb' in kwargs:
            self.replace_parmdb = kwargs['replace-parmdb']
            kwargs.pop('replace-parmdb')
        if 'dry-run' in kwargs:
            self.dry_run = kwargs['dry-run']
            kwargs.pop('dry-run')
        if 'sourcedb' in kwargs:
            self.sourcedb = kwargs['sourcedb']
            kwargs.pop('sourcedb')
        if 'parmdb' in kwargs:
            self.parmdb = kwargs['parmdb']
            kwargs.pop('parmdb')
        if 'sourcedb-name' in kwargs:
            self.sourcedb_basename = kwargs['sourcedb-name']
            self.replace_sourcedb = True
            kwargs.pop('sourcedb-name')
        if 'parmdb-name' in kwargs:
            self.parmdb_basename = kwargs['parmdb-name']
            self.replace_parmdb = True
            kwargs.pop('parmdb-name')
        if 'force' in kwargs:
            self.replace_parmdb = True
            self.replace_sourcedb = True
            kwargs.pop('force')
        numthreads = 1
        if 'numthreads' in kwargs:
            numthreads = kwargs['numthreads']
            kwargs.pop('numthreads')
        args.append('--numthreads='+str(numthreads))
        if 'observation' in kwargs:
            self.observation = kwargs.pop('observation')
        if 'catalog' in kwargs:
            self.catalog = kwargs.pop('catalog')

        self.createsourcedb()
        self.createparmdb()
        if 'no-columns' not in kwargs:
            #if not kwargs['no-columns']:
            self.addcolumns()
        else:
            kwargs.pop('no-columns')

        args.append('--sourcedb=' + self.sourcedb_path)
        args.append('--parmdb=' + self.parmdb_path)

        args.append(self.observation)
        #catalog = None


        # Time execution of this job
        with log_time(self.logger):
            #if os.path.exists(infile):
            self.logger.info("Processing %s" % infile)

            # Check if script is present
            if not os.path.isfile(executable):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # Guard against a race condition when running with more than one process on one filesystem
            if not os.path.isdir(work_dir):
                try:
                    os.mkdir(work_dir, )
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(work_dir):
                        pass
                    else:
                        raise

            if parsetasfile:
                nodeparset = Parset()
                parsetname = os.path.join(work_dir, os.path.basename(infile) + '.parset')
                for k, v in kwargs.items():
                    nodeparset.add(k, v)
                nodeparset.writeFile(parsetname)
                #args.insert(0, parsetname)
                args.append(parsetname)

            #if catalog is not None:
            #    args.append(catalog)

            try:
                # ****************************************************************
                # Run
                cmd = [executable] + args
                with CatchLog4CPlus(
                    work_dir,
                    self.logger.name + "." + os.path.basename(infile),
                    os.path.basename(executable),
                ) as logger:
                    # Catch segfaults and retry
                    catch_segfaults(
                        cmd, work_dir, self.environment, logger
                    )
            except CalledProcessError as err:
                # CalledProcessError isn't properly propagated by IPython
                self.logger.error(str(err))
                return 1
            except Exception as err:
                self.logger.error(str(err))
                return 1
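
The block above pops pipeline-control options (replace-sourcedb, dry-run, numthreads, and so on) out of kwargs before the remaining keys are written to the parset. A reduced sketch of that separation, with the option names taken from the example and the helper name purely illustrative:

def split_control_options(kwargs):
    # Illustrative helper: pop the pipeline-control options out of kwargs;
    # whatever remains is written to the parset for the executable itself.
    control_keys = ('replace-sourcedb', 'replace-parmdb', 'dry-run',
                    'sourcedb', 'parmdb', 'sourcedb-name', 'parmdb-name',
                    'force', 'numthreads', 'observation', 'catalog',
                    'no-columns')
    control = {}
    for key in control_keys:
        if key in kwargs:
            control[key] = kwargs.pop(key)
    return control, kwargs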
Esempio n. 36
0
class imager_finalize(LOFARnodeTCP):
    """
    This script performs the following functions:
    
    1. Add the image info to the casa image:
       addimg.addImagingInfo (imageName, msNames, sourcedbName, minbl, maxbl)
    2. Convert the image to hdf5 and fits image
    3. Filling of the HDF5 root group
    4. Export fits image to msss image server
    5. Export sourcelist to msss server, copy the sourcelist to hdf5 location
    6. Return the outputs
    """
    def run(self, awimager_output, ms_per_image, sourcelist, target,
            output_image, minbaseline, maxbaseline, processed_ms_dir,
            fillrootimagegroup_exec, environment, sourcedb):
        """
        :param awimager_output: Path to the casa image produced by awimager
        :param ms_per_image: The X (90) measurement sets scheduled to
            create the image
        :param sourcelist: List of sources found in the image
        :param target: <unused>
        :param minbaseline: Minimum baseline used for the image
        :param maxbaseline: Largest/maximum baseline used for the image
        :param processed_ms_dir: The X (90) measurement sets actually used to
            create the image
        :param fillrootimagegroup_exec: Executable used to add image data to
            the hdf5 image

        :rtype: self.outputs['hdf5'] set to "success" to signal node success
        :rtype: self.outputs['image'] path to the produced hdf5 image
        """
        self.environment.update(environment)
        with log_time(self.logger):
            ms_per_image_map = DataMap.load(ms_per_image)

            # *****************************************************************
            # 1. add image info
            # Get all the files in the processed measurement dir
            file_list = os.listdir(processed_ms_dir)
            # TODO: BUG!! the meta data might contain files that were copied
            # but failed in imager_bbs
            processed_ms_paths = []
            for item in ms_per_image_map:
                path = item.file
                ms_file_name = os.path.split(path)[1]
                #if the ms is in the processed dir (additional check)
                if (ms_file_name in file_list):
                    # save the path
                    processed_ms_paths.append(
                        os.path.join(processed_ms_dir, ms_file_name))
            # add the information to the image
            try:
                addimg.addImagingInfo(awimager_output, processed_ms_paths,
                                      sourcedb, minbaseline, maxbaseline)

            except Exception as error:
                self.logger.warn("addImagingInfo Threw Exception:")
                self.logger.warn(error)
                # Catch raising of already done error: allows for rerunning
                # of the recipe
                if "addImagingInfo already done" in str(error):
                    pass
                else:
                    raise Exception(error)
                # The majority of the tables are updated correctly

            # ***************************************************************
            # 2. convert to hdf5 image format
            output_directory = None
            pim_image = pim.image(awimager_output)
            try:
                self.logger.info(
                    "Saving image in HDF5 Format to: {0}".format(output_image))
                # Create the output directory
                output_directory = os.path.dirname(output_image)
                create_directory(output_directory)
                # save the image
                pim_image.saveas(output_image, hdf5=True)

            except Exception as error:
                self.logger.error(
                    "Exception raised inside pyrap.images: {0}".format(
                        str(error)))
                raise error

            # Convert to fits
            # create target location
            fits_output = output_image + ".fits"
            # To allow reruns a possible earlier version needs to be removed!
            # image2fits fails if not done!!
            if os.path.exists(fits_output):
                os.unlink(fits_output)

            try:
                temp_dir = tempfile.mkdtemp(suffix=".%s" %
                                            (os.path.basename(__file__), ))
                with CatchLog4CPlus(
                        temp_dir, self.logger.name + '.' +
                        os.path.basename(awimager_output),
                        "image2fits") as logger:
                    catch_segfaults([
                        "image2fits", '-in', awimager_output, '-out',
                        fits_output
                    ], temp_dir, self.environment, logger)
            except Exception as excp:
                self.logger.error(str(excp))
                return 1
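
Before the image2fits call, any earlier FITS output is removed because image2fits refuses to overwrite an existing file. The same remove-then-convert step, sketched with plain subprocess instead of catch_segfaults (the tool name and its -in/-out flags come from the example above; everything else is an assumption):

import os
import subprocess
import tempfile

def convert_to_fits(casa_image, fits_output):
    # image2fits fails if the output already exists, so remove it first,
    # then run the converter from a throw-away temporary directory.
    if os.path.exists(fits_output):
        os.unlink(fits_output)
    temp_dir = tempfile.mkdtemp(suffix=".image2fits")
    subprocess.check_call(
        ["image2fits", "-in", casa_image, "-out", fits_output],
        cwd=temp_dir)
    return fits_output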
Esempio n. 37
0
    def run(self,
            infile,
            executable,
            args,
            kwargs,
            work_dir='/tmp',
            parsetasfile=False,
            args_format='',
            environment=''):
        """
        This method contains all the needed functionality
        """
        # Debugging info
        self.logger.debug("infile            = %s" % infile)
        self.logger.debug("executable        = %s" % executable)
        self.logger.debug("working directory = %s" % work_dir)
        self.logger.debug("arguments         = %s" % args)
        self.logger.debug("arg dictionary    = %s" % kwargs)
        self.logger.debug("environment       = %s" % environment)

        self.environment.update(environment)

        # Time execution of this job
        with log_time(self.logger):
            #if os.path.exists(infile):
            self.logger.info("Processing %s" % infile)
            # else:
            #     self.logger.error("Dataset %s does not exist" % infile)
            #     return 1

            # Check if executable is present
            if not os.access(executable, os.X_OK):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # Guard against a race condition when running with more than one process on one filesystem
            if not os.path.isdir(work_dir):
                try:
                    os.mkdir(work_dir, )
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(work_dir):
                        pass
                    else:
                        raise

            argsformat = args_format['args_format']
            # deal with multiple input files for wsclean
            if argsformat == 'wsclean':
                for i in reversed(xrange(len(args))):
                    if str(args[i]).startswith('[') and str(
                            args[i]).endswith(']'):
                        tmplist = args.pop(i).lstrip('[').rstrip(']').split(
                            ',')
                        for val in reversed(tmplist):
                            args.insert(i, val.strip(' \'\"'))
            if not parsetasfile:
                if argsformat == 'gnu':
                    for k, v in kwargs.items():
                        args.append('--' + k + '=' + v)
                if argsformat == 'lofar':
                    for k, v in kwargs.items():
                        args.append(k + '=' + v)
                if argsformat == 'argparse':
                    for k, v in kwargs.items():
                        args.append('--' + k + ' ' + v)
                if argsformat == 'wsclean':
                    for k, v in kwargs.items():
                        if str(v).startswith('[') and str(v).endswith(']'):
                            v = v.lstrip('[').rstrip(']').replace(' ', '')
                            multargs = v.split(',')
                        else:
                            multargs = v.split(' ')
                        if multargs:
                            multargs.reverse()
                            for item in multargs:
                                args.insert(0, item)
                        else:
                            args.insert(0, v)
                        args.insert(0, '-' + k)

            else:
                nodeparset = Parset()
                parsetname = os.path.join(work_dir,
                                          os.path.basename(infile) + '.parset')
                for k, v in kwargs.items():
                    nodeparset.add(k, v)
                nodeparset.writeFile(parsetname)
                if argsformat == 'losoto':
                    args.append(parsetname)
                else:
                    args.insert(0, parsetname)

            try:
                # ****************************************************************
                # Run
                cmd = [executable] + args
                with CatchLog4CPlus(
                        work_dir,
                        self.logger.name + "." + os.path.basename(infile),
                        os.path.basename(executable),
                ) as logger:
                    # Catch segfaults and retry
                    catch_segfaults(cmd, work_dir, self.environment, logger)
            except CalledProcessError as err:
                # CalledProcessError isn't properly propagated by IPython
                self.logger.error(str(err))
                return 1
            except Exception as err:
                self.logger.error(str(err))
                return 1
Esempio n. 38
0
    def run(self, infile, outfile, parmdb, sourcedb,
            parsetfile, executable, working_directory, environment, demix_always, demix_if_needed,
            start_time, end_time, nthreads, clobber):
        """
        This function contains all the needed functionality
        """
        # Debugging info
        self.logger.debug("infile          = %s" % infile)
        self.logger.debug("outfile         = %s" % outfile)
        self.logger.debug("parmdb          = %s" % parmdb)
        self.logger.debug("sourcedb        = %s" % sourcedb)
        self.logger.debug("parsetfile      = %s" % parsetfile)
        self.logger.debug("executable      = %s" % executable)
        self.logger.debug("working_dir     = %s" % working_directory)
        self.logger.debug("environment     = %s" % environment)
        self.logger.debug("demix_always    = %s" % demix_always)
        self.logger.debug("demix_if_needed = %s" % demix_if_needed)
        self.logger.debug("start_time      = %s" % start_time)
        self.logger.debug("end_time        = %s" % end_time)
        self.logger.debug("nthreads        = %s" % nthreads)
        self.logger.debug("clobber         = %s" % clobber)

        self.environment.update(environment)

        # ********************************************************************
        # 1. preparations. set nthreads, Validate input, clean workspace
        #           
        if not nthreads:
            nthreads = 1
        if not outfile:
            outfile = infile
        tmpfile = outfile + '.tmp'

        # Time execution of this job
        with log_time(self.logger):
            if os.path.exists(infile):
                self.logger.info("Processing %s" % (infile))
            else:
                self.logger.error("Dataset %s does not exist" % (infile))
                return 1

            # Check if DPPP executable is present
            if not os.access(executable, os.X_OK):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # Make sure that we start with a clean slate
            shutil.rmtree(tmpfile, ignore_errors=True)
            if clobber:
                if outfile == infile:
                    self.logger.warn(
                        "Input and output are identical, not clobbering %s" %
                        outfile
                    )
                else:
                    self.logger.info("Removing previous output %s" % outfile)
                    shutil.rmtree(outfile, ignore_errors=True)

            # *****************************************************************
            # 2. Perform house keeping, test if work is already done
            # If input and output files are different, and if output file
            # already exists, then we're done.
            if outfile != infile and os.path.exists(outfile):
                self.logger.info(
                    "Output file %s already exists. We're done." % outfile
                )
                self.outputs['ok'] = True
                return 0

            # Create a working copy if input and output are identical, to
            # avoid corrupting the original file if things go awry.
            if outfile == infile:
                self.logger.info(
                    "Creating working copy: %s --> %s" % (infile, tmpfile)
                )
                shutil.copytree(infile, tmpfile)

            # *****************************************************************
            # 3. Update the parset with locally calculate information

            # Put arguments we need to pass to some private methods in a dict
            kwargs = {
                'infile' : infile,
                'tmpfile' : tmpfile,
                'parmdb' : parmdb,
                'sourcedb' : sourcedb,
                'parsetfile' : parsetfile,
                'demix_always' : demix_always,
                'demix_if_needed' : demix_if_needed,
                'start_time' : start_time,
                'end_time' : end_time
            }

            # Prepare for the actual DPPP run.
            # *****************************************************************
            # 4. Add ms names to the parset, start/end times if available, etc.
            # 5. Add demixing parameters to the parset
            with patched_parset(
                parsetfile, self._prepare_steps(**kwargs),
                output_dir=working_directory, unlink=False
            ) as temp_parset_filename:

                self.logger.debug("Created temporary parset file: %s" %
                    temp_parset_filename
                )
                try:
                    working_dir = tempfile.mkdtemp(
                        dir=working_directory,
                        suffix=".%s" % (os.path.basename(__file__),))
                    # ****************************************************************
                    # 6. Run ndppp
                    cmd = [executable, temp_parset_filename, '1']

                    with CatchLog4CPlus(
                        working_dir,
                        self.logger.name + "." + os.path.basename(infile),
                        os.path.basename(executable),
                    ) as logger:
                        # Catch NDPPP segfaults (a regular occurrence), and retry

                        catch_segfaults(
                            cmd, working_dir, self.environment, logger,
                            cleanup=lambda : shutil.rmtree(tmpfile, ignore_errors=True)
                        )
                        # Replace outfile with the updated working copy
                        shutil.rmtree(outfile, ignore_errors=True)
                        os.rename(tmpfile, outfile)
                except CalledProcessError as err:
                    # CalledProcessError isn't properly propagated by IPython
                    self.logger.error(str(err))
                    return 1
                except Exception as err:
                    self.logger.error(str(err))
                    return 1
                finally:
                    shutil.rmtree(working_dir)
Esempio n. 39
0
    def run(self, infile, outfile, parmdb, sourcedb, parsetfile, executable,
            working_directory, environment, demix_always, demix_if_needed,
            start_time, end_time, nthreads, clobber):
        """
        This function contains all the needed functionality
        """
        # Debugging info
        self.logger.debug("infile          = %s" % infile)
        self.logger.debug("outfile         = %s" % outfile)
        self.logger.debug("parmdb          = %s" % parmdb)
        self.logger.debug("sourcedb        = %s" % sourcedb)
        self.logger.debug("parsetfile      = %s" % parsetfile)
        self.logger.debug("executable      = %s" % executable)
        self.logger.debug("working_dir     = %s" % working_directory)
        self.logger.debug("environment     = %s" % environment)
        self.logger.debug("demix_always    = %s" % demix_always)
        self.logger.debug("demix_if_needed = %s" % demix_if_needed)
        self.logger.debug("start_time      = %s" % start_time)
        self.logger.debug("end_time        = %s" % end_time)
        self.logger.debug("nthreads        = %s" % nthreads)
        self.logger.debug("clobber         = %s" % clobber)

        self.environment.update(environment)

        # ********************************************************************
        # 1. preparations. set nthreads, Validate input, clean workspace
        #
        if not nthreads:
            nthreads = 1
        if not outfile:
            outfile = infile
        tmpfile = outfile + '.tmp'

        # Time execution of this job
        with log_time(self.logger):
            if os.path.exists(infile):
                self.logger.info("Processing %s" % (infile))
            else:
                self.logger.error("Dataset %s does not exist" % (infile))
                return 1

            # Check if DPPP executable is present
            if not os.access(executable, os.X_OK):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # Make sure that we start with a clean slate
            shutil.rmtree(tmpfile, ignore_errors=True)
            if clobber:
                if outfile == infile:
                    self.logger.warn(
                        "Input and output are identical, not clobbering %s" %
                        outfile)
                else:
                    self.logger.info("Removing previous output %s" % outfile)
                    shutil.rmtree(outfile, ignore_errors=True)

            # *****************************************************************
            # 2. Perform house keeping, test if work is already done
            # If input and output files are different, and if output file
            # already exists, then we're done.
            if outfile != infile and os.path.exists(outfile):
                self.logger.info("Output file %s already exists. We're done." %
                                 outfile)
                self.outputs['ok'] = True
                return 0

            # Create a working copy if input and output are identical, to
            # avoid corrupting the original file if things go awry.
            if outfile == infile:
                self.logger.info("Creating working copy: %s --> %s" %
                                 (infile, tmpfile))
                shutil.copytree(infile, tmpfile)

            # *****************************************************************
            # 3. Update the parset with locally calculate information

            # Put arguments we need to pass to some private methods in a dict
            kwargs = {
                'infile': infile,
                'tmpfile': tmpfile,
                'parmdb': parmdb,
                'sourcedb': sourcedb,
                'parsetfile': parsetfile,
                'demix_always': demix_always,
                'demix_if_needed': demix_if_needed,
                'start_time': start_time,
                'end_time': end_time
            }

            # Prepare for the actual DPPP run.
            # *****************************************************************
            # 4. Add ms names to the parset, start/end times if available, etc.
            # 5. Add demixing parameters to the parset
            with patched_parset(
                    parsetfile,
                    self._prepare_steps(**kwargs),
                    output_dir=working_directory,
                    unlink=False) as temp_parset_filename:

                self.logger.debug("Created temporary parset file: %s" %
                                  temp_parset_filename)
                try:
                    working_dir = tempfile.mkdtemp(
                        dir=working_directory,
                        suffix=".%s" % (os.path.basename(__file__), ))
                    # ****************************************************************
                    # 6. Run ndppp
                    cmd = [executable, temp_parset_filename, '1']

                    with CatchLog4CPlus(
                            working_dir,
                            self.logger.name + "." + os.path.basename(infile),
                            os.path.basename(executable),
                    ) as logger:
                        # Catch NDPPP segfaults (a regular occurrence), and retry

                        catch_segfaults(cmd,
                                        working_dir,
                                        self.environment,
                                        logger,
                                        cleanup=lambda: shutil.rmtree(
                                            tmpfile, ignore_errors=True))
                        # Replace outfile with the updated working copy
                        shutil.rmtree(outfile, ignore_errors=True)
                        os.rename(tmpfile, outfile)
                except CalledProcessError as err:
                    # CalledProcessError isn't properly propagated by IPython
                    self.logger.error(str(err))
                    return 1
                except Exception as err:
                    self.logger.error(str(err))
                    return 1
                finally:
                    shutil.rmtree(working_dir)

        # We need some signal to the master script that the script ran ok.
        self.outputs['ok'] = True
        return 0
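
Examples 38 and 39 protect the input dataset when infile and outfile are identical: NDPPP works on a .tmp copy and the original is only replaced once the run succeeds. The same safeguard, reduced to the file handling alone (process stands in for the actual NDPPP run):

import os
import shutil

def run_on_working_copy(dataset, process):
    # Work on a .tmp copy of the dataset so a failed run cannot corrupt it;
    # only replace the original once `process` has succeeded.
    tmpfile = dataset + '.tmp'
    shutil.rmtree(tmpfile, ignore_errors=True)
    shutil.copytree(dataset, tmpfile)
    try:
        process(tmpfile)
    except Exception:
        shutil.rmtree(tmpfile, ignore_errors=True)
        raise
    shutil.rmtree(dataset, ignore_errors=True)
    os.rename(tmpfile, dataset)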
Esempio n. 40
0
    def run(self,
            infile,
            executable,
            args,
            kwargs,
            work_dir='/tmp',
            parsetasfile=False,
            args_format='',
            environment=''):
        """
        This function contains all the needed functionality
        """
        # Debugging info
        self.logger.debug("infile            = %s" % infile)
        self.logger.debug("executable        = %s" % executable)
        self.logger.debug("working directory = %s" % work_dir)
        self.logger.debug("arguments         = %s" % args)
        self.logger.debug("arg dictionary    = %s" % kwargs)
        self.logger.debug("environment       = %s" % environment)

        self.environment.update(environment)

        # Time execution of this job
        with log_time(self.logger):
            if infile[0] == '[':
                infiles = [ms.strip(" []\'\"") for ms in infile.split(',')]
                reffile = infiles[0]
            else:
                reffile = infile

            if os.path.exists(reffile):
                self.logger.info("Processing %s" % reffile)
            else:
                self.logger.error("Dataset %s does not exist" % reffile)
                return 1

            # Check if executable is present
            if not os.access(executable, os.X_OK):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # race condition when running with more than one process on one filesystem
            if not os.path.isdir(work_dir):
                try:
                    os.mkdir(work_dir, )
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(work_dir):
                        pass
                    else:
                        raise

            if not parsetasfile:
                self.logger.error(
                    "Nodescript \"executable_casa.py\" requires \"parsetasfile\" to be True!"
                )
                return 1
            else:
                nodeparset = Parset()
                sublist = []
                for k, v in kwargs.items():
                    nodeparset.add(k, v)
                    if '.' in str(k):
                        if not str(k).split('.')[0] in sublist:
                            sublist.append(str(k).split('.')[0])

                # Quick hack below; proof of concept only.
                casastring = ''
                for sub in sublist:
                    subpar = nodeparset.makeSubset(
                        nodeparset.fullModuleName(sub) + '.')
                    casastring = sub + '('
                    for k in subpar.keys():
                        if str(subpar[k]).find('/') == 0:
                            casastring += str(k) + '=' + "'" + str(
                                subpar[k]) + "'" + ','
                        elif str(subpar[k]).find('casastr/') == 0:
                            # Drop the 'casastr/' prefix; str.strip() would
                            # remove arbitrary characters, not the prefix.
                            casastring += str(k) + '=' + "'" + str(
                                subpar[k])[len('casastr/'):] + "'" + ','
                        elif str(subpar[k]).lower() == 'false' or str(
                                subpar[k]).lower() == 'true':
                            casastring += str(k) + '=' + str(subpar[k]) + ','
                        else:
                            # Test if int/float or list of int/float
                            try:
                                self.logger.info('value: {}'.format(subpar[k]))
                                test = float(str(subpar[k]))
                                is_int_float = True
                            except ValueError:
                                is_int_float = False
                            if is_int_float:
                                casastring += str(k) + '=' + str(
                                    subpar[k]) + ','
                            else:
                                if '[' in str(subpar[k]) or '(' in str(
                                        subpar[k]):
                                    # Check if list of int/float or strings
                                    list_vals = [
                                        f.strip() for f in str(
                                            subpar[k]).strip('[]()').split(',')
                                    ]
                                    is_int_float = True
                                    for list_val in list_vals:
                                        try:
                                            test = float(list_val)
                                        except ValueError:
                                            is_int_float = False
                                            break
                                    if is_int_float:
                                        casastring += str(k) + '=' + str(
                                            subpar[k]) + ','
                                    else:
                                        casastring += str(
                                            k) + '=' + '[{}]'.format(','.join([
                                                "'" + list_val + "'"
                                                for list_val in list_vals
                                            ])) + ','
                                else:
                                    # Simple string
                                    casastring += str(k) + '=' + "'" + str(
                                        subpar[k]) + "'" + ','

                    casastring = casastring.rstrip(',')
                    casastring += ')\n'

                # 1) The return code of casapy is not properly recognized by the
                #    pipeline; wrapping it in a shell script works for successful
                #    runs, but failed runs seem to hang the pipeline.
                # 2) casapy cannot have two instances running from the same
                #    directory, so create a temporary directory per run.
                casapydir = tempfile.mkdtemp(dir=work_dir)
                if casastring != '':
                    casafilename = os.path.join(
                        work_dir,
                        os.path.basename(reffile) + '.casacommand.py')
                    casacommandfile = open(casafilename, 'w')
                    casacommandfile.write(casastring)
                    casacommandfile.close()
                    args.append(casafilename)

                somename = os.path.join(
                    work_dir,
                    os.path.basename(reffile) + '.casashell.sh')
                commandstring = ''
                commandstring += executable
                for item in args:
                    if str(item).find(' ') > -1 or str(item).find('[') > -1:
                        commandstring += ' "' + item + '"'
                    else:
                        commandstring += ' ' + item

                with open(somename, 'w') as wrapper_script:
                    wrapper_script.write('#!/bin/bash \n')
                    wrapper_script.write('echo "Trying CASAPY command" \n')
                    wrapper_script.write(commandstring + ' >& casa.log\n')

                # Make the wrapper script executable
                st = os.stat(somename)
                os.chmod(
                    somename,
                    st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

            try:
                # ****************************************************************
                # Run
                cmd = [somename]
                with CatchLog4CPlus(
                        casapydir,
                        self.logger.name + "." + os.path.basename(reffile),
                        os.path.basename(executable),
                ) as logger:
                    # Catch segfaults and retry
                    catch_segfaults(cmd, casapydir, self.environment, logger)
            except CalledProcessError as err:
                # CalledProcessError isn't properly propagated by IPython
                self.logger.error(str(err))
                return 1
            except Exception as err:
                self.logger.error(str(err))
                return 1
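
When parsetasfile is set, the node above folds a flat kwargs dictionary with dotted keys (for example 'task.param') into a single CASA task call string such as task(param=value). The sketch below shows the core of that translation with hypothetical task and parameter names; booleans, lists, and the casastr/ convention handled above are left out for brevity.

def kwargs_to_casa_call(kwargs):
    """Group 'task.param' keys and render one CASA call per task."""
    tasks = {}
    for key, value in kwargs.items():
        task, _, param = key.partition('.')
        tasks.setdefault(task, {})[param] = value

    lines = []
    for task, params in tasks.items():
        rendered = []
        for param, value in params.items():
            try:
                float(str(value))                    # numeric values stay unquoted
                rendered.append('%s=%s' % (param, value))
            except ValueError:
                rendered.append("%s='%s'" % (param, value))
        lines.append('%s(%s)' % (task, ', '.join(rendered)))
    return '\n'.join(lines) + '\n'

# Hypothetical example:
# kwargs_to_casa_call({'clean.vis': 'target.MS', 'clean.niter': '500'})
# returns "clean(vis='target.MS', niter=500)\n"
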
    def run(self,
            infile,
            executable,
            args,
            kwargs,
            work_dir='/tmp',
            parsetasfile=False,
            args_format='',
            environment=''):
        """
        This function contains all the needed functionality
        """
        # Debugging info
        self.logger.debug("infile            = %s" % infile)
        self.logger.debug("executable        = %s" % executable)
        self.logger.debug("working directory = %s" % work_dir)
        self.logger.debug("arguments         = %s" % args)
        self.logger.debug("arg dictionary    = %s" % kwargs)
        self.logger.debug("environment       = %s" % environment)

        self.environment.update(environment)

        # Time execution of this job
        with log_time(self.logger):
            if os.path.exists(infile):
                self.logger.info("Processing %s" % infile)
            else:
                self.logger.error("Dataset %s does not exist" % infile)
                return 1

            # Check if executable is present
            if not os.access(executable, os.X_OK):
                self.logger.error("Executable %s not found" % executable)
                return 1

            # Create work_dir if needed; guard against a race when more than
            # one process runs on the same filesystem.
            if not os.path.isdir(work_dir):
                try:
                    os.mkdir(work_dir)
                except OSError as exc:  # Python >2.5
                    if exc.errno == errno.EEXIST and os.path.isdir(work_dir):
                        pass
                    else:
                        raise

            if not parsetasfile:
                for k, v in kwargs.items():
                    args.append('--' + k + '=' + v)
            else:
                nodeparset = Parset()
                sublist = []
                for k, v in kwargs.items():
                    nodeparset.add(k, v)
                    if '.' in str(k):
                        if not str(k).split('.')[0] in sublist:
                            sublist.append(str(k).split('.')[0])

                # Quick hack below; proof of concept only.
                subparsetlist = []
                casastring = ''
                for sub in sublist:
                    subpar = nodeparset.makeSubset(
                        nodeparset.fullModuleName(sub) + '.')
                    casastring = sub + '('
                    for k in subpar.keys():
                        # Values flagged with a '/casastr/' prefix are quoted
                        # strings: handle them before the plain-path case, which
                        # would otherwise match on the leading '/'.
                        if str(subpar[k]).find('/casastr/') == 0:
                            casastring += str(k) + '=' + "'" + str(
                                subpar[k])[len('/casastr/'):] + "'" + ','
                        elif str(subpar[k]).find('/') == 0:
                            casastring += str(k) + '=' + "'" + str(
                                subpar[k]) + "'" + ','
                        else:
                            casastring += str(k) + '=' + str(subpar[k]) + ','
                    casastring = casastring.rstrip(',')
                    casastring += ')\n'
                # 1) The return code of casapy is not properly recognized by the
                #    pipeline; wrapping it in a shell script works for successful
                #    runs, but failed runs seem to hang the pipeline.
                # 2) casapy cannot have two instances running from the same
                #    directory, so create a temporary directory per run.
                casapydir = tempfile.mkdtemp(dir=work_dir)
                if casastring != '':
                    casafilename = os.path.join(
                        work_dir,
                        os.path.basename(infile) + '.casacommand.py')
                    casacommandfile = open(casafilename, 'w')
                    casacommandfile.write('try:\n')
                    casacommandfile.write('    ' + casastring)
                    casacommandfile.write('except SystemExit:\n')
                    casacommandfile.write('    pass\n')
                    casacommandfile.write('except:\n')
                    casacommandfile.write('    import os\n')
                    casacommandfile.write('    os._exit(1)\n')
                    casacommandfile.close()
                    args.append(casafilename)
                somename = os.path.join(
                    work_dir,
                    os.path.basename(infile) + '.casashell.sh')
                commandstring = ''
                commandstring += executable
                for item in args:
                    commandstring += ' ' + item

                with open(somename, 'w') as wrapper_script:
                    wrapper_script.write('#!/bin/bash \n')
                    wrapper_script.write('echo "Trying CASAPY command" \n')
                    wrapper_script.write(commandstring)

                # Make the wrapper script executable
                import stat
                st = os.stat(somename)
                os.chmod(
                    somename,
                    st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

            try:
                # ****************************************************************
                # Run
                cmd = [somename]
                with CatchLog4CPlus(
                        casapydir,
                        self.logger.name + "." + os.path.basename(infile),
                        os.path.basename(executable),
                ) as logger:
                    # Catch segfaults and retry
                    catch_segfaults(cmd, casapydir, self.environment, logger)
            except CalledProcessError as err:
                # CalledProcessError isn't properly propagated by IPython
                self.logger.error(str(err))
                return 1
            except Exception as err:
                self.logger.error(str(err))
                return 1
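
Both variants above work around two casapy limitations noted in the comments: the interpreter's exit status is not propagated reliably, and two instances cannot share a working directory. The generated command file therefore converts any CASA failure into os._exit(1), and an executable bash wrapper launches casapy and captures its output. A condensed sketch of that file generation follows; the function name and the casa executable path are hypothetical, and the --nologger/-c flags mirror the commented-out invocation in the snippet.

import os
import stat

def write_casa_wrapper(work_dir, basename, casastring, casa_executable):
    """Write <basename>.casacommand.py and an executable <basename>.casashell.sh."""
    command_file = os.path.join(work_dir, basename + '.casacommand.py')
    with open(command_file, 'w') as command:
        # Any exception inside the CASA call becomes a nonzero exit status
        command.write('try:\n')
        command.write('    ' + casastring)
        command.write('except SystemExit:\n    pass\n')
        command.write('except:\n    import os\n    os._exit(1)\n')

    wrapper_file = os.path.join(work_dir, basename + '.casashell.sh')
    with open(wrapper_file, 'w') as wrapper:
        wrapper.write('#!/bin/bash\n')
        wrapper.write('%s --nologger -c %s >& casa.log\n'
                      % (casa_executable, command_file))

    # Mark the wrapper executable so it can be handed to catch_segfaults as-is
    st = os.stat(wrapper_file)
    os.chmod(wrapper_file,
             st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    return wrapper_file

# Hypothetical usage:
# write_casa_wrapper('/tmp', 'target.MS', "clean(vis='target.MS')\n", '/opt/casa/bin/casa')
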
Esempio n. 42
0
    def run(self, executable, environment, parset, working_directory,
            output_image, concatenated_measurement_set, sourcedb_path,
             mask_patch_size, autogenerate_parameters, specify_fov, fov):
        """
        :param executable: Path to awimager executable
        :param environment: environment for catch_segfaults (executable runner)
        :param parset: parameters for the awimager,
        :param working_directory: directory the place temporary files
        :param output_image: location and filesname to story the output images
          the multiple images are appended with type extentions
        :param concatenated_measurement_set: Input measurement set
        :param sourcedb_path: Path the the sourcedb used to create the image 
          mask
        :param mask_patch_size: Scaling of the patch around the source in the 
          mask
        :param autogenerate_parameters: Turns on the autogeneration of: 
           cellsize, npix, wprojplanes, wmax, fov
        :param fov: if  autogenerate_parameters is false calculate 
           imageparameter (cellsize, npix, wprojplanes, wmax) relative to this 
           fov
        :rtype: self.outputs["image"] The path to the output image
        """
        self.logger.info("Start imager_awimager node run:")
        log4_cplus_name = "imager_awimager"
        self.environment.update(environment)

        with log_time(self.logger):
            # Read the parameters as specified in the parset
            parset_object = get_parset(parset)

            # *************************************************************
            # 1. Calculate awimager parameters that depend on measurement set
            # and the parset

            cell_size, npix, w_max, w_proj_planes = \
                    self._get_imaging_parameters(
                            concatenated_measurement_set,
                            parset,
                            autogenerate_parameters,
                            specify_fov,
                            fov)

            self.logger.info("Using autogenerated parameters; ")
            self.logger.info(
                 "Calculated parameters: cell_size: {0}, npix: {1}".format(
                     cell_size, npix))

            self.logger.info("w_max: {0}, w_proj_planes: {1} ".format(
                        w_max, w_proj_planes))

            # ****************************************************************
            # 2. Get the target image location from the mapfile for the parset.
            # Create target dir if it not exists
            image_path_head = os.path.dirname(output_image)
            create_directory(image_path_head)
            self.logger.debug("Created directory to place awimager output"
                              " files: {0}".format(image_path_head))

            # ****************************************************************
            # 3. Create the mask
            mask_file_path = self._create_mask(npix, cell_size, output_image,
                         concatenated_measurement_set, executable,
                         working_directory, log4_cplus_name, sourcedb_path,
                          mask_patch_size, image_path_head)

            # *****************************************************************
            # 4. Update the parset with calculated parameters, and output image
            patch_dictionary = {'uselogger': 'True',  # enables log4cplus logging
                                'ms': str(concatenated_measurement_set),
                                'cellsize': str(cell_size),
                                'npix': str(npix),
                                'wmax': str(w_max),
                                'wprojplanes': str(w_proj_planes),
                                'image': str(output_image),
                                'maxsupport': str(npix),
                                # 'mask': str(mask_file_path),  # TODO: reintroduce
                                # the mask; excluded to speed up this debug stage
                                }

            # save the parset at the target dir for the image
            calculated_parset_path = os.path.join(image_path_head,
                                                       "parset.par")

            try:
                temp_parset_filename = patch_parset(parset, patch_dictionary)
                # Copy tmp file to the final location
                shutil.copyfile(temp_parset_filename, calculated_parset_path)
                self.logger.debug("Wrote parset for awimager run: {0}".format(
                                                    calculated_parset_path))
            finally:
                # remove temp file
                os.remove(temp_parset_filename)

            # *****************************************************************
            # 5. Run the awimager with the updated parameterset
            cmd = [executable, calculated_parset_path]
            try:
                with CatchLog4CPlus(working_directory,
                        self.logger.name + "." +
                        os.path.basename(log4_cplus_name),
                        os.path.basename(executable)
                ) as logger:
                    catch_segfaults(cmd, working_directory, self.environment,
                                            logger)

            # Thrown by catch_segfaults
            except CalledProcessError as exception:
                self.logger.error(str(exception))
                return 1

            except Exception as exception:
                self.logger.error(str(exception))
                return 1
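
Step 4 of the node above patches the awimager parset with values computed at run time (cellsize, npix, wmax, wprojplanes, the image name) and keeps a copy next to the output image. patch_parset and get_parset are pipeline helpers, so the sketch below only approximates that step with plain key=value file handling; the function name, file names, and example values are hypothetical.

def patch_parset_file(template_path, patch_dictionary, output_path):
    """Copy a key=value parset, overriding or appending the patched keys."""
    lines = []
    seen = set()
    with open(template_path) as template:
        for line in template:
            key = line.split('=', 1)[0].strip()
            if key in patch_dictionary:
                lines.append('%s=%s\n' % (key, patch_dictionary[key]))
                seen.add(key)
            else:
                lines.append(line)
    # Keys that were not present in the template are appended at the end
    for key, value in patch_dictionary.items():
        if key not in seen:
            lines.append('%s=%s\n' % (key, value))
    with open(output_path, 'w') as patched:
        patched.writelines(lines)

# Hypothetical usage, mirroring the dictionary built in step 4:
# patch_parset_file('awimager.parset',
#                   {'cellsize': '7.5arcsec', 'npix': '2048',
#                    'image': 'awimage_cycle_0'},
#                   'parset.par')
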
Esempio n. 43
0
    def run(self, awimager_output, ms_per_image, sourcelist, target,
            output_image, minbaseline, maxbaseline, processed_ms_dir,
            fillrootimagegroup_exec, environment, sourcedb):
        """
        :param awimager_output: Path to the casa image produced by awimager
        :param ms_per_image: The X (90) measurement sets scheduled to
            create the image
        :param sourcelist: list of sources found in the image
        :param target: <unused>
        :param minbaseline: Minimum baseline used for the image
        :param maxbaseline: largest/maximum baseline used for the image
        :param processed_ms_dir: The X (90) measurement sets actually used to
            create the image
        :param fillrootimagegroup_exec: Executable used to add image data to
            the hdf5 image

        :rtype: self.outputs['hdf5'] set to "succes" to signal node success
        :rtype: self.outputs['image'] path to the produced hdf5 image
        """
        self.environment.update(environment)
        with log_time(self.logger):
            ms_per_image_map = DataMap.load(ms_per_image)

            # *****************************************************************
            # 1. add image info
            # Get all the files in the processed measurement dir
            file_list = os.listdir(processed_ms_dir)
            # TODO: BUG! The metadata might contain files that were copied
            # but failed in imager_bbs
            processed_ms_paths = []
            for item in ms_per_image_map:
                path = item.file
                ms_file_name = os.path.split(path)[1]
                #if the ms is in the processed dir (additional check)
                if (ms_file_name in file_list):
                    # save the path
                    processed_ms_paths.append(
                        os.path.join(processed_ms_dir, ms_file_name))
            # add the information to the image
            try:
                addimg.addImagingInfo(awimager_output, processed_ms_paths,
                                      sourcedb, minbaseline, maxbaseline)

            except Exception as error:
                self.logger.warn("addImagingInfo Threw Exception:")
                self.logger.warn(error)
                # Catch raising of already done error: allows for rerunning
                # of the recipe
                if "addImagingInfo already done" in str(error):
                    pass
                else:
                    raise Exception(error)
                #The majority of the tables is updated correctly

            # ***************************************************************
            # 2. convert to hdf5 image format
            output_directory = None
            pim_image = pim.image(awimager_output)
            try:
                self.logger.info(
                    "Saving image in HDF5 Format to: {0}".format(output_image))
                # Create the output directory
                output_directory = os.path.dirname(output_image)
                create_directory(output_directory)
                # save the image
                pim_image.saveas(output_image, hdf5=True)

            except Exception as error:
                self.logger.error(
                    "Exception raised inside pyrap.images: {0}".format(
                        str(error)))
                raise error

            # Convert to fits
            # create target location
            fits_output = output_image + ".fits"
            # To allow reruns, any earlier version must be removed first;
            # image2fits fails otherwise.
            if os.path.exists(fits_output):
                os.unlink(fits_output)

            try:
                temp_dir = tempfile.mkdtemp(suffix=".%s" %
                                            (os.path.basename(__file__), ))
                with CatchLog4CPlus(
                        temp_dir, self.logger.name + '.' +
                        os.path.basename(awimager_output),
                        "image2fits") as logger:
                    catch_segfaults([
                        "image2fits", '-in', awimager_output, '-out',
                        fits_output
                    ], temp_dir, self.environment, logger)
            except Exception as excp:
                self.logger.error(str(excp))
                return 1
            finally:
                shutil.rmtree(temp_dir)

            # ****************************************************************
            # 3. Filling of the HDF5 root group
            command = [fillrootimagegroup_exec, output_image]
            self.logger.info(" ".join(command))
            #Spawn a subprocess and connect the pipes
            proc = subprocess.Popen(command,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)

            (stdoutdata, stderrdata) = communicate_returning_strings(proc)

            exit_status = proc.returncode
            self.logger.info(stdoutdata)
            self.logger.info(stderrdata)

            # If the fill command failed, log the exit status
            if exit_status != 0:
                self.logger.error(
                    "Error using the fillRootImageGroup command; "
                    "see above lines. Exit status: {0}".format(exit_status))

                return 1

            # *****************************************************************
            # 4 Export the fits image to the msss server
            url = "http://tanelorn.astron.nl:8000/upload"
            try:
                self.logger.info(
                    "Starting upload of fits image data to server!")
                opener = urllib.request.build_opener(mph.MultipartPostHandler)
                filedata = {"file": open(fits_output, "rb")}
                opener.open(url, filedata, timeout=2)

                # HTTPError needs to be caught first.
            except urllib.error.HTTPError as httpe:
                self.logger.warn("HTTP status is: {0}".format(httpe.code))
                self.logger.warn("failed exporting fits image to server")

            except urllib.error.URLError as urle:
                self.logger.warn(str(urle.reason))
                self.logger.warn("failed exporting fits image to server")

            except Exception as exc:
                self.logger.warn(str(exc))
                self.logger.warn("failed exporting fits image to server")

            # *****************************************************************
            # 5. export the sourcelist to the msss server
            url = "http://tanelorn.astron.nl:8000/upload_srcs"
            try:
                # Copy file to output location
                new_sourcelist_path = output_image + ".sourcelist"
                if os.path.exists(new_sourcelist_path):
                    os.unlink(new_sourcelist_path)

                shutil.copy(sourcelist, new_sourcelist_path)
                self.logger.info(
                    "Starting upload of sourcelist data to server!")
                opener = urllib.request.build_opener(mph.MultipartPostHandler)
                filedata = {"file": open(new_sourcelist_path, "rb")}
                opener.open(url, filedata, timeout=2)

                # HTTPError needs to be caught first.
            except urllib.error.HTTPError as httpe:
                self.logger.warn("HTTP status is: {0}".format(httpe.code))
                self.logger.warn("failed exporting sourcelist to server")

            except urllib.error.URLError as urle:
                self.logger.warn(str(urle.reason))
                self.logger.warn("failed exporting sourcelist image to server")

            except Exception as exc:
                self.logger.warn(str(exc))
                self.logger.warn("failed exporting sourcelist image to serve")

            self.outputs["hdf5"] = "succes"
            self.outputs["image"] = output_image

        return 0
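
Step 3 of this node hands the HDF5 image to an external fill executable and treats any nonzero exit status as a failure. A minimal sketch of that subprocess pattern is given below; the function name and executable path are hypothetical.

import logging
import subprocess

def fill_root_image_group(fill_exec, hdf5_image, logger=None):
    """Run the fill executable on the HDF5 image and report its exit status."""
    logger = logger or logging.getLogger(__name__)
    command = [fill_exec, hdf5_image]
    logger.info(" ".join(command))
    # Spawn the subprocess and capture both output streams
    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdoutdata, stderrdata = proc.communicate()
    logger.info(stdoutdata.decode(errors="replace"))
    logger.info(stderrdata.decode(errors="replace"))
    if proc.returncode != 0:
        logger.error("Error using the fillRootImageGroup command; "
                     "exit status: %d" % proc.returncode)
        return 1
    return 0

# Hypothetical call:
# fill_root_image_group('/opt/lofar/bin/fillRootImageGroup', 'image.h5')
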