Example #1
def runApVerify(cmdLine=None):
    """Execute the AP pipeline while handling metrics.

    This is the main function for ``ap_verify``, and handles logging,
    command-line argument parsing, pipeline execution, and metrics
    generation.

    Parameters
    ----------
    cmdLine : `list` of `str`
        An optional command line used to execute `runApVerify` from other
        Python code. If `None`, `sys.argv` will be used.
    """
    lsst.log.configure()
    log = lsst.log.Log.getLogger('ap.verify.ap_verify.main')
    # TODO: what is LSST's policy on exceptions escaping into main()?
    args = _ApVerifyParser().parse_args(args=cmdLine)
    log.debug('Command-line arguments: %s', args)

    workspace = Workspace(args.output)
    ingestDataset(args.dataset, workspace)

    log.info('Running pipeline...')
    expandedDataIds = runApPipe(workspace, args)
    computeMetrics(workspace, expandedDataIds, args)
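A minimal invocation sketch (hypothetical dataset name and output path; assumes the function above is importable from lsst.ap.verify.ap_verify): the cmdLine tokens mirror what would be typed on the command line.

# Hypothetical call from Python; the dataset and output values are placeholders.
from lsst.ap.verify.ap_verify import runApVerify

runApVerify(cmdLine=["--dataset", "HiTS2015", "--output", "workspaces/hits"])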
Example #2
    def configure(self, provSetup=None, workflowVerbosity=None):
        """Prepare a workflow for launching.

        Parameters
        ----------
        provSetup : `object`
            A provenance setup object to pass to Configurator instances.
        workflowVerbosity : `int`
            The logging verbosity level to set for workflows.

        Returns
        -------
        workflowLauncher : `WorkflowLauncher`
            The launcher for this workflow.
        """
        log.debug("WorkflowManager:configure")
        if self._workflowConfigurator:
            log.info("production has already been configured.")
            return

        # lock this branch of code
        try:
            self._locked.acquire()

            self._workflowConfigurator = self.createConfigurator(
                self.runid, self.repository, self.name, self.wfConfig, self.prodConfig)
            self._workflowLauncher = self._workflowConfigurator.configure(provSetup, workflowVerbosity)
        finally:
            self._locked.release()

        # do specialized workflow level configuration here, this may include
        # calling ProvenanceSetup.getWorkflowCommands()
        return self._workflowLauncher
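Note that the already-configured check above runs before the lock is acquired, so two threads could both pass it; a standalone sketch (not the project's code) of the re-check-inside-the-lock variant:

import threading

_lock = threading.Lock()
_launcher = None

def configure_once(factory):
    """Run factory() at most once, even under concurrent calls (illustrative)."""
    global _launcher
    with _lock:                  # take the lock before inspecting shared state
        if _launcher is None:    # re-check inside the critical section
            _launcher = factory()
    return _launcher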
Example #3
    def _stitch_exposures(self, dest_wcs, dest_bbox, expo_list, coadd_config,
                          warper):
        """Return an exposure matching dest_wcs and dest_bbox that is composed
        of pixels from the exposures in expo_list. Uses coadd_utils.Coadd.

        dest_wcs     - WCS object for the destination exposure.
        dest_bbox    - Bounding box for the destination exposure.
        expo_list    - List of exposures to combine to form the destination
                       exposure.
        coadd_config - Configuration for Coadd.
        warper       - Warper to use when warping images.

        All exposures need a valid WCS.
        """
        coadd = Coadd.fromConfig(bbox=dest_bbox,
                                 wcs=dest_wcs,
                                 config=coadd_config)
        for j, expo in enumerate(expo_list):
            warped_exposure = warper.warpExposure(destWcs=coadd.getWcs(),
                                                  srcExposure=expo,
                                                  maxBBox=coadd.getBBox())
            log.info("warp{}".format(j))
            coadd.addExposure(warped_exposure)

        return coadd.getCoadd()
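The stacking step, reduced to a numpy-only sketch (no WCS handling, weighting, or mask rejection, which coadd_utils.Coadd does handle): once inputs are warped onto a common grid, coaddition is a per-pixel combine.

import numpy as np

def naive_coadd(warped_images):
    # Mean-stack already-aligned 2-D arrays (illustrative only).
    return np.mean(np.stack(warped_images), axis=0)

assert np.allclose(naive_coadd([np.zeros((4, 4)), 2 * np.ones((4, 4))]), 1.0)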
Example #4
    def testMdcInit(self):

        expected_msg = \
            "INFO  - main thread {{MDC_INIT,OK}}\n" + \
            "INFO  - thread 1 {{MDC_INIT,OK}}\n" + \
            "INFO  - thread 2 {{MDC_INIT,OK}}\n"

        with TestLog.StdoutCapture(self.outputFilename):

            self.configure("""
log4j.rootLogger=DEBUG, CA
log4j.appender.CA=ConsoleAppender
log4j.appender.CA.layout=PatternLayout
log4j.appender.CA.layout.ConversionPattern=%-5p - %m %X%n
""")

            def fun():
                log.MDC("MDC_INIT", "OK")

            log.MDCRegisterInit(fun)

            log.info("main thread")

            thread = threading.Thread(target=lambda: log.info("thread 1"))
            thread.start()
            thread.join()

            thread = threading.Thread(target=lambda: log.info("thread 2"))
            thread.start()
            thread.join()

        self.check(expected_msg)

        log.MDCRemove("MDC_INIT")
Example #5
def insert_extra_package_metadata(job, config):
    """Insert metadata for extra packages ('--package-repos') into
    ``Job.meta['packages']``.
    """
    log = lsst.log.Log.getLogger(
        'verify.bin.dispatchverify.insert_extra_package_metadata')

    if 'packages' not in job.meta:
        job.meta['packages'] = dict()

    for package_path in config.extra_package_paths:
        log.info('Inserting extra package metadata: {0}'.format(package_path))
        package_name = package_path.split(os.sep)[-1]

        package = {'name': package_name}

        if git is not None:
            git_repo = git.Repo(package_path)
            package['git_sha'] = git_repo.active_branch.commit.hexsha
            package['git_branch'] = git_repo.active_branch.name
            package['git_url'] = git_repo.remotes.origin.url

        if package_name in job.meta['packages']:
            # Update pre-existing package metadata
            job.meta['packages'][package_name].update(package)
        else:
            # Create new package metadata
            job.meta['packages'][package_name] = package

    return job
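The merge-or-insert behavior on Job.meta['packages'], shown with plain dicts and made-up values:

packages = {'afw': {'name': 'afw', 'git_branch': 'main'}}
package = {'name': 'afw', 'git_sha': 'abc123'}      # hypothetical new metadata

if 'afw' in packages:
    packages['afw'].update(package)                 # pre-existing entry: merge keys
else:
    packages['afw'] = package                       # new entry: insert the whole record

assert packages['afw'] == {'name': 'afw', 'git_branch': 'main', 'git_sha': 'abc123'}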
Example #6
    def testRedir(self):
        """
        Test redirection to stream.
        """
        with TestRedir.StdoutCapture(self.outputFilename):
            log.configure()
            dest = io.StringIO()
            log_utils.enable_notebook_logging(dest)
            log.log(log.getDefaultLogger().getName(), log.INFO, "This is INFO")
            log.info(u"This is unicode INFO")
            log.trace("This is TRACE")
            log.debug("This is DEBUG")
            log.warn("This is WARN")
            log.error("This is ERROR")
            log.fatal("This is FATAL")
            log_utils.disable_notebook_logging()
            log.warn("Format %d %g %s", 3, 2.71828, "foo")
        self.assertEqual(
            dest.getvalue(),
            """root INFO: This is INFO
root INFO: This is unicode INFO
root WARN: This is WARN
root ERROR: This is ERROR
root FATAL: This is FATAL
""",
        )
        self.check(
            """
root WARN: Format 3 2.71828 foo
"""
        )
Example #7
def runApVerify(cmdLine=None):
    """Execute the AP pipeline while handling metrics.

    This is the main function for ``ap_verify``, and handles logging,
    command-line argument parsing, pipeline execution, and metrics
    generation.

    Parameters
    ----------
    cmdLine : `list` of `str`
        An optional command line used to execute `runApVerify` from other
        Python code. If `None`, `sys.argv` will be used.

    Returns
    -------
    nFailed : `int`
        The number of data IDs that were not successfully processed, up to 127,
        or 127 if the task runner framework failed.
    """
    _configure_logger()
    log = _LOG.getChild('main')
    # TODO: what is LSST's policy on exceptions escaping into main()?
    args = _ApVerifyParser().parse_args(args=cmdLine)
    log.debug('Command-line arguments: %s', args)

    workspace = WorkspaceGen3(args.output)
    ingestDatasetGen3(args.dataset, workspace, processes=args.processes)
    log.info('Running pipeline...')
    # Gen 3 pipeline includes both AP and metrics
    return runApPipeGen3(workspace, args, processes=args.processes)
Example #8
def scaleVariance(maskedImage, maskPlanes, log=None):
    """!
    \brief Scale the variance in a maskedImage

    The variance plane in a convolved or warped image (or a coadd derived
    from warped images) does not accurately reflect the noise properties of
    the image because variance has been lost to covariance. This function
    attempts to correct for this by scaling the variance plane to match
    the observed variance in the image. This is not perfect (because we're
    not tracking the covariance) but it's simple and is often good enough.

    @param maskedImage  MaskedImage to operate on; variance will be scaled
    @param maskPlanes  List of mask planes for pixels to reject
    @param log  Log for reporting the renormalization factor; or None
    @return renormalisation factor
    """
    variance = maskedImage.getVariance()
    sigNoise = maskedImage.getImage().getArray() / numpy.sqrt(
        variance.getArray())
    maskVal = maskedImage.getMask().getPlaneBitMask(maskPlanes)
    good = (maskedImage.getMask().getArray() & maskVal) == 0
    # Robust measurement of stdev
    q1, q3 = numpy.percentile(sigNoise[good], (25, 75))
    stdev = 0.74 * (q3 - q1)
    ratio = stdev**2
    if log:
        log.info("Renormalizing variance by %f" % (ratio, ))
    variance *= ratio
    return ratio
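The 0.74 factor is the Gaussian IQR-to-sigma conversion (the interquartile range of a Gaussian is about 1.349 sigma, so sigma is about 0.741 times the IQR); a quick numpy check of that identity:

import numpy as np

rng = np.random.default_rng(0)
noise = rng.standard_normal(100_000)        # unit-variance Gaussian noise
q1, q3 = np.percentile(noise, (25, 75))
print(0.741 * (q3 - q1))                    # prints a value close to 1.0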
Example #9
    def test_readInFits(self):
        credFile = os.path.expanduser('~/.mysqlAuthLSST')
        if not os.path.isfile(credFile):
            log.warn("Required file with credentials '%s' not found.", credFile)
            return

        testFile = ("./tests/testData/imsim_886258731_R33_S21_C12_E000.fits.gz")
        self.assertTrue(isFitsExt('stuf.fits'))
        self.assertFalse(isFitsExt('thing.txt'))
        self.assertFalse(isFitsExt('item.tx.gz'))
        self.assertTrue(isFitsExt(testFile))
        self.assertTrue(isFits(testFile))

        # Destroy existing tables and re-create them
        dbDestroyCreate(credFile, "DELETE")

        # Open a connection to the database.
        metadataFits = MetadataFitsDb(credFile)

        # test a specific file
        self.assertFalse(metadataFits.isFileInDb(testFile))
        metadataFits.insertFile(testFile)
        log.info(metadataFits.showColumnsInTables())
        self.assertTrue(metadataFits.isFileInDb(testFile))

        # test crawler
        rootDir = '~/test_md'
        rootDir = os.path.expanduser(rootDir)
        if not os.path.exists(rootDir):
            log.error("Data directory {} is required".format(rootDir))
            return
        directoryCrawl(rootDir, metadataFits)
Example #10
    def configure(self, workflowVerbosity=None):
        if self._productionRunConfigurator:
            log.info("production has already been configured.")
            return
        
        # lock this branch of code
        try:
            self._locked.acquire()

            # TODO - SRP
            self._productionRunConfigurator = self.createConfigurator(self.runid,
                                                                      self.fullConfigFilePath)
            workflowManagers = self._productionRunConfigurator.configure(workflowVerbosity)

            self._workflowManagers = {"__order": []}
            for wfm in workflowManagers:
                self._workflowManagers["__order"].append(wfm)
                self._workflowManagers[wfm.getName()] = wfm

            loggerManagers = self._productionRunConfigurator.getLoggerManagers()
            for lm in loggerManagers:
                self._loggerManagers.append(lm)

        finally:
            self._locked.release()
Example #11
def _image_cutout(_request, image_db_class, units):
    """Get a raw image cutout response based on input parameters.
    image_db_class should be the appropriate class (W13DeepCoadDb, W13RawDb, etc.)
    units should be 'pixel' or 'arcsecond'
    """
    ra = _request.args.get('ra')
    dec = _request.args.get('dec')
    filter = _request.args.get('filter')
    width = _request.args.get('width')
    height = _request.args.get('height')
    # check inputs
    try:
        ra, dec, filter = _assert_ra_dec_filter(ra, dec, filter, 'irg')
        try:
            width = float(width)
            height = float(height)
        except ValueError:
            msg = "INVALID_INPUT width={} height={}".format(width, height)
            raise ValueError(msg)
    except ValueError as e:
        return _error(ValueError.__name__, e.args[0], BAD_REQUEST)

    log.info("raw cutout pixel ra={} dec={} filter={} width={} height={}".format(
             ra, dec, filter, width, height))
    # fetch the image here
    img_getter = image_open(current_app.config["DAX_IMG_DBCONF"], image_db_class)
    img = img_getter.image_cutout(ra, dec, filter, width, height, units)
    if img is None:
        return _image_not_found()
    log.debug("Sub w={} h={}".format(img.getWidth(), img.getHeight()))
    return _file_response(img, "cutout.fits")
Example #12
    def testBasic(self):
        """
        Test basic log output with default configuration.
        Since the default threshold is INFO, the DEBUG or TRACE
        message is not emitted.
        """
        with TestLog.StdoutCapture(self.outputFilename):
            log.configure()
            log.log(log.getDefaultLogger(), log.INFO, "This is INFO")
            log.info(u"This is unicode INFO")
            log.trace("This is TRACE")
            log.debug("This is DEBUG")
            log.warn("This is WARN")
            log.error("This is ERROR")
            log.fatal("This is FATAL")
            log.critical("This is CRITICAL")
            log.warning("Format %d %g %s", 3, 2.71828, "foo")
        self.check("""
root INFO: This is INFO
root INFO: This is unicode INFO
root WARN: This is WARN
root ERROR: This is ERROR
root FATAL: This is FATAL
root FATAL: This is CRITICAL
root WARN: Format 3 2.71828 foo
""")
Example #13
def process_one(filename, write=False, quiet=False):
    """Convert one file in-place from Jy (or no units) to nJy fluxes.

    Parameters
    ----------
    filename : `str`
        The file to convert.
    write : `bool`, optional
        Write the converted catalog out, overwriting the read-in catalog?
    quiet : `bool`, optional
        Do not print messages about files read/written or fields found?
    """
    log = lsst.log.Log()
    if quiet:
        log.setLevel(lsst.log.WARN)

    log.info(f"Reading: {filename}")
    catalog = lsst.afw.table.SimpleCatalog.readFits(filename)

    output = convertToNanojansky(catalog, log, doConvert=write)

    if write:
        addRefCatMetadata(output)
        output.writeFits(filename)
        log.info(f"Wrote: {filename}")
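The unit arithmetic behind the conversion: 1 Jy = 1e9 nJy, so fluxes simply scale by 1e9 (the flux below is a made-up value).

flux_jy = 3.631e-6            # hypothetical source flux in janskys
flux_njy = flux_jy * 1e9      # the same flux in nanojanskys
assert abs(flux_njy - 3631.0) < 1e-6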
Example #14
    def test_readInFits(self):
        credFile = os.path.expanduser('~/.mysqlAuthLSST')
        if not os.path.isfile(credFile):
            log.warn("Required file with credentials '%s' not found.",
                     credFile)
            return

        testFile = (
            "./tests/testData/imsim_886258731_R33_S21_C12_E000.fits.gz")
        self.assertTrue(isFitsExt('stuf.fits'))
        self.assertFalse(isFitsExt('thing.txt'))
        self.assertFalse(isFitsExt('item.tx.gz'))
        self.assertTrue(isFitsExt(testFile))
        self.assertTrue(isFits(testFile))

        # Destroy existing tables and re-create them
        dbDestroyCreate(credFile, "DELETE")

        # Open a connection to the database.
        metadataFits = MetadataFitsDb(credFile)

        # test a specific file
        self.assertFalse(metadataFits.isFileInDb(testFile))
        metadataFits.insertFile(testFile)
        log.info(metadataFits.showColumnsInTables())
        self.assertTrue(metadataFits.isFileInDb(testFile))

        # test crawler
        rootDir = '~/test_md'
        rootDir = os.path.expanduser(rootDir)
        if not os.path.exists(rootDir):
            log.error("Data directory {} is required".format(rootDir))
            return
        directoryCrawl(rootDir, metadataFits)
Example #15
    def testMdcInit(self):

        expected_msg = \
            "INFO  - main thread {{MDC_INIT,OK}}\n" + \
            "INFO  - thread 1 {{MDC_INIT,OK}}\n" + \
            "INFO  - thread 2 {{MDC_INIT,OK}}\n"

        with TestLog.StdoutCapture(self.outputFilename):

            self.configure("""
log4j.rootLogger=DEBUG, CA
log4j.appender.CA=ConsoleAppender
log4j.appender.CA.layout=PatternLayout
log4j.appender.CA.layout.ConversionPattern=%-5p - %m %X%n
""")

            def fun():
                log.MDC("MDC_INIT", "OK")
            log.MDCRegisterInit(fun)

            log.info("main thread")

            thread = threading.Thread(target=lambda: log.info("thread 1"))
            thread.start()
            thread.join()

            thread = threading.Thread(target=lambda: log.info("thread 2"))
            thread.start()
            thread.join()

        self.check(expected_msg)

        log.MDCRemove("MDC_INIT")
Example #16
def replace_root(filename, old_root, new_root):
    """Replace the original gen2 root with the new one

    Parameters
    ----------
    filename : `str`
        the path of the file in which to replace the root
    old_root : `str`
        the original fully qualified path of the gen2 repo root
    new_root : `str`
        the new fully qualified path of the gen2 repo root
    """
    log = lsst.log.Log.getLogger("convertRepo")

    # Not attempting to deal with parsing the yaml, because
    # that would require building a special purpose constructor
    # and emitter for RepositoryCfg_v1, which would only
    # make it more fragile.

    with open(filename, "r") as f:
        config_str = f.read()

    new_config_str = config_str.replace(old_root, new_root)
    if new_config_str == config_str:
        log.info(f"No change in {filename}")
    else:
        _break_hardlink(filename)
        with open(filename, "w") as f:
            f.write(new_config_str)
        log.info(f"{filename} updated")
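A self-contained round trip of the string-replacement approach, using plain file I/O and placeholder paths:

import tempfile

with tempfile.NamedTemporaryFile("w+", suffix=".yaml") as f:
    f.write("_root: /old/gen2/root\n")      # stand-in for a RepositoryCfg_v1 file
    f.seek(0)
    config_str = f.read()

new_config_str = config_str.replace("/old/gen2/root", "/new/gen2/root")
assert new_config_str == "_root: /new/gen2/root\n"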
Example #17
def move_files(calib_root, policy_file=POLICY_FILE):
    """Copy files from a DESC DC2 gen2 origin repo to one with modified names
    to change parsed filter names to match those expected by the gen3 butler.

    Parameters
    ----------
    calib_root : str
        full path to the calib gen2 (POSIX) filestore
    policy_file : str
        Policy file from which to load templates

    """
    log = lsst.log.Log.getLogger("convertRepo")
    policy = Policy(policy_file)
    for data_type in DATATYPES_TO_CONVERT:

        if data_type == "SKY":
            template = policy["calibrations"]["sky"]["template"]
            template = template.replace("sky", "SKY")
        else:
            template = policy["calibrations"][data_type]["template"]

        for old_filename, new_filename in _transform_pairs(
                calib_root, template):
            log.info("Moving %s to %s", old_filename, new_filename)
            new_dir = os.path.split(new_filename)[0]
            try:
                os.makedirs(new_dir)
            except FileExistsError:
                pass
            shutil.move(old_filename, new_filename)
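On Python 3.2+, the try/except FileExistsError pattern above collapses into the exist_ok flag; a sketch of the equivalent step with a throwaway path:

import os
import tempfile

new_dir = os.path.join(tempfile.gettempdir(), "calib_demo", "sub")  # placeholder
os.makedirs(new_dir, exist_ok=True)   # no exception if the directory exists
os.makedirs(new_dir, exist_ok=True)   # idempotent: the second call is a no-op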
Example #18
    def getSourcesAndCoeffs(self):
        """Read crosstalk sources and coefficients from DECam-specific file.

        Returns
        -------
        sources : `defaultdict`
            Sources of crosstalk read from crosstalk_file.
        coeffs : `dict`
            Linear crosstalk coefficients corresponding to sources.
        """
        crosstalk_file = self.getSourcesAndCoeffsFile()
        sources = defaultdict(list)
        coeffs = {}
        log = lsst.log.Log.getLogger('obs.decam.DecamCrosstalkConfig')
        log.info('Reading crosstalk coefficient data')
        with open(crosstalk_file) as f:
            for line in f:
                li = line.strip()
                if not li.startswith('#'):
                    elem = li.split()
                    # The xtalk file has image areas like 'ccd01A'; preserve only '01A'
                    sources[elem[0][3:]].append(elem[1][3:])
                    # The linear crosstalk coefficients
                    coeffs[(elem[0][3:], elem[1][3:])] = float(elem[2])
        return sources, coeffs
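Parsing one made-up data row the way the loop above does: the leading 'ccd' is sliced off, leaving amplifier-style labels such as '01A' (the source/victim naming here is illustrative).

line = "ccd01A ccd01B 0.000123"       # hypothetical crosstalk row
elem = line.strip().split()
source_amp, victim_amp = elem[0][3:], elem[1][3:]
coeff = float(elem[2])
assert (source_amp, victim_amp, coeff) == ("01A", "01B", 0.000123)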
Example #19
def _getIFull(_request, W13db):
    ''' Get a full image from the input parameters.
    W13db should be the appropriate class (W13DeepCoadDb, W13RawDb, etc.)
    '''
    raIn = _request.args.get('ra')
    decIn = _request.args.get('dec')
    filt = _request.args.get('filter')

    # check inputs
    valid, ra, dec, filt, msg = checkRaDecFilter(raIn, decIn, filt, 'irg')
    if not valid:
        # TODO: use HTTP errors DM-1980
        resp = "INVALID_INPUT {}".format(msg)
        return resp
    log.info("raw ra={} dec={} filt={}".format(ra, dec, filt))
    # fetch the image here
    w13db = dbOpen("~/.lsst/dbAuth-dbServ.ini", W13db)
    imgFull = w13db.getImageFull(ra, dec)
    if imgFull is None:
        return _imageNotFound()
    log.debug("Full w=%d h=%d", imgFull.getWidth(), imgFull.getHeight())
    tmpPath = tempfile.mkdtemp()
    fileName = os.path.join(tmpPath, "fullImage.fits")
    log.info("temporary fileName=%s", fileName)
    imgFull.writeFits(fileName)
    w13db.closeConnection()
    resp = responseFile(fileName)
    os.remove(fileName)
    os.removedirs(tmpPath)
    return resp
Example #20
    def getCcdImage(self, ccd, imageFactory=afwImage.ImageF, binSize=1):
        """Return an image of the specified ccd, and also the (possibly updated) ccd"""

        log = lsst.log.Log.getLogger("afw.cameraGeom.utils.ButlerImage")

        if self.isTrimmed:
            bbox = ccd.getBBox()
        else:
            bbox = calcRawCcdBBox(ccd)

        im = None
        if self.butler is not None:
            e = None
            if self.type == "calexp":    # reading the exposure can die if the PSF's unknown
                try:                     # need to switch to cid as below.  RHL has no calexp to test with
                    fileName = self.butler.get(self.type + "_filename", ccd=ccd.getId(),
                                               **self.kwargs)[0]
                    im = imageFactory(fileName)
                except Exception as exc:
                    e = exc  # keep the exception; "as e" would be unbound after this block
            else:
                im = None
                for cid in [ccd.getId(), ccd.getName()]:
                    try:
                        im = self.butler.get(self.type, ccd=cid, **self.kwargs)
                        ccd = im.getDetector()  # possibly modified by assembleCcdTask
                        e = None
                        break
                    except Exception as exc:
                        e = exc  # remember the failure so it can be re-raised below
                        continue

                if im:
                    im = im.getMaskedImage().getImage()
                else:
                    raise e

            if e:
                if self.verbose:
                    log.info("Reading %s: %s" % (ccd.getId(), e))

        if im is None:
            return self._prepareImage(ccd, imageFactory(*bbox.getDimensions()), binSize), ccd

        allowRotate = True              # all other images were rotated by the ISR;
                                        # set here so the final call always has a value
        if self.type == "raw":
            if hasattr(im, 'convertF'):
                im = im.convertF()
            if False and self.callback is None:   # we need to trim the raw image
                self.callback = rawCallback

        if self.callback:
            try:
                im = self.callback(im, ccd, imageSource=self)
            except Exception as e:
                if self.verbose:
                    log.error("callback failed: %s" % e)
                im = imageFactory(*bbox.getDimensions())

        return self._prepareImage(ccd, im, binSize, allowRotate=allowRotate), ccd
Example #21
def update_registry(gen2_root):
    """Update a gen2 registry to include new columns

    Parameters
    ----------
    gen2_root : str
        full path to the calib gen2 (POSIX) filestore
    """
    log = lsst.log.Log.getLogger("convertRepo")

    registry_filename = os.path.join(gen2_root, "registry.sqlite3")
    _break_hardlink(registry_filename)

    with closing(sqlite3.connect(registry_filename)) as con:
        raw_columns = [c[1] for c in con.execute("PRAGMA table_info(raw);")]

        if "controller" not in raw_columns:
            with con:
                con.execute("ALTER TABLE raw ADD COLUMN controller TEXT;")
                con.execute("UPDATE raw SET controller = 'S'")
                log.info(
                    f"Added 'controller' column to raw table of {registry_filename}"
                )

        if "obsid" not in raw_columns:
            with con:
                con.execute("ALTER TABLE raw ADD COLUMN obsid TEXT;")
                con.execute("UPDATE raw SET obsid=visit")
                log.info(
                    f"Added 'obsid' column to raw table of {registry_filename}"
                )

        if "expGroup" not in raw_columns:
            with con:
                con.execute("ALTER TABLE raw ADD COLUMN expGroup TEXT;")
                con.execute("UPDATE raw SET expGroup=CAST(visit AS TEXT)")
                log.info(
                    f"Added 'expGroup' column to raw table of {registry_filename}"
                )

        if "expId" not in raw_columns:
            with con:
                con.execute("ALTER TABLE raw ADD COLUMN expId INT;")
                con.execute("UPDATE raw SET expId=visit")
                log.info(
                    f"Added 'expId' column to raw table of {registry_filename}"
                )

        uraw_index_query = (
            "SELECT * FROM sqlite_master WHERE type='index' AND name='u_raw';")
        has_uraw_index = len([r for r in con.execute(uraw_index_query)]) > 0
        if not has_uraw_index:
            with con:
                con.execute(
                    "CREATE UNIQUE INDEX u_raw ON raw (expId, detector, visit);"
                )
                log.info(
                    f"Added 'u_raw' index to raw table of {registry_filename}")
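The PRAGMA-guarded, idempotent migration pattern, demonstrated against an in-memory database with an illustrative schema:

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE raw (visit INT, detector INT)")

columns = [c[1] for c in con.execute("PRAGMA table_info(raw);")]
if "expId" not in columns:                        # guard makes re-runs safe
    con.execute("ALTER TABLE raw ADD COLUMN expId INT;")
    con.execute("UPDATE raw SET expId=visit")
con.close()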
Example #22
    def stopProduction(self, urgency, timeout=1800):
        """Stop all workflows in this production run.

        Parameters
        ----------
        urgency : `int`
            An indicator of how urgently to carry out the shutdown.
        timeout : `int`
            The time to wait (in seconds) for workflows to finish.

        Returns
        -------
        success : `bool`
            True on successful shutdown of workflows, False otherwise.

        Notes
        -----
        For urgency, it is intended that recognized values should be:

        FINISH_PENDING_DATA - end after all currently available data has been processed
        END_ITERATION       - end after the current data looping iteration
        CHECKPOINT          - end at next checkpoint opportunity (typically between stages)
        NOW                 - end as soon as possible, forgoing any checkpointing
        """
        if not self.isRunning():
            log.info("shutdown requested when production is not running")
            return

        log.info("Shutting down production (urgency=%s)" % urgency)

        for workflow in self._workflowManagers["__order"]:
            workflowMgr = self._workflowManagers[workflow.getName()]
            workflowMgr.stopWorkflow(urgency)

        pollintv = 0.2
        running = self.isRunning()
        lasttime = time.time()
        while running and timeout > 0:
            time.sleep(pollintv)
            for workflow in self._workflowManagers["__order"]:
                running = self._workflowManagers[
                    workflow.getName()].isRunning()
                if running:
                    break
            now = time.time()
            timeout -= now - lasttime
            lasttime = now
        if not running:
            with self._locked:
                self._locked.running = False
                self._locked.done = True
        else:
            log.debug("Failed to shutdown pipelines within timeout: %ss" %
                      timeout)
            return False

        return True
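The countdown-with-polling idea as a standalone sketch; the budget shrinks by the time actually elapsed in each pass:

import time

def wait_until_stopped(is_running, timeout, poll=0.2):
    # Return True if is_running() went false before the budget ran out.
    while is_running() and timeout > 0:
        start = time.time()
        time.sleep(poll)
        timeout -= time.time() - start    # subtract only this pass's elapsed time
    return not is_running()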
Example #23
def main():
    # Use logger "main"
    logger = log.getLogger("main")
    logger.info("In main")
    visits = [12345, 67890, 27182, 31415]
    pool = mp.Pool(processes=2)
    pool.map_async(a, [(visit, logger) for visit in visits])
    pool.close()
    pool.join()
    b(logger)
    log.info("Leaving main")
Example #24
    def testPattern(self):
        """
        Test a complex pattern for log messages, including Mapped
        Diagnostic Context (MDC).
        """
        with TestLog.StdoutCapture(self.outputFilename):
            self.configure("""
log4j.rootLogger=DEBUG, CA
log4j.appender.CA=ConsoleAppender
log4j.appender.CA.layout=PatternLayout
log4j.appender.CA.layout.ConversionPattern=%-5p %c %C %M (%F:%L) %l - %m - %X%n
""")
            log.trace("This is TRACE")
            log.info("This is INFO")
            log.debug("This is DEBUG")

            log.MDC("x", 3)
            log.MDC("y", "foo")
            log.MDC("z", TestLog)

            log.trace("This is TRACE 2")
            log.info("This is INFO 2")
            log.debug("This is DEBUG 2")
            log.MDCRemove("z")

            with log.LogContext("component"):
                log.trace("This is TRACE 3")
                log.info("This is INFO 3")
                log.debug("This is DEBUG 3")
                log.MDCRemove("x")
                log.trace("This is TRACE 4")
                log.info("This is INFO 4")
                log.debug("This is DEBUG 4")

            log.trace("This is TRACE 5")
            log.info("This is INFO 5")
            log.debug("This is DEBUG 5")

            log.MDCRemove("y")

        # Use format to make line numbers easier to change.
        self.check("""
INFO  root  testPattern (test_log.py:{0[0]}) test_log.py({0[0]}) - This is INFO - {{}}
DEBUG root  testPattern (test_log.py:{0[1]}) test_log.py({0[1]}) - This is DEBUG - {{}}
INFO  root  testPattern (test_log.py:{0[2]}) test_log.py({0[2]}) - This is INFO 2 - {{{{x,3}}{{y,foo}}{{z,<class '{1}.TestLog'>}}}}
DEBUG root  testPattern (test_log.py:{0[3]}) test_log.py({0[3]}) - This is DEBUG 2 - {{{{x,3}}{{y,foo}}{{z,<class '{1}.TestLog'>}}}}
INFO  component  testPattern (test_log.py:{0[4]}) test_log.py({0[4]}) - This is INFO 3 - {{{{x,3}}{{y,foo}}}}
DEBUG component  testPattern (test_log.py:{0[5]}) test_log.py({0[5]}) - This is DEBUG 3 - {{{{x,3}}{{y,foo}}}}
INFO  component  testPattern (test_log.py:{0[6]}) test_log.py({0[6]}) - This is INFO 4 - {{{{y,foo}}}}
DEBUG component  testPattern (test_log.py:{0[7]}) test_log.py({0[7]}) - This is DEBUG 4 - {{{{y,foo}}}}
INFO  root  testPattern (test_log.py:{0[8]}) test_log.py({0[8]}) - This is INFO 5 - {{{{y,foo}}}}
DEBUG root  testPattern (test_log.py:{0[9]}) test_log.py({0[9]}) - This is DEBUG 5 - {{{{y,foo}}}}
""".format([x + 209 for x in (0, 1, 8, 9, 14, 15, 18, 19, 22, 23)],
           __name__))  # noqa E501 line too long
Example #25
    def _createWorkflowLauncher(self):
        """Create the workflow launcher

        Notes
        -----
        This abstract method must be overridden; otherwise an exception is raised
        """

        msg = 'called "abstract" WorkflowConfigurator._createWorkflowLauncher'
        log.info(msg)
        raise RuntimeError(msg)
Example #26
    def _createWorkflowLauncher(self):
        """Create the workflow launcher

        Notes
        -----
        This abstract method must be overridden; otherwise an exception is raised
        """

        msg = 'called "abstract" WorkflowConfigurator._createWorkflowLauncher'
        log.info(msg)
        raise RuntimeError(msg)
Example #27
def main():
    # Set component to "main" (from "root")
    with log.LogContext("main"):
        log.info("In main")
        visits = [12345, 67890, 27182, 31415]
        pool = mp.Pool(processes=2)
        pool.map_async(a, visits)
        pool.close()
        pool.join()
        b()
        log.info("Leaving main")
Example #28
def a(visit):
    # Set subcomponent to "a" (sets component to "main.a")
    with log.LogContext("a"):
        # Clean out any previous MDC for visit
        log.MDCRemove("visit")
        # All subsequent log messages will have visit id
        log.MDC("visit", visit)
        log.info("In a, %d", visit)
        log.debug("Debug message in a")
        b()
        log.info("Leaving a")
Example #29
def runApPipeGen2(workspace, parsedCmdLine, processes=1):
    """Run `ap_pipe` on this object's dataset.

    Parameters
    ----------
    workspace : `lsst.ap.verify.workspace.WorkspaceGen2`
        The abstract location containing input and output repositories.
    parsedCmdLine : `argparse.Namespace`
        Command-line arguments, including all arguments supported by `ApPipeParser`.
    processes : `int`
        The number of processes with which to call the AP pipeline

    Returns
    -------
    apPipeReturn : `Struct`
        The `Struct` returned from `~lsst.ap.pipe.ApPipeTask.parseAndRun` with
        ``doReturnResults=False``. This object is valid even if
        `~lsst.ap.pipe.ApPipeTask` was never run.
    """
    log = lsst.log.Log.getLogger('ap.verify.pipeline_driver.runApPipeGen2')

    makeApdb(_getApdbArguments(workspace, parsedCmdLine))

    pipelineArgs = [
        workspace.dataRepo, "--output", workspace.outputRepo, "--calib",
        workspace.calibRepo, "--template", workspace.templateRepo
    ]
    pipelineArgs.extend(_getConfigArguments(workspace, parsedCmdLine))
    if parsedCmdLine.dataIds:
        for singleId in parsedCmdLine.dataIds:
            pipelineArgs.extend(["--id", *singleId.split(" ")])
    else:
        pipelineArgs.extend(["--id"])
    pipelineArgs.extend(["--processes", str(processes)])
    pipelineArgs.extend(["--noExit"])

    if not parsedCmdLine.skip_pipeline:
        results = apPipe.ApPipeTask.parseAndRun(pipelineArgs)
        log.info('Pipeline complete')
    else:
        log.info('Skipping AP pipeline entirely.')
        apPipeParser = apPipe.ApPipeTask._makeArgumentParser()
        apPipeParsed = apPipeParser.parse_args(
            config=apPipe.ApPipeTask.ConfigClass(), args=pipelineArgs)
        results = pipeBase.Struct(
            argumentParser=apPipeParser,
            parsedCmd=apPipeParsed,
            taskRunner=apPipe.ApPipeTask.RunnerClass(
                TaskClass=apPipe.ApPipeTask, parsedCmd=apPipeParsed),
            resultList=[],
        )

    return results
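How the --id tokens are expanded, in pure string handling with a made-up data ID (no lsst imports needed):

dataIds = ["visit=123 ccd=42"]
pipelineArgs = []
for singleId in dataIds:
    pipelineArgs.extend(["--id", *singleId.split(" ")])
assert pipelineArgs == ["--id", "visit=123", "ccd=42"]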
Example #30
def _getISkyMapDeepCoaddCutout(_request, units):
    '''Get a stitched-together deepCoadd image from /lsst/releaseW13EP deepCoadd_skyMap
    '''
    source = _request.args.get("source", None)
    if not source:
        # Use a default
        source = current_app.config["dax.imgserv.default_source"]

    # Be safe and encode source to utf8, just in case
    source = source.encode('utf8')
    log.debug("Using filesystem source: %s", source)

    mapType = "deepCoadd_skyMap"
    patchType = "deepCoadd"

    raIn = _request.args.get('ra')
    decIn = _request.args.get('dec')
    filt = _request.args.get('filter')
    widthIn = _request.args.get('width')
    heightIn = _request.args.get('height')
    # check inputs - Many valid filter names are unknown and can't be checked.
    valid, ra, dec, msg = checkRaDec(raIn, decIn)
    if not valid:
        msg = "INVALID_INPUT {}".format(msg)
        return _error(ValueError.__name__, msg, BAD_REQUEST)
    try:
        width = float(widthIn)
        height = float(heightIn)
        # The butler isn't fond of unicode in this case.
        filt = filt.encode('ascii')
    except ValueError:
        msg = "INVALID_INPUT width={} height={}".format(widthIn, heightIn)
        return _error(ValueError.__name__, msg, BAD_REQUEST)
    log.info("skymap cutout pixel ra={} dec={} filt={} width={} height={}".format(
            ra, dec, filt, width, height))
    # fetch the image here
    raA = afwGeom.Angle(ra, afwGeom.degrees)
    decA = afwGeom.Angle(dec, afwGeom.degrees)
    ctrCoord = afwCoord.Coord(raA, decA, 2000.0)
    try:
        expo = getSkyMap(ctrCoord, int(width), int(height), filt, units, source, mapType, patchType)
    except RuntimeError as e:
        return _error("RuntimeError", e.args[0], INTERNAL_SERVER_ERROR)
    if expo is None:
        return _imageNotFound()
    tmpPath = tempfile.mkdtemp()
    fileName = os.path.join(tmpPath, "cutout.fits")
    log.info("temporary fileName=%s", fileName)
    expo.writeFits(fileName)
    resp = responseFile(fileName)
    os.remove(fileName)
    os.removedirs(tmpPath)
    return resp
Example #31
    def stopProduction(self, urgency, timeout=1800):
        """Stop all workflows in this production run.

        Parameters
        ----------
        urgency : `int`
            An indicator of how urgently to carry out the shutdown.
        timeout : `int`
            The time to wait (in seconds) for workflows to finish.

        Returns
        -------
        success : `bool`
            True on successful shutdown of workflows, False otherwise.

        Notes
        -----
        For urgency, it is intended that recognized values should be:

        FINISH_PENDING_DATA - end after all currently available data has been processed
        END_ITERATION       - end after the current data looping iteration
        CHECKPOINT          - end at next checkpoint opportunity (typically between stages)
        NOW                 - end as soon as possible, forgoing any checkpointing
        """
        if not self.isRunning():
            log.info("shutdown requested when production is not running")
            return

        log.info("Shutting down production (urgency=%s)" % urgency)

        for workflow in self._workflowManagers["__order"]:
            workflowMgr = self._workflowManagers[workflow.getName()]
            workflowMgr.stopWorkflow(urgency)

        pollintv = 0.2
        running = self.isRunning()
        lasttime = time.time()
        while running and timeout > 0:
            time.sleep(pollintv)
            for workflow in self._workflowManagers["__order"]:
                running = self._workflowManagers[workflow.getName()].isRunning()
                if running:
                    break
            now = time.time()
            timeout -= now - lasttime
            lasttime = now
        if not running:
            with self._locked:
                self._locked.running = False
                self._locked.done = True
        else:
            log.debug("Failed to shutdown pipelines within timeout: %ss" % timeout)
            return False

        return True
Example #32
    def testPattern(self):
        """
        Test a complex pattern for log messages, including Mapped
        Diagnostic Context (MDC).
        """
        with TestLog.StdoutCapture(self.outputFilename):
            self.configure("""
log4j.rootLogger=DEBUG, CA
log4j.appender.CA=ConsoleAppender
log4j.appender.CA.layout=PatternLayout
log4j.appender.CA.layout.ConversionPattern=%-5p %c %C %M (%F:%L) %l - %m - %X%n
""")
            log.trace("This is TRACE")
            log.info("This is INFO")
            log.debug("This is DEBUG")

            log.MDC("x", 3)
            log.MDC("y", "foo")
            log.MDC("z", TestLog)

            log.trace("This is TRACE 2")
            log.info("This is INFO 2")
            log.debug("This is DEBUG 2")
            log.MDCRemove("z")

            with log.LogContext("component"):
                log.trace("This is TRACE 3")
                log.info("This is INFO 3")
                log.debug("This is DEBUG 3")
                log.MDCRemove("x")
                log.trace("This is TRACE 4")
                log.info("This is INFO 4")
                log.debug("This is DEBUG 4")

            log.trace("This is TRACE 5")
            log.info("This is INFO 5")
            log.debug("This is DEBUG 5")

            log.MDCRemove("y")

        # Use format to make line numbers easier to change.
        self.check("""
INFO  root  testPattern (testLog.py:{0[0]}) testLog.py({0[0]}) - This is INFO - {{}}
DEBUG root  testPattern (testLog.py:{0[1]}) testLog.py({0[1]}) - This is DEBUG - {{}}
INFO  root  testPattern (testLog.py:{0[2]}) testLog.py({0[2]}) - This is INFO 2 - {{{{x,3}}{{y,foo}}{{z,<class '{1}.TestLog'>}}}}
DEBUG root  testPattern (testLog.py:{0[3]}) testLog.py({0[3]}) - This is DEBUG 2 - {{{{x,3}}{{y,foo}}{{z,<class '{1}.TestLog'>}}}}
INFO  component  testPattern (testLog.py:{0[4]}) testLog.py({0[4]}) - This is INFO 3 - {{{{x,3}}{{y,foo}}}}
DEBUG component  testPattern (testLog.py:{0[5]}) testLog.py({0[5]}) - This is DEBUG 3 - {{{{x,3}}{{y,foo}}}}
INFO  component  testPattern (testLog.py:{0[6]}) testLog.py({0[6]}) - This is INFO 4 - {{{{y,foo}}}}
DEBUG component  testPattern (testLog.py:{0[7]}) testLog.py({0[7]}) - This is DEBUG 4 - {{{{y,foo}}}}
INFO  root  testPattern (testLog.py:{0[8]}) testLog.py({0[8]}) - This is INFO 5 - {{{{y,foo}}}}
DEBUG root  testPattern (testLog.py:{0[9]}) testLog.py({0[9]}) - This is DEBUG 5 - {{{{y,foo}}}}
""".format([x + 180 for x in (0, 1, 8, 9, 14, 15, 18, 19, 22, 23)], __name__))  # noqa line too long
Example #33
    def _newTask(self, pipeline, taskName, label=None, obsPkg=None, camera=None):
        """Append new task to a pipeline.

        Parameters
        ----------
        pipeline : :py:class:`Pipeline`
            Pipeline instance.
        taskName : `str`
            Name of the new task, can be either full class name including
            package and module, or just a class name to be searched in
            known packages and modules.
        label : `str`, optional
            Label for the new task; if `None`, the task class name is used as
            the label.
        obsPkg : `str`, optional
            Name of the package to look for task overrides, if None then
            overrides are not used.
        camera : `str`, optional
            Camera name, used for camera-specific overrides and only if
            `obsPkg` is not `None`.
        """
        # load task class, will throw on errors
        taskClass, taskName = self.taskFactory.loadTaskClass(taskName)

        # get label and check that it is unique
        if not label:
            label = taskName.rpartition('.')[2]
        if pipeline.labelIndex(label) >= 0:
            raise LookupError("Task label (or name) is not unique: " + label)

        # make config instance with defaults
        config = taskClass.ConfigClass()

        # apply camera/package overrides
        if obsPkg:
            obsPkgDir = lsst.utils.getPackageDir(obsPkg)
            configName = taskClass._DefaultName
            fileName = configName + ".py"
            overrides = ConfigOverrides()
            for filePath in (
                os.path.join(obsPkgDir, "config", fileName),
                os.path.join(obsPkgDir, "config", camera, fileName),
            ):
                if os.path.exists(filePath):
                    lsstLog.info("Loading config override file %r", filePath)
                    overrides.addFileOverride(filePath)
                else:
                    lsstLog.debug("Config override file does not exist: %r", filePath)

            overrides.applyTo(config)

        pipeline.append(TaskDef(taskName=taskName, config=config,
                                taskClass=taskClass, label=label))
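The override lookup order in isolation (all paths hypothetical): the package-wide file is applied first, so a camera-specific file can override it.

import os

obsPkgDir, camera, fileName = "/path/to/obs_pkg", "myCamera", "isr.py"
candidates = [
    os.path.join(obsPkgDir, "config", fileName),          # package-wide override
    os.path.join(obsPkgDir, "config", camera, fileName),  # camera-specific override
]
to_apply = [p for p in candidates if os.path.exists(p)]   # applied in this order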
Example #34
File: test_log.py Project: lsst/log
    def testMdcUpdate(self):
        """Test for overwriting MDC.
        """

        expected_msg = \
            "INFO  - Message one {}\n" \
            "INFO  - Message two {{LABEL,123456}}\n" \
            "INFO  - Message three {{LABEL,654321}}\n" \
            "INFO  - Message four {}\n"

        with TestLog.StdoutCapture(self.outputFilename):

            self.configure("""
log4j.rootLogger=DEBUG, CA
log4j.appender.CA=ConsoleAppender
log4j.appender.CA.layout=PatternLayout
log4j.appender.CA.layout.ConversionPattern=%-5p - %m %X%n
""")

            log.info("Message one")

            log.MDC("LABEL", "123456")
            log.info("Message two")

            log.MDC("LABEL", "654321")
            log.info("Message three")

            log.MDCRemove("LABEL")
            log.info("Message four")

        self.check(expected_msg)
Example #35
    def testMdcUpdate(self):
        """Test for overwriting MDC.
        """

        expected_msg = \
            "INFO  - Message one {}\n" \
            "INFO  - Message two {{LABEL,123456}}\n" \
            "INFO  - Message three {{LABEL,654321}}\n" \
            "INFO  - Message four {}\n"

        with TestLog.StdoutCapture(self.outputFilename):

            self.configure("""
log4j.rootLogger=DEBUG, CA
log4j.appender.CA=ConsoleAppender
log4j.appender.CA.layout=PatternLayout
log4j.appender.CA.layout.ConversionPattern=%-5p - %m %X%n
""")

            log.info("Message one")

            log.MDC("LABEL", "123456")
            log.info("Message two")

            log.MDC("LABEL", "654321")
            log.info("Message three")

            log.MDCRemove("LABEL")
            log.info("Message four")

        self.check(expected_msg)
Example #36
    def stopWorkflow(self, urgency):
        """Stop the workflow

        Parameters
        ----------
        urgency : `int`
            urgency at which to shut down this workflow
        """

        log.debug("WorkflowManager:stopWorkflow")
        if self._monitor:
            self._monitor.stopWorkflow(urgency)
        else:
            log.info("Workflow %s is not running" % self.name)
Example #37
def runApPipeGen3(workspace, parsedCmdLine, processes=1):
    """Run `ap_pipe` on this object's dataset.

    Parameters
    ----------
    workspace : `lsst.ap.verify.workspace.WorkspaceGen3`
        The abstract location containing input and output repositories.
    parsedCmdLine : `argparse.Namespace`
        Command-line arguments, including all arguments supported by `ApPipeParser`.
    processes : `int`
        The number of processes with which to call the AP pipeline.

    Returns
    -------
    exitCode : `int`
        The exit code from the pipeline runner, or `None` if the pipeline
        was skipped.
    """
    log = lsst.log.Log.getLogger('ap.verify.pipeline_driver.runApPipeGen3')

    makeApdb(_getApdbArguments(workspace, parsedCmdLine))

    pipelineArgs = [
        "run",
        "--butler-config",
        workspace.repo,
        "--pipeline",
        parsedCmdLine.pipeline,
    ]
    # TODO: collections should be determined exclusively by Workspace.workButler,
    # but I can't find a way to hook that up to the graph builder. So use the CLI
    # for now and revisit once DM-26239 is done.
    pipelineArgs.extend(_getCollectionArguments(workspace))
    pipelineArgs.extend(_getConfigArgumentsGen3(workspace, parsedCmdLine))
    if parsedCmdLine.dataIds:
        for singleId in parsedCmdLine.dataIds:
            pipelineArgs.extend(["--data-query", singleId])
    pipelineArgs.extend(["--processes", str(processes)])
    pipelineArgs.extend(["--register-dataset-types"])

    if not parsedCmdLine.skip_pipeline:
        # CliRunner is an unsafe workaround for DM-26239
        runner = click.testing.CliRunner()
        # TODO: generalize this code in DM-26028
        # TODO: work off of workspace.workButler after DM-26239
        results = runner.invoke(lsst.ctrl.mpexec.cli.pipetask.cli,
                                pipelineArgs)
        if results.exception:
            raise RuntimeError("Pipeline failed.") from results.exception

        log.info('Pipeline complete.')
        return results.exit_code
    else:
        log.info('Skipping AP pipeline entirely.')
Example #38
def _image_from_data_id(_request, image_db_class):
    """ Get a full image from the field ids given.
    image_db_class should be the appropriate class (W13DeepCoadDb, W13RawDb, etc.)
    """
    # fetch the image here
    img_getter = image_open(current_app.config["DAX_IMG_DBCONF"], image_db_class)
    ids, valid_ids = img_getter.data_id_from_request(_request)
    log.info("valid={} id {}".format(valid_ids, ids))
    if not valid_ids:
        resp = "INVALID_INPUT {}".format(ids)
        return resp
    full_img = img_getter.image_by_data_id(ids)
    if full_img is None:
        return _image_not_found()
    log.debug("Full w=%d h=%d", full_img.getWidth(), full_img.getHeight())
    return _file_response(full_img, "full_image.fits")
Example #39
    def stopProduction(self, urgency, timeout=1800):
        # urgency - an indicator of how urgently to carry out the shutdown.
        #
        # Recognized values are:
        #   FINISH_PENDING_DATA - end after all currently available data has
        #                         been processed
        #   END_ITERATION       - end after the current data looping iteration
        #   CHECKPOINT          - end at next checkpoint opportunity
        #                         (typically between stages)
        #   NOW                 - end as soon as possible, forgoing any
        #                         checkpointing
        if not self.isRunning():
            log.info("shutdown requested when production is not running")
            return
        
        log.info("Shutting down production (urgency=%s)" % urgency)

        for workflow in self._workflowManagers["__order"]:
            workflowMgr = self._workflowManagers[workflow.getName()]
            workflowMgr.stopWorkflow(urgency)


        pollintv = 0.2
        running = self.isRunning()
        lasttime = time.time()
        while running and timeout > 0:
            time.sleep(pollintv)
            for workflow in self._workflowManagers["__order"]:
                running = self._workflowManagers[workflow.getName()].isRunning()
                if running:
                    break
            now = time.time()
            timeout -= now - lasttime
            lasttime = now
        if not running:
            with self._locked:
                self._locked.running = False
                self._locked.done = True
        else:
            log.debug("Failed to shutdown pipelines within timeout: %ss" % timeout)
            return False

        # stop loggers after everything else has died
        for lm in self._loggerManagers:
            lm.stop()

        return True
Example #40
    def testFileAppender(self):
        """Test configuring logging to go to a file."""
        self.configure("""
log4j.rootLogger=DEBUG, FA
log4j.appender.FA=FileAppender
log4j.appender.FA.file={0}
log4j.appender.FA.layout=SimpleLayout
""")
        log.MDC("x", 3)
        log.trace("This is TRACE")
        log.info("This is INFO")
        log.debug("This is DEBUG")
        log.MDCRemove("x")

        self.check("""
INFO - This is INFO
DEBUG - This is DEBUG
""")
Example #41
File: test_log.py Project: lsst/log
    def testFileAppender(self):
        """Test configuring logging to go to a file."""
        self.configure("""
log4j.rootLogger=DEBUG, FA
log4j.appender.FA=FileAppender
log4j.appender.FA.file={0}
log4j.appender.FA.layout=SimpleLayout
""")
        log.MDC("x", 3)
        log.trace("This is TRACE")
        log.info("This is INFO")
        log.debug("This is DEBUG")
        log.MDCRemove("x")

        self.check("""
INFO - This is INFO
DEBUG - This is DEBUG
""")
Example #42
    def configure(self, provSetup=None, workflowVerbosity=None):
        log.debug("WorkflowManager:configure")
        if self._workflowConfigurator:
            log.info("production has already been configured.")
            return
        
        # lock this branch of code
        try:
            self._locked.acquire()

            self._workflowConfigurator = self.createConfigurator(
                self.runid, self.repository, self.name, self.wfConfig, self.prodConfig)
            self._workflowLauncher = self._workflowConfigurator.configure(provSetup, workflowVerbosity)
        finally:
            self._locked.release()

        # do specialized workflow level configuration here, this may include
        # calling ProvenanceSetup.getWorkflowCommands()
        return self._workflowLauncher
Example #43
File: test_log.py Project: lsst/log
    def testMDCPutPid(self):
        """
        Test add of PID Mapped Diagnostic Context (MDC).
        """
        pid = os.fork()
        try:

            log.MDC("PID", os.getpid())
            self.configure("""
log4j.rootLogger=DEBUG, CA
log4j.appender.CA=ConsoleAppender
log4j.appender.CA.layout=PatternLayout
log4j.appender.CA.layout.ConversionPattern=%-5p PID:%X{{PID}} %c %C %M (%F:%L) %l - %m%n
""")  # noqa E501 line too long
            self.assertGreaterEqual(pid, 0, "Failed to fork")

            msg = "This is INFO"
            if pid == 0:
                self.tempDir = tempfile.mkdtemp()
                self.outputFilename = os.path.join(self.tempDir,
                                                   "log-child.out")
                msg += " in child process"
            elif pid > 0:
                child_pid, child_status = os.wait()
                self.assertEqual(child_status, 0,
                                 "Child returns incorrect code")
                msg += " in parent process"

            with TestLog.StdoutCapture(self.outputFilename):
                log.info(msg)
                line = 226  # line number for previous line
        finally:
            log.MDCRemove("PID")

        # Use format to make line numbers easier to change.
        self.check("""
INFO  PID:{1} root  testMDCPutPid (test_log.py:{0}) test_log.py({0}) - {2}
""".format(line, os.getpid(), msg))

        # don't pass other tests in child process
        if pid == 0:
            os._exit(0)
Example #44
    def testBrokerOption(self):
        testEnv = TestEnvironment()
        topic = testEnv.getLoggingTopic()
        confStr = "log4j.rootLogger=TRACE, EA\n"
        confStr += "log4j.appender.EA=EventAppender\n"
        confStr += "log4j.appender.EA.BROKER="+testEnv.getBroker()+"\n"
        confStr += "log4j.appender.EA.TOPIC="+topic+"\n"

        self.configure(confStr)

        recv = events.EventReceiver(testEnv.getBroker(), topic)
        log.MDC("x", 3)
        with log.LogContext("component"):
            log.trace("This is TRACE")
            log.info("This is INFO")
            log.debug("This is DEBUG")
        log.MDCRemove("x")
        self.assertValidMessage(recv.receiveEvent(), "This is TRACE")
        self.assertValidMessage(recv.receiveEvent(), "This is INFO")
        self.assertValidMessage(recv.receiveEvent(), "This is DEBUG")
Example #45
    def testBasic(self):
        """
        Test basic log output.  Since the default threshold is INFO, the
        TRACE message is not emitted.
        """
        with TestLog.StdoutCapture(self.outputFilename):
            log.configure()
            log.trace("This is TRACE")
            log.info("This is INFO")
            log.debug("This is DEBUG")
            log.warn("This is WARN")
            log.error("This is ERROR")
            log.fatal("This is FATAL")
            log.info("Format %d %g %s", 3, 2.71828, "foo")
        self.check("""
 INFO root null - This is INFO
 DEBUG root null - This is DEBUG
 WARN root null - This is WARN
 ERROR root null - This is ERROR
 FATAL root null - This is FATAL
 INFO root null - Format 3 2.71828 foo
""")
Example #46
    def testBrokerAlternateTopicOption(self):
        testEnv = EventsEnvironment()
        topic = platform.node() + "_" + str(os.getpid())
        confStr = "log4j.rootLogger=TRACE, EA\n"
        confStr += "log4j.appender.EA=EventAppender\n"
        confStr += "log4j.appender.EA.BROKER="+testEnv.getBroker()+"\n"
        confStr += "log4j.appender.EA.PORT="+str(testEnv.getPort())+"\n"
        confStr += "log4j.appender.EA.TOPIC="+topic+"\n"

        self.configure(confStr)

        recv = events.EventReceiver(testEnv.getBroker(), topic)

        log.MDC("x", 3)
        with log.LogContext("component"):
            log.trace("This is TRACE")
            log.info("This is INFO")
            log.debug("This is DEBUG")
        log.MDCRemove("x")
        self.assertValidMessage(recv.receiveEvent(), "This is TRACE")
        self.assertValidMessage(recv.receiveEvent(), "This is INFO")
        self.assertValidMessage(recv.receiveEvent(), "This is DEBUG")
Example #47
    def runWorkflow(self, statusListener, loggerManagers):
        log.debug("WorkflowManager:runWorkflow")

        if not self.isRunnable():
            if self.isRunning():
                log.info("Workflow %s is already running" % self.runid)
            if self.isDone():
                log.info("Workflow %s has already run; start with new runid" % self.runid)
            return False

        try:
            self._locked.acquire()

            if self._workflowConfigurator is None:
                self._workflowLauncher = self.configure()
            self._monitor = self._workflowLauncher.launch(statusListener, loggerManagers)
            
            # self.cleanUp()

        finally:
            self._locked.release()
        return self._monitor
Example #48
def _getICutout(_request, W13db, units):
    '''Get a raw image cutout based on input parameters.
    W13db should be the appropriate class (W13DeepCoadDb, W13RawDb, etc.)
    units should be 'pixel' or 'arcsecond'
    '''
    raIn = _request.args.get('ra')
    decIn = _request.args.get('dec')
    filt = _request.args.get('filter')
    widthIn = _request.args.get('width')
    heightIn = _request.args.get('height')
    # check inputs
    valid, ra, dec, filt, msg = checkRaDecFilter(raIn, decIn, filt, 'irg')
    if not valid:
        return _error(ValueError.__name__, msg, BAD_REQUEST)
    try:
        width = float(widthIn)
        height = float(heightIn)
    except ValueError:
        msg = "INVALID_INPUT width={} height={}".format(widthIn, heightIn)
        return _error(ValueError.__name__, msg, BAD_REQUEST)
    log.info("raw cutout pixel ra={} dec={} filt={} width={} height={}".format(
            ra, dec, filt, width, height))

    # fetch the image here
    w13db = dbOpen("~/.lsst/dbAuth-dbServ.ini", W13db)
    img = w13db.getImage(ra, dec, width, height, units)
    if img is None:
        return _imageNotFound()
    log.debug("Sub w={} h={}".format(img.getWidth(), img.getHeight()))
    tmpPath = tempfile.mkdtemp()
    fileName = os.path.join(tmpPath, "cutout.fits")
    log.info("temporary fileName=%s", fileName)
    img.writeFits(fileName)
    w13db.closeConnection()
    resp = responseFile(fileName)
    os.remove(fileName)
    os.removedirs(tmpPath)
    return resp
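For orientation, the handler reads Flask-style query parameters from `_request.args`, so a client call might look like the sketch below; the route and port are hypothetical, and only the parameter names come from the code above:

    import requests

    resp = requests.get(
        "http://localhost:5000/image/cutout",       # hypothetical route
        params={"ra": 19.36995, "dec": -0.3146,     # degrees
                "filter": "r", "width": 30, "height": 30})
    with open("cutout.fits", "wb") as f:
        f.write(resp.content)                       # FITS bytes from responseFile()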
Example no. 49
0
    def configure(self, workflowVerbosity=None):
        """Configure this production run

        Parameters
        ----------
        workflowVerbosity : `int`
            The verbosity to pass down to configured workflows and the pipelines they run.

        Raises
        ------
        `ConfigurationError`
            If any error arises during configuration or while checking the configuration.

        Notes
        -----
        If the production has already been configured, this call is ignored and the production is not reconfigured.
        """

        if self._productionRunConfigurator:
            log.info("production has already been configured.")
            return

        # lock this branch of code
        try:
            self._locked.acquire()

            # TODO - SRP
            self._productionRunConfigurator = self.createConfigurator(self.runid,
                                                                      self.fullConfigFilePath)
            workflowManagers = self._productionRunConfigurator.configure(workflowVerbosity)

            self._workflowManagers = {"__order": []}
            for wfm in workflowManagers:
                self._workflowManagers["__order"].append(wfm)
                self._workflowManagers[wfm.getName()] = wfm

        finally:
            self._locked.release()
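The `"__order"` entry keeps the managers in configuration order next to the name-keyed entries, so later code can iterate deterministically and still look a manager up by name (as `runProduction` does further down). A minimal standalone sketch of the pattern, with stand-in objects:

    managers = {"__order": []}
    for name in ("wf-a", "wf-b"):                   # hypothetical workflow names
        mgr = ("manager-for", name)                 # stand-in for a WorkflowManager
        managers["__order"].append(mgr)
        managers[name] = mgr
    for mgr in managers["__order"]:                 # iterate in insertion order
        print(mgr)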
Example no. 50
0
    def testBasic(self):
        """
        Test basic log output with default configuration.
        Since the default threshold is INFO, the DEBUG or TRACE
        message is not emitted.
        """
        with TestLog.StdoutCapture(self.outputFilename):
            log.configure()
            log.log(log.getDefaultLoggerName(), log.INFO, "This is INFO")
            log.info(u"This is unicode INFO")
            log.trace("This is TRACE")
            log.debug("This is DEBUG")
            log.warn("This is WARN")
            log.error("This is ERROR")
            log.fatal("This is FATAL")
            log.warn("Format %d %g %s", 3, 2.71828, "foo")
        self.check("""
root INFO: This is INFO
root INFO: This is unicode INFO
root WARN: This is WARN
root ERROR: This is ERROR
root FATAL: This is FATAL
root WARN: Format 3 2.71828 foo
""")
Example no. 51
0
    def runWorkflow(self, statusListener):
        """ setup, launch and monitor a workflow to its completion, and then clean up
        """
        log.debug("WorkflowManager:runWorkflow")

        if not self.isRunnable():
            if self.isRunning():
                log.info("Workflow %s is already running" % self.runid)
            if self.isDone():
                log.info("Workflow %s has already run; start with new runid" % self.runid)
            return False

        try:
            self._locked.acquire()

            if self._workflowConfigurator is None:
                self._workflowLauncher = self.configure()
            self._monitor = self._workflowLauncher.launch(statusListener)

            self.cleanUp()

        finally:
            self._locked.release()
        return self._monitor
Example no. 52
0
    def testRunidSelector(self):
        testEnv = TestEnvironment()
        topic = testEnv.getLoggingTopic()
        confStr = "log4j.rootLogger=TRACE, EA\n"
        confStr += "log4j.appender.EA=EventAppender\n"
        confStr += "log4j.appender.EA.BROKER="+testEnv.getBroker()+"\n"
        confStr += "log4j.appender.EA.RUNID="+str(os.getpid())+"\n";
        confStr += "log4j.appender.EA.TOPIC="+topic+"\n"

        self.configure(confStr)

        # receive for all events
        recvALL = events.EventReceiver(testEnv.getBroker(), topic)

        # receive for events for this runid
        recv = events.EventReceiver(testEnv.getBroker(), topic, "RUNID = '%s'" % str(os.getpid()))

        # send log messages
        log.MDC("x", 3)
        with log.LogContext("component"):
            log.trace("This is TRACE")
            log.info("This is INFO")
            log.debug("This is DEBUG")
        log.MDCRemove("x")

        # make sure we got all the events we should have
        self.assertValidMessage(recv.receiveEvent(), "This is TRACE")
        self.assertValidMessage(recv.receiveEvent(), "This is INFO")
        self.assertValidMessage(recv.receiveEvent(), "This is DEBUG")

        # make sure we didn't get any more than we should have
        ev = recv.receiveEvent(100)
        self.assertIsNone(ev)

        # reconfigure with a new run id
        confStr2 = "log4j.rootLogger=TRACE, EA\n"
        confStr2 += "log4j.appender.EA=EventAppender\n"
        confStr2 += "log4j.appender.EA.BROKER="+testEnv.getBroker()+"\n"
        confStr2 += "log4j.appender.EA.RUNID="+"blah_"+str(os.getpid())+"\n";
        confStr2 += "log4j.appender.EA.TOPIC="+topic+"\n"
        self.configure(confStr2)

        # set up a receiver for the new run id
        recv2 = events.EventReceiver(testEnv.getBroker(), topic, "RUNID = 'blah_%s'" % str(os.getpid()))

        # send log messages
        log.MDC("x", 3)
        with log.LogContext("component"):
            log.trace("This is TRACE")
            log.info("This is INFO")
            log.debug("This is DEBUG")
        log.MDCRemove("x")

        # make sure we didn't receive any events from another run id
        ev = recv.receiveEvent(100)
        self.assertIsNone(ev)

        # make sure we got all the events we should have
        self.assertValidMessage(recv2.receiveEvent(), "This is TRACE")
        self.assertValidMessage(recv2.receiveEvent(), "This is INFO")
        self.assertValidMessage(recv2.receiveEvent(), "This is DEBUG")

        # make sure we didn't get any more than we should have
        ev = recv2.receiveEvent(100)
        self.assertIsNone(ev)

        # make sure we got all the events, for all messages on this topic
        self.assertValidMessage(recvALL.receiveEvent(), "This is TRACE")
        self.assertValidMessage(recvALL.receiveEvent(), "This is INFO")
        self.assertValidMessage(recvALL.receiveEvent(), "This is DEBUG")

        self.assertValidMessage(recvALL.receiveEvent(), "This is TRACE")
        self.assertValidMessage(recvALL.receiveEvent(), "This is INFO")
        self.assertValidMessage(recvALL.receiveEvent(), "This is DEBUG")

        # make sure we didn't get any more than we should have
        ev = recvALL.receiveEvent(100)
        self.assertIsNone(ev)
Example no. 53
0
    def stopWorkflow(self, urgency):
        log.debug("WorkflowManager:stopWorkflow")
        if self._monitor:
            self._monitor.stopWorkflow(urgency)
        else:
            log.info("Workflow %s is not running" % self.name)
Example no. 54
0
    def runProduction(self, skipConfigCheck=False, workflowVerbosity=None):
        log.debug("Running production: " + self.runid)

        if not self.isRunnable():
            if self.isRunning():
                log.info("Production Run %s is already running" % self.runid)
            if self.isDone():
                log.info("Production Run %s has already run; start with new runid" % self.runid)
            return False

        # set configuration check care level.
        # Note: this is not a sanctioned pattern; should be replaced with use
        # of default config.
        checkCare = 1

        if self.config.production.configCheckCare != 0:
            checkCare = self.config.production.configCheckCare
        if checkCare < 0:
            skipConfigCheck = True

        # lock this branch of code
        try:
            self._locked.acquire()
            self._locked.running = True

            # configure the production run (if it hasn't been already)
            if not self._productionRunConfigurator:
                self.configure(workflowVerbosity)

            # make sure the configuration was successful.
            if not self._workflowManagers:
                raise ConfigurationError("Failed to obtain workflowManagers from configurator")


            if not skipConfigCheck:
                self.checkConfiguration(checkCare)

            # launch the logger daemon
            for lm in self._loggerManagers:
                lm.start()

            # TODO - Re-add when Provenance is complete
            #provSetup = self._productionRunConfigurator.getProvenanceSetup()
            ## 
            #provSetup.recordProduction()

            for workflow in self._workflowManagers["__order"]:
                mgr = self._workflowManagers[workflow.getName()]

                statusListener = StatusListener()
                # this will block until the monitor is created.
                monitor = mgr.runWorkflow(statusListener, self._loggerManagers)
                self._workflowMonitors.append(monitor)

        finally:
            self._locked.release()

        # start the thread that will listen for shutdown events
        if self.config.production.productionShutdownTopic is not None:
            self._startShutdownThread()

        # announce data, if it's available
        # print("waiting for startup")
        # time.sleep(5)
        # for workflow in self._workflowManagers["__order"]:
        #     mgr = self._workflowManagers[workflow.getName()]
        #     print("mgr = ", mgr)
        #     mgr.announceData()
        print("Production launched.")
        print("Waiting for shutdown request.")