Example #1
File: test_celery.py  Project: hughbg/tkp
 def test_local_task_logger(self):
     """
     Logging should also work if you run it locally
     """
     setup_event_listening(celery_app)
     mock_handler = MockLoggingHandler()
     root_logger = logging.getLogger()
     root_logger.setLevel(logging.DEBUG)
     root_logger.addHandler(mock_handler)
     check = {logging.INFO, logging.WARNING, logging.ERROR, logging.DEBUG}
     mock_handler.reset()
     bogus()
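     # Each expected level should be logged exactly once by the task; levels
     # are removed as they are seen, so any leftover fails the final assert.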
     for record in mock_handler.records:
         if record.name == 'tkp.distribute.celery.tasks':
             self.assertTrue(record.levelno in check)
             check.remove(record.levelno)
     self.assertFalse(len(check))
Example #2
 def test_local_task_logger(self):
     """
     Logging should also work if you run it locally
     """
     setup_event_listening(celery_app)
     mock_handler = MockLoggingHandler()
     root_logger = logging.getLogger()
     root_logger.setLevel(logging.DEBUG)
     root_logger.addHandler(mock_handler)
     check = {logging.INFO, logging.WARNING, logging.ERROR, logging.DEBUG}
     mock_handler.reset()
     test_log()
     for record in mock_handler.records:
         if record.name == 'tkp.distribute.celery.tasks':
             self.assertTrue(record.levelno in check)
             check.remove(record.levelno)
     self.assertFalse(len(check))
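Both local-logger tests depend on a MockLoggingHandler and on a task (bogus / test_log) that live elsewhere in the project and are not shown here. As a rough sketch of what they presumably look like, with only the logger name 'tkp.distribute.celery.tasks' taken from the tests (the class body and the log messages are assumptions): the handler simply collects records in memory, and the task logs one message at each level the tests check for.

import logging


class MockLoggingHandler(logging.Handler):
    """Keeps every emitted record in memory so tests can inspect them."""

    def __init__(self, *args, **kwargs):
        logging.Handler.__init__(self, *args, **kwargs)
        self.records = []

    def emit(self, record):
        self.records.append(record)

    def reset(self):
        del self.records[:]


# Hypothetical task under test: one message per level the tests expect.
logger = logging.getLogger('tkp.distribute.celery.tasks')


@celery_app.task
def bogus():
    logger.debug("debug message")
    logger.info("info message")
    logger.warning("warning message")
    logger.error("error message")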
Example #3
File: test_celery.py  Project: hughbg/tkp
    def test_remote_task_logger(self):
        """
        make sure the worker log->event->client log mechanism is working.
        """
        setup_event_listening(celery_app)
        mock_handler = MockLoggingHandler()
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.DEBUG)
        root_logger.addHandler(mock_handler)
        check = {logging.INFO, logging.WARNING, logging.ERROR}
        mock_handler.reset()
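        # Raising the handler threshold to INFO drops the task's DEBUG record,
        # which is why DEBUG is absent from the check set above.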
        mock_handler.setLevel(logging.INFO)
        result = bogus()

        for record in mock_handler.records:
            if record.name == 'tkp.distribute.celery.tasks':
                self.assertTrue(record.levelno in check)
                check.remove(record.levelno)
        self.assertFalse(len(check))
Example #4
    def test_remote_task_logger(self):
        """
        make sure the worker log->event->client log mechanism is working.
        """
        setup_event_listening(celery_app)
        mock_handler = MockLoggingHandler()
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.DEBUG)
        root_logger.addHandler(mock_handler)
        check = {logging.INFO, logging.WARNING, logging.ERROR}
        mock_handler.reset()
        mock_handler.setLevel(logging.INFO)
        result = test_log()

        for record in mock_handler.records:
            if record.name == 'tkp.distribute.celery.tasks':
                self.assertTrue(record.levelno in check)
                check.remove(record.levelno)
        self.assertFalse(len(check))
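setup_event_listening(celery_app) is what wires worker-side log records back into the client's logging, as the docstrings above describe; its implementation is not part of these snippets. As a hedged illustration only (not tkp's actual code), a comparable log->event->client path can be built on Celery's own event API: the task ships the message as a custom event, and the client replays it into a local logger.

import logging

from celery import Celery

app = Celery('demo')


@app.task(bind=True)
def noisy(self):
    # Worker side: ship the message to any event consumers as a custom event.
    self.send_event('task-log', level=logging.INFO, msg="hello from the worker")


def replay_log_event(event):
    # Client side: turn the event back into an ordinary log record.
    logging.getLogger('demo.tasks').log(event['level'], event['msg'])


def listen(app):
    with app.connection() as connection:
        receiver = app.events.Receiver(connection, handlers={
            'task-log': replay_log_event,
            '*': lambda event: None,  # ignore all other event types
        })
        receiver.capture(limit=None, timeout=None, wakeup=True)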
Example #5
config_module = 'celeryconfig'

celery_app = Celery('trap')
# try to load the celery config from the pipeline folder
try:
    celery_app.config_from_object(config_module)
    celery_app.connection()
except ImportError:
    msg = "can't find '%s' in your python path, using default config" % \
          config_module
    warnings.warn(msg)
    local_logger.warn(msg)
    celery_app.config_from_object({})


setup_event_listening(celery_app)


def map(func, iterable, arguments=[]):
    if iterable:
        return group(func.s(i, *arguments) for i in iterable)().get()
    else:
        # group()() returns None if group is called with no arguments,
        # leading to an AttributeError with get().
        return []

def set_cores(cores=0):
    """
    doesn't do anything for celery
    """
    pass
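A hypothetical usage of the map() helper above; add_offset is an illustrative task, not part of tkp. Note that this map shadows Python's built-in map inside the module.

@celery_app.task
def add_offset(x, offset):
    return x + offset


# Fans add_offset out over the iterable as a Celery group and blocks until all
# results are back; an empty iterable short-circuits to [].
results = map(add_offset, [1, 2, 3], arguments=[10])  # -> [11, 12, 13]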
Example #6
File: main.py  Project: hughbg/tkp
def run(job_name, mon_coords, local=False):
    setup_event_listening(celery_app)
    pipe_config = initialize_pipeline_config(
        os.path.join(os.getcwd(), "pipeline.cfg"),
        job_name)

    debug = pipe_config.logging.debug
    #Setup logfile before we do anything else
    log_dir = pipe_config.logging.log_dir
    setup_log_file(log_dir, debug)

    job_dir = pipe_config.DEFAULT.job_directory
    if not os.access(job_dir, os.X_OK):
        msg = "can't access job folder %s" % job_dir
        logger.error(msg)
        raise IOError(msg)
    logger.info("Job dir: %s", job_dir)

    db_config = get_database_config(pipe_config.database, apply=True)
    dump_database_backup(db_config, job_dir)

    job_config = load_job_config(pipe_config)
    se_parset = job_config.source_extraction
    deruiter_radius = job_config.association.deruiter_radius

    all_images = imp.load_source('images_to_process',
                                 os.path.join(job_dir,
                                              'images_to_process.py')).images

    logger.info("dataset %s contains %s images" % (job_name, len(all_images)))

    logger.info("performing database consistency check")
    if not dbconsistency.check():
        logger.error("Inconsistent database found; aborting")
        return 1

    dataset_id = create_dataset(job_config.persistence.dataset_id,
                                job_config.persistence.description)

    if job_config.persistence.dataset_id == -1:
        store_config(job_config, dataset_id)  # new data set
    else:
        job_config_from_db = fetch_config(dataset_id)  # existing data set
        if check_job_configs_match(job_config, job_config_from_db):
            logger.debug("Job configs from file / database match OK.")
        else:
            logger.warn("Job config file has changed since dataset was "
                        "first loaded into database. ")
            logger.warn("Using job config settings loaded from database, see "
                        "log dir for details")
        job_config = job_config_from_db

    dump_configs_to_logdir(log_dir, job_config, pipe_config)

    logger.info("performing persistence step")
    image_cache_params = pipe_config.image_cache
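    # Each image gets wrapped in its own single-element list, so every
    # persistence_node_step call below receives one such list.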
    imgs = [[img] for img in all_images]
    metadatas = runner(tasks.persistence_node_step, imgs, [image_cache_params],
                       local)
    metadatas = [m[0] for m in metadatas]

    logger.info("Storing images")
    image_ids = store_images(metadatas,
                             job_config.source_extraction.extraction_radius_pix,
                             dataset_id)

    db_images = [Image(id=image_id) for image_id in image_ids]

    logger.info("performing quality check")
    urls = [img.url for img in db_images]
    arguments = [job_config]
    rejecteds = runner(tasks.quality_reject_check, urls, arguments, local)

    good_images = []
    for image, rejected in zip(db_images, rejecteds):
        if rejected:
            reason, comment = rejected
            steps.quality.reject_image(image.id, reason, comment)
        else:
            good_images.append(image)

    if not good_images:
        logger.warn("No good images under these quality checking criteria")
        return

    grouped_images = group_per_timestep(good_images)
    timestep_num = len(grouped_images)
    for n, (timestep, images) in enumerate(grouped_images):
        msg = "processing %s images in timestep %s (%s/%s)"
        logger.info(msg % (len(images), timestep, n+1, timestep_num))

        logger.info("performing source extraction")
        urls = [img.url for img in images]
        arguments = [se_parset]
        extract_sources = runner(tasks.extract_sources, urls, arguments, local)

        logger.info("storing extracted to database")
        for image, sources in zip(images, extract_sources):
            dbgen.insert_extracted_sources(image.id, sources, 'blind')

        logger.info("performing database operations")
        for image in images:
            logger.info("performing DB operations for image %s" % image.id)

            logger.info("performing source association")
            dbass.associate_extracted_sources(image.id,
                                              deRuiter_r=deruiter_radius)
            logger.info("performing null detections")
            null_detections = dbnd.get_nulldetections(image.id)
            logger.info("Found %s null detections" % len(null_detections))
            # Only if we found null_detections the next steps are necessary
            if len(null_detections) > 0:
                logger.info("performing forced fits")
                ff_nd = forced_fits(image.url, null_detections, se_parset)
                dbgen.insert_extracted_sources(image.id, ff_nd, 'ff_nd')
                logger.info("adding null detections")
                dbnd.associate_nd(image.id)
            if len(mon_coords) > 0:
                logger.info("performing monitoringlist")
                ff_ms = forced_fits(image.url, mon_coords, se_parset)
                dbgen.insert_extracted_sources(image.id, ff_ms, 'ff_ms')
                logger.info("adding monitoring sources")
                dbmon.associate_ms(image.id)
            transients = search_transients(image.id,
                                           job_config['transient_search'])
        dbgen.update_dataset_process_end_ts(dataset_id)
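run() hands every distributable step to runner(task, items, arguments, local), which is not included in this snippet. A minimal sketch of what such a dispatcher could look like, assuming it reuses the Celery-backed map() from Example #5 and falls back to a plain serial loop when local is set; this is an assumption, not tkp's actual implementation.

def runner(task, iterable, arguments, local=False):
    if local:
        # Calling a Celery task directly runs its body synchronously in the
        # current process, so a plain loop is enough for local mode.
        return [task(item, *arguments) for item in iterable]
    # Otherwise fan the work out over Celery workers via the map() helper.
    return map(task, iterable, arguments)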