Example #1
    def test_intermittentToMonitorlist(self):
        dataset = tkp.db.DataSet(database=self.database, data={'description': "Monlist:" + self._testMethodName})
        n_images = 3
        im_params = db_subs.example_dbimage_datasets(n_images)

        steady_srcs = []
        # We will work with two sources per image:
        # the first is detected in all images and never enters the monlist;
        # the second has a null detection in the second image
        # and is therefore stored in the monlist
        n_steady_srcs = 2
        for i in range(n_steady_srcs):
            src = db_subs.example_extractedsource_tuple()
            src = src._replace(ra=src.ra + 2 * i)
            steady_srcs.append(src)

        for idx, im in enumerate(im_params):
            image = tkp.db.Image(database=self.database, dataset=dataset, data=im)

            if idx == 1:
                # In the second image only the first source is detected;
                # the second source will turn up as a null detection
                image.insert_extracted_sources(steady_srcs[0:1])
            else:
                image.insert_extracted_sources(steady_srcs)

            # First, we check for null detections
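            # (deRuiter_r is the dimensionless de Ruiter association
            # radius; 3.717 is the cut this test uses throughout)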
            nd = monitoringlist.get_nulldetections(image.id, deRuiter_r=3.717)

            if idx == 0:
                self.assertEqual(len(nd), 0)
            elif idx == 1:
                self.assertEqual(len(nd), 1)
                # The null detection is found; check that its ra is
                # the ra of the second source
                self.assertEqual(nd[0][0], steady_srcs[1].ra)
                # Simulate the forced-fit result being inserted back
                # into extractedsource
                tuple_ff_nd = steady_srcs[1:2]
                monitoringlist.insert_forcedfits_into_extractedsource(image.id, tuple_ff_nd, 'ff_nd')
            elif idx == 2:
                self.assertEqual(len(nd), 0)

            # Secondly, we do the source association
            dbass.associate_extracted_sources(image.id, deRuiter_r=3.717)
            monitoringlist.add_nulldetections(image.id)
            # We also need to run the transient search in order to pick up
            # the variable source. The positional arguments are eta_lim,
            # V_lim, prob_threshold and minpoints, respectively.
            transients = tr_search.multi_epoch_transient_search(image.id,
                                                                0.0,
                                                                0.0,
                                                                0.5,
                                                                1)

            # Adjust (insert/update/remove) transients in monlist as well
            monitoringlist.adjust_transients_in_monitoringlist(image.id,
                                                               transients)

        # After the three images have been processed,
        # we should have the null-detection source in the monlist.

        # Get the null detections in extractedsource;
        # these have extract_type = 1
        query = """\
        select x.id
          from extractedsource x
              ,image i
         where x.image = i.id
           and i.dataset = %s
           and x.extract_type = 1
        """
        self.database.cursor.execute(query, (dataset.id,))
        result = list(zip(*self.database.cursor.fetchall()))  # rows -> columns
        null_det = result[0]
        self.assertEqual(len(null_det), 1)

        query = """\
        select a.runcat
              ,a.xtrsrc
              ,r.wm_ra
              ,r.wm_decl
          from assocxtrsource a
              ,extractedsource x
              ,image i
              ,runningcatalog r
         where a.xtrsrc = x.id
           and x.id = %s
           and x.image = i.id
           and i.dataset = %s
           and a.runcat = r.id
           and r.dataset = i.dataset
        """
        self.database.cursor.execute(query, (null_det[0], dataset.id,))
        result = list(zip(*self.database.cursor.fetchall()))
        assocruncat = result[0]
        xtrsrc = result[1]
        wm_ra = result[2]
        wm_decl = result[3]
        self.assertEqual(len(assocruncat), 1)

        query = """\
        SELECT runcat
              ,ra
              ,decl
          FROM monitoringlist
         WHERE dataset = %s
        """
        self.database.cursor.execute(query, (dataset.id,))
        result = list(zip(*self.database.cursor.fetchall()))
        self.assertEqual(len(result), 3)
        monruncat = result[0]
        ra = result[1]
        decl = result[2]
        self.assertEqual(len(monruncat), 1)
        self.assertEqual(monruncat[0], assocruncat[0])
        self.assertAlmostEqual(ra[0], wm_ra[0])
        self.assertAlmostEqual(decl[0], wm_decl[0])
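
Both examples lean on the zip(*rows) idiom to transpose the cursor's list of row tuples into per-column tuples. A minimal, database-free sketch of what the list(zip(*...)) calls in the test do (the row values are hypothetical):

# Rows as returned by cursor.fetchall(): one tuple per row.
rows = [(1, 10.5, 45.0),
        (2, 12.5, 45.0)]  # hypothetical (id, ra, decl) rows

# zip(*rows) transposes rows into one tuple per column.
ids, ras, decls = zip(*rows)
assert ids == (1, 2)
assert ras == (10.5, 12.5)

# Under Python 3, zip() returns an iterator, so wrap it in list()
# before indexing, as the test does with result[0], result[1], ...
result = list(zip(*rows))
assert result[0] == ids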
Example #2
def run(job_name, local=False):
    pipe_config = initialize_pipeline_config(
                             os.path.join(os.getcwd(), "pipeline.cfg"),
                             job_name)

    database_config(pipe_config)

    job_dir = pipe_config.get('layout', 'job_directory')

    if not os.access(job_dir, os.X_OK):
        msg = "can't access job folder %s" % job_dir
        logger.error(msg)
        raise IOError(msg)

    logger.info("Job dir: %s", job_dir)
    # Load the list of images to process from the job directory
    images = imp.load_source('images_to_process',
                             os.path.join(job_dir,
                                          'images_to_process.py')).images

    logger.info("dataset %s contains %s images" % (job_name, len(images)))

    job_config = load_job_config(pipe_config)
    dump_job_config_to_logdir(pipe_config, job_config)

    p_parset = parset.load_section(job_config, 'persistence')
    se_parset = parset.load_section(job_config, 'source_extraction')
    nd_parset = parset.load_section(job_config, 'null_detections')
    tr_parset = parset.load_section(job_config, 'transient_search')

    # persistence
    imgs = [[img] for img in images]
    arguments = [p_parset]
    metadatas = runner(tasks.persistence_node_step, imgs, arguments, local)
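    # each node processed a single-image list, so unwrap each result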
    metadatas = [m[0] for m in metadatas]

    dataset_id, image_ids = steps.persistence.master_steps(metadatas,
                                                           se_parset['radius'],
                                                           p_parset)

    # manual monitoringlist entries
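    # (a failure to add them aborts the run with a nonzero status)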
    if not add_manual_monitoringlist_entries(dataset_id, []):
        return 1

    images = [Image(id=image_id) for image_id in image_ids]

    # quality_check
    urls = [img.url for img in images]
    arguments = [job_config]
    rejecteds = runner(tasks.quality_reject_check, urls, arguments, local)

    good_images = []
    for image, rejected in zip(images, rejecteds):
        if rejected:
            reason, comment = rejected
            steps.quality.reject_image(image.id, reason, comment)
        else:
            good_images.append(image)

    if not good_images:
        logger.warning("No good images under these quality checking criteria")
        return

    # Sourcefinding
    urls = [img.url for img in good_images]
    arguments = [se_parset]
    extract_sources = runner(tasks.extract_sources, urls, arguments, local)

    for image, sources in zip(good_images, extract_sources):
        dbgen.insert_extracted_sources(image.id, sources, 'blind')

    # null_detections
    deRuiter_radius = nd_parset['deruiter_radius']
    # per-image lists of positions that need a forced fit (null detections)
    null_detections_per_image = [dbmon.get_nulldetections(image.id, deRuiter_radius)
                                 for image in good_images]

    iters = zip([i.url for i in good_images], null_detections_per_image)
    arguments = [nd_parset]
    ff_nds = runner(tasks.forced_fits, iters, arguments, local)

    for image, ff_nd in zip(good_images, ff_nds):
        dbgen.insert_extracted_sources(image.id, ff_nd, 'ff_nd')

    for image in good_images:
        logger.info("performing DB operations for image %s" % image.id)
        dbass.associate_extracted_sources(image.id,
                                          deRuiter_r=deRuiter_radius)
        dbmon.add_nulldetections(image.id)
        transients = steps.transient_search.search_transients(image.id,
                                                              tr_parset)
        dbmon.adjust_transients_in_monitoringlist(image.id, transients)

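    # NB: 'transients' holds the result for the final image of the loop
    # above, so feature extraction runs on that image's transients only.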
    for transient in transients:
        steps.feature_extraction.extract_features(transient)
        # ingred.classification.classify(transient, cl_parset)

    now = datetime.datetime.utcnow()
    dbgen.update_dataset_process_ts(dataset_id, now)
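
Every distributed step in run() follows the same scatter/gather pattern: runner() maps a task over a list of per-item inputs with a shared argument list and returns results in input order. The real runner dispatches work to compute nodes (or runs in-process when local is true); the sequential stand-in below is only a sketch of that assumed contract, including the assumed task(item, *arguments) calling convention:

def runner_sketch(task, items, arguments, local=True):
    # Apply `task` to each item, passing the shared arguments along;
    # results come back in the same order as `items`.
    return [task(item, *arguments) for item in items]

# Usage mirroring the source-extraction step above (names assumed):
# extract_sources = runner_sketch(tasks.extract_sources, urls, [se_parset], local)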