Example 1
    def test_two_field_overlap_new_transient(self):
        """Now for something more interesting - two overlapping fields, 4 sources:
        one steady source only in lower field,
        one steady source in both fields,
        one steady source only in upper field,
        one transient source in both fields but only at 2nd timestep.
        """
        n_images = 2
        xtr_radius = 1.5
        im_params = db_subs.example_dbimage_datasets(n_images,
                                                     xtr_radius=xtr_radius)
        im_params[1]['centre_decl'] += xtr_radius * 1

        imgs = []

        lower_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] - 0.5 * xtr_radius)
        upper_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[1]['centre_ra'],
                                dec=im_params[1]['centre_decl'] + 0.5 * xtr_radius)
        overlap_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] + 0.2 * xtr_radius)
        overlap_transient = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] + 0.8 * xtr_radius)

        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[0]))
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[1]))

        imgs[0].insert_extracted_sources([lower_steady_src, overlap_steady_src])
        nd_posns = dbmon.get_nulldetections(imgs[0].id, deRuiter_r=1)
        self.assertEqual(len(nd_posns), 0)
        imgs[0].associate_extracted_sources(deRuiter_r=0.1)

        imgs[1].insert_extracted_sources([upper_steady_src, overlap_steady_src,
                                          overlap_transient])
        nd_posns = dbmon.get_nulldetections(imgs[1].id, deRuiter_r=1)
        self.assertEqual(len(nd_posns), 0)
        imgs[1].associate_extracted_sources(deRuiter_r=0.1)

        runcats = columns_from_table('runningcatalog',
                                where={'dataset': self.dataset.id})
        self.assertEqual(len(runcats), 4) #sanity check.

        monlist = columns_from_table('monitoringlist',
                                where={'dataset': self.dataset.id})
        self.assertEqual(len(monlist), 1)

        transients_qry = """\
        SELECT *
          FROM transient tr
              ,runningcatalog rc
        WHERE rc.dataset = %s
          AND tr.runcat = rc.id
        """
        self.database.cursor.execute(transients_qry, (self.dataset.id,))
        transients = get_db_rows_as_dicts(self.database.cursor)
        self.assertEqual(len(transients), 1)
Example 2
    def test_basic_same_field_case(self):
        """ Here we start with 1 source in image0.
        We then add image1 (same field as image0), with a double association
        for the source, and check assocskyrgn updates correctly.
       """
        n_images = 2
        im_params = db_subs.example_dbimage_datasets(n_images)

        idx = 0
        src_a = db_subs.example_extractedsource_tuple(
                        ra=im_params[idx]['centre_ra'],
                        dec=im_params[idx]['centre_decl'])

        src_b = src_a._replace(ra=src_a.ra + 1. / 60.) # 1 arcminute offset
        imgs = []
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
        imgs[idx].insert_extracted_sources([src_a])
        imgs[idx].associate_extracted_sources(deRuiter_r=3.7)

        idx = 1
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
        imgs[idx].insert_extracted_sources([src_a, src_b])
        imgs[idx].associate_extracted_sources(deRuiter_r=3.7)
        imgs[idx].update()
        runcats = columns_from_table('runningcatalog',
                                where={'dataset':self.dataset.id})
        self.assertEqual(len(runcats), 2) #Just a sanity check.
        skyassocs = columns_from_table('assocskyrgn',
                                   where={'skyrgn':imgs[idx]._data['skyrgn']})
        self.assertEqual(len(skyassocs), 2)
Example 3
    def test_meridian_lower_edge_case(self):
        """What happens if a source is right on the meridian?"""

        dataset = DataSet(data={'description':"Assoc 1-to-1:" +
                                self._testMethodName})
        n_images = 3
        im_params = db_subs.example_dbimage_datasets(n_images, centre_ra=0.5,
                                                      centre_decl=10)
        src_list = []
        src0 = db_subs.example_extractedsource_tuple(ra=0.0002, dec=10.5,
                                             ra_fit_err=0.01, dec_fit_err=0.01)
        src_list.append(src0)
        src1 = src0._replace(ra=0.0003)
        src_list.append(src1)
        src2 = src0._replace(ra=0.0004)
        src_list.append(src2)

        for idx, im in enumerate(im_params):
            im['centre_ra'] = 359.9
            image = tkp.db.Image(dataset=dataset, data=im)
            image.insert_extracted_sources([src_list[idx]])
            associate_extracted_sources(image.id, deRuiter_r=3.717)
        runcat = columns_from_table('runningcatalog', ['datapoints', 'wm_ra'],
                                   where={'dataset':dataset.id})
#        print "***\nRESULTS:", runcat, "\n*****"
        self.assertEqual(len(runcat), 1)
        self.assertEqual(runcat[0]['datapoints'], 3)
        avg_ra = (src0.ra + src1.ra +src2.ra)/3
        self.assertAlmostEqual(runcat[0]['wm_ra'], avg_ra)
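
The point being exercised above is that a naive arithmetic mean of RA values breaks down once measurements straddle the RA = 0/360 boundary. A minimal sketch of wrap-aware averaging, using unit vectors; this is an illustration of the idea, not the TKP implementation:

import math

def mean_ra_deg(ra_values):
    """Average RA values (degrees) correctly across the 0/360 wrap
    by averaging the corresponding unit vectors."""
    x = sum(math.cos(math.radians(ra)) for ra in ra_values)
    y = sum(math.sin(math.radians(ra)) for ra in ra_values)
    return math.degrees(math.atan2(y, x)) % 360.

# Naive averaging of [359.9, 0.1] gives 180.0; the wrap-aware mean gives ~0.0.
print mean_ra_deg([359.9, 0.1])
print mean_ra_deg([0.0002, 0.0003, 0.0004])   # ~0.0003, as asserted above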
Example 4
    def test_one2oneflux(self):
        dataset = tkp.db.DataSet(database=self.database, data={'description': 'flux test set: 1-1'})
        n_images = 3
        im_params = db_subs.example_dbimage_datasets(n_images)

        src_list = []
        src = db_subs.example_extractedsource_tuple()
        src0 = src._replace(flux=2.0)
        src_list.append(src0)
        src1 = src._replace(flux=2.5)
        src_list.append(src1)
        src2 = src._replace(flux=2.4)
        src_list.append(src2)

        for idx, im in enumerate(im_params):
            image = tkp.db.Image(database=self.database, dataset=dataset, data=im)
            image.insert_extracted_sources([src_list[idx]])
            associate_extracted_sources(image.id, deRuiter_r=3.717)

        query = """\
        SELECT rf.avg_f_int
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset)s
           AND r.id = rf.runcat
        """
        self.database.cursor.execute(query, {'dataset': dataset.id})
        result = zip(*self.database.cursor.fetchall())
        avg_f_int = result[0]
        self.assertEqual(len(avg_f_int), 1)
        self.assertAlmostEqual(avg_f_int[0], 2.3)
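
For reference, the expected value is simply the arithmetic mean of the three inserted fluxes. A small sketch of the check, together with the incremental form in which a running average can be maintained (an illustration, not the actual database logic):

fluxes = [2.0, 2.5, 2.4]
assert abs(sum(fluxes) / len(fluxes) - 2.3) < 1e-9

# Equivalent incremental update, one datapoint at a time:
avg = 0.0
for n, f in enumerate(fluxes, 1):
    avg += (f - avg) / n
print avg   # 2.3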
Example 5
    def test_infinite(self):
        # Check that database insertion doesn't choke on infinite errors
        dataset = DataSet(data={'description': 'example dataset'},
                           database=self.database)
        image = Image(dataset=dataset, data=db_subs.example_dbimage_datasets(1)[0])

        # Inserting an example extractedsource should be fine
        extracted_source = db_subs.example_extractedsource_tuple()
        image.insert_extracted_sources([extracted_source])

        # But it should also be fine if the source has infinite errors
        extracted_source = db_subs.example_extractedsource_tuple(error_radius=float('inf'))
        image.insert_extracted_sources([extracted_source])
Example 6
    def test_basic_insertion(self):
        """Here we begin with a single insertion, and check a relevant entry
        exists in the skyregion table.

        The key logic checked here is that inserting an image with a duplicate
        skyregion will return the same skyrgn id as the first image of that
        field; conversely, a new region results in a new skyrgn entry.

        """
        self.database = tkp.db.Database()
        db_subs.delete_test_database(self.database)

        self.dataset = tkp.db.DataSet(database=self.database,
                data={'description': "Skyrgn:" + self._testMethodName})
        n_images = 3
        im_params = db_subs.example_dbimage_datasets(n_images)

        ##First image:
        image0 = tkp.db.Image(dataset=self.dataset, data=im_params[0])
        image0.update()

        skyrgns = columns_from_table('skyregion',
                                             where={'dataset':self.dataset.id})
        self.assertEqual(len(skyrgns), 1)
        rgn_keys = ['centre_ra', 'centre_decl', 'xtr_radius']
        first_skyrgn_id = None
        for db_row in skyrgns:
            if all([db_row[k] == im_params[0][k] for k in rgn_keys]):
                first_skyrgn_id = db_row['id']
        self.assertNotEqual(first_skyrgn_id, None)
        self.assertEqual(image0._data['skyrgn'], first_skyrgn_id)

        ##Second, identical image:
        image1 = tkp.db.Image(dataset=self.dataset, data=im_params[1])
        image1.update()
        self.assertEqual(image1._data['skyrgn'], first_skyrgn_id)

        ##Third, different image:
        im_params[2]['centre_ra'] += im_params[2]['xtr_radius'] * 0.5
        image2 = tkp.db.Image(dataset=self.dataset, data=im_params[2])
        image2.update()
        self.assertNotEqual(image2._data['skyrgn'], first_skyrgn_id)
        skyrgns = columns_from_table('skyregion',
                                             where={'dataset':self.dataset.id})
        second_skyrgn_id = None
        for db_row in skyrgns:
            if all([db_row[k] == im_params[2][k] for k in rgn_keys]):
                second_skyrgn_id = db_row['id']
        self.assertNotEqual(second_skyrgn_id, None)
        self.assertEqual(image2._data['skyrgn'], second_skyrgn_id)
Example 7
    def test_cross_meridian(self):
        """
        A source is observed in two skyregions: one which crosses the
        meridian, and one which does not. We check that the associated source
        has the correct weighted mean RA.

        See also #4497.
        """
        dataset = DataSet(data={'description': "Test:" + self._testMethodName})

        im_list = [
            db_subs.example_dbimage_datasets(
                n_images=1, centre_ra=0, centre_decl=0, xtr_radius=10
            )[0],
            db_subs.example_dbimage_datasets(
                n_images=1, centre_ra=0, centre_decl=0, xtr_radius=10
            )[0],
            db_subs.example_dbimage_datasets(
                n_images=1, centre_ra=15, centre_decl=0, xtr_radius=10
            )[0],
            db_subs.example_dbimage_datasets(
                n_images=1, centre_ra=15, centre_decl=0, xtr_radius=10
            )[0],
        ]

        source_ra = 7.5
        src = db_subs.example_extractedsource_tuple(ra=source_ra, dec=0)

        for im in im_list:
            image = tkp.db.Image(dataset=dataset, data=im)
            image.insert_extracted_sources([src])
            associate_extracted_sources(image.id, deRuiter_r=3.717)

        runcat = columns_from_table('runningcatalog', ['wm_ra'],
            where={'dataset': dataset.id}
        )
        self.assertAlmostEqual(runcat[0]['wm_ra'], source_ra)
Example 8
    def test_two_field_overlap_nulling_src(self):
        """Similar to above, but one source disappears:
        Two overlapping fields, 4 sources:
        one steady source only in lower field,
        one steady source in both fields,
        one steady source only in upper field,
        one transient source in both fields but only at *1st* timestep.
        """
        n_images = 2
        xtr_radius = 1.5
        im_params = db_subs.example_dbimage_datasets(n_images,
                                                     xtr_radius=xtr_radius)
        im_params[1]['centre_decl'] += xtr_radius * 1

        imgs = []

        lower_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] - 0.5 * xtr_radius)
        upper_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[1]['centre_ra'],
                                dec=im_params[1]['centre_decl'] + 0.5 * xtr_radius)
        overlap_steady_src = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] + 0.2 * xtr_radius)
        overlap_transient = db_subs.example_extractedsource_tuple(
                                ra=im_params[0]['centre_ra'],
                                dec=im_params[0]['centre_decl'] + 0.8 * xtr_radius)

        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[0]))
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[1]))

        imgs[0].insert_extracted_sources([lower_steady_src, overlap_steady_src,
                                          overlap_transient])
        imgs[0].associate_extracted_sources(deRuiter_r=0.1)
        nd_posns = dbnd.get_nulldetections(imgs[0].id)
        self.assertEqual(len(nd_posns), 0)

        imgs[1].insert_extracted_sources([upper_steady_src, overlap_steady_src])

        imgs[1].associate_extracted_sources(deRuiter_r=0.1)
        #This time we don't expect to get an immediate transient detection,
        #but we *do* expect to get a null-source forced extraction request:
        nd_posns = dbnd.get_nulldetections(imgs[1].id)
        self.assertEqual(len(nd_posns), 1)

        runcats = columns_from_table('runningcatalog',
                                where={'dataset':self.dataset.id})
        self.assertEqual(len(runcats), 4) #sanity check.
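
Conceptually, a null-detection (forced-fit) request is raised for every runningcatalog source that is expected within the new image's sky region but has no blind detection in that image. A rough sketch of that set difference, using simplified in-memory structures rather than the actual get_nulldetections SQL:

def null_detection_positions(runcat_sources, image_skyrgn, detected_runcat_ids):
    """Return (ra, decl) of known sources expected in this image but not
    blindly detected, i.e. candidates for forced fits.

    runcat_sources: iterable of dicts with 'id', 'wm_ra', 'wm_decl', 'skyrgns'
                    (the set of sky regions each source is associated with).
    image_skyrgn:   id of the sky region of the current image.
    detected_runcat_ids: runcat ids associated with this image's blind detections.
    """
    return [(src['wm_ra'], src['wm_decl'])
            for src in runcat_sources
            if image_skyrgn in src['skyrgns']
            and src['id'] not in detected_runcat_ids]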
Example 9
    def test_new_skyregion_insertion(self):
        """Here we test the association logic executed upon insertion of a
        new skyregion.

        We expect that any pre-existing entries in the runningcatalog
        which lie within the field of view will be marked as
        'within this region', through the presence of an entry in table
        ``assocskyrgn``.
        Conversely sources outside the FoV should not be marked as related.

        We begin with img0, with a source at centre.
        Then we add 2 more (empty) images/fields at varying positions.
        """
        n_images = 6
        im_params = db_subs.example_dbimage_datasets(n_images)

        src_in_img0 = db_subs.example_extractedsource_tuple(
                        ra=im_params[0]['centre_ra'],
                        dec=im_params[0]['centre_decl'],)

        ##First image:
        image0 = tkp.db.Image(dataset=self.dataset, data=im_params[0])
        image0.insert_extracted_sources([src_in_img0])
        image0.associate_extracted_sources(deRuiter_r=3.7)
        image0.update()

        runcats = columns_from_table('runningcatalog',
                                where={'dataset':self.dataset.id})
        self.assertEqual(len(runcats), 1) #Just a sanity check.
        ##Second, different *But overlapping* image:
        idx = 1
        im_params[idx]['centre_decl'] += im_params[idx]['xtr_radius'] * 0.9
        image1 = tkp.db.Image(dataset=self.dataset, data=im_params[idx])
        image1.update()

        assocs = columns_from_table('assocskyrgn',
                                    where={'skyrgn':image1._data['skyrgn']})
        self.assertEqual(len(assocs), 1)
        self.assertEqual(assocs[0]['runcat'], runcats[0]['id'])

        ##Third, different *and NOT overlapping* image:
        idx = 2
        im_params[idx]['centre_decl'] += im_params[idx]['xtr_radius'] * 1.1
        image2 = tkp.db.Image(dataset=self.dataset, data=im_params[idx])
        image2.update()
        assocs = columns_from_table('assocskyrgn',
                                    where={'skyrgn':image2._data['skyrgn']})
        self.assertEqual(len(assocs), 0)
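
The "is this runningcatalog source inside the new field of view" decision reduces to an angular-separation test against the skyregion centre and its xtr_radius; the two extra images above are offset by 0.9 and 1.1 times that radius precisely to land on either side of the cut. A minimal sketch of the geometric check (an illustration only; the production code performs the equivalent test in the database):

import math

def in_skyregion(src_ra, src_decl, centre_ra, centre_decl, xtr_radius):
    """True if (src_ra, src_decl) lies within xtr_radius degrees of the
    sky-region centre; all angles in degrees, great-circle separation."""
    ra1, d1 = math.radians(src_ra), math.radians(src_decl)
    ra2, d2 = math.radians(centre_ra), math.radians(centre_decl)
    cos_sep = (math.sin(d1) * math.sin(d2) +
               math.cos(d1) * math.cos(d2) * math.cos(ra1 - ra2))
    sep_deg = math.degrees(math.acos(min(1.0, max(-1.0, cos_sep))))
    return sep_deg < xtr_radius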
Example 10
    def test_rejected_initial_image(self):
        """
        An image which is rejected should not be taken into account when
        deciding whether a patch of sky has been previously observed, and
        hence whether any detections in that area are (potential) transients.

        Here, we create a database with two images. The first
        (chronologically) is rejected; the second contains a source. That
        source should not be marked as a transient.
        """

        dataset = tkp.db.DataSet(
            data={'description':"Trans:" + self._testMethodName},
            database=tkp.db.Database()
        )

        # We use a dataset with two images
        # NB the routine in db_subs automatically increments time between
        # images.
        n_images = 2
        db_imgs = [
            tkp.db.Image(data=im_params, dataset=dataset) for
            im_params in db_subs.example_dbimage_datasets(n_images)
        ]

        # The first image is rejected for an arbitrary reason
        # (for the sake of argument, we use an unacceptable RMS).
        tkp.db.quality.reject(
            db_imgs[0].id, tkp.db.quality.reason['rms'].id, self._testMethodName
        )

        # Since we rejected the first image, we only find a source in the
        # second.
        source = db_subs.example_extractedsource_tuple()
        db_imgs[1].insert_extracted_sources([source])

        # Standard source association procedure etc.
        associate_extracted_sources(db_imgs[1].id, 3.7)

        # Our source should _not_ be a transient. That is, there should be no
        # entries in the transient table for this dataset.
        cursor = tkp.db.execute("""\
            SELECT t.id FROM transient t, runningcatalog rc
                    WHERE t.runcat = rc.id
                      AND rc.dataset = %(ds_id)s
            """, {"ds_id": dataset.id}
        )
        self.assertEqual(cursor.rowcount, 0)
Example 11
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = tkp.db.DataSet(data={"description": "Trans:" + self._testMethodName}, database=self.database)

        self.n_images = 8
        self.im_params = db_subs.example_dbimage_datasets(self.n_images)
        self.db_imgs = []

        # Insert transient source extractions,
        # including the non-detection points we expect from using the monitoringlist:
        source_lists = db_subs.example_source_lists(n_images=8, include_non_detections=True)
        for i in xrange(self.n_images):
            self.db_imgs.append(tkp.db.Image(data=self.im_params[i], dataset=self.dataset))

            self.db_imgs[i].insert_extracted_sources(source_lists[i])
            self.db_imgs[i].associate_extracted_sources(deRuiter_r=3.7)
Example 12
    def test_one2oneflux(self):
        dataset = tkp.db.DataSet(database=self.database, data={'description': 'flux test set: 1-1'})
        n_images = 3
        im_params = db_subs.example_dbimage_datasets(n_images)

        src_list = []
        src = db_subs.example_extractedsource_tuple()
        src0 = src._replace(flux=2.0)
        src_list.append(src0)
        src1 = src._replace(flux=2.5)
        src_list.append(src1)
        src2 = src._replace(flux=2.4)
        src_list.append(src2)

        for idx, im in enumerate(im_params):
            image = tkp.db.Image(database=self.database, dataset=dataset, data=im)
            image.insert_extracted_sources([src_list[idx]])
            associate_extracted_sources(image.id, deRuiter_r=3.717)

        query = """\
        SELECT rf.avg_f_int
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset)s
           AND r.id = rf.runcat
        """
        self.database.cursor.execute(query, {'dataset': dataset.id})
        result = zip(*self.database.cursor.fetchall())
        avg_f_int = result[0]
        self.assertEqual(len(avg_f_int), 1)
        py_metrics = db_subs.lightcurve_metrics(src_list)
        self.assertAlmostEqual(avg_f_int[0], py_metrics[-1]['avg_f_int'])
        runcat_id = columns_from_table('runningcatalog',
                                       where={'dataset':dataset.id})
        self.assertEqual(len(runcat_id),1)
        runcat_id = runcat_id[0]['id']
        # Check evolution of variability indices
        db_metrics = db_queries.per_timestep_variability_indices(self.database,
                                                           runcat_id)
        self.assertEqual(len(db_metrics), n_images)
        # Compare the python- and db-calculated values
        for i in range(len(db_metrics)):
            for key in ('v_int','eta_int'):
                self.assertAlmostEqual(db_metrics[i][key], py_metrics[i][key])
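
For reference, the per-timestep indices checked above, v_int and eta_int, can be sketched in plain Python roughly as follows. The weighting conventions of db_subs.lightcurve_metrics are assumed here, so treat this as an illustration rather than the canonical definition:

import math

def variability_indices(fluxes, flux_errs):
    """Crude per-source indices over a lightcurve:
    v_int:   fractional variability (sample std. dev. / mean flux),
    eta_int: weighted measure of deviation from the weighted mean flux."""
    n = len(fluxes)
    if n < 2:
        return 0.0, 0.0
    mean = sum(fluxes) / n
    variance = sum((f - mean) ** 2 for f in fluxes) / (n - 1)
    v_int = math.sqrt(variance) / mean

    weights = [1.0 / (e * e) for e in flux_errs]
    wmean = sum(w * f for w, f in zip(weights, fluxes)) / sum(weights)
    eta_int = sum(w * (f - wmean) ** 2
                  for w, f in zip(weights, fluxes)) / (n - 1)
    return v_int, eta_int

# Example usage with arbitrary errors:
print variability_indices([2.0, 2.5, 2.4], [0.1, 0.1, 0.1])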
Example 13
    def test_de_ruiter_calculation(self):
        """Check all the unit conversions are correct"""
        dataset = DataSet(data={'description':"Assoc 1-to-1:" + self._testMethodName})
        n_images = 2
        im_params = db_subs.example_dbimage_datasets(n_images, centre_ra=10,
                                                     centre_decl=0)


        #Note ra / ra_fit_err are in degrees.
        # ra_sys_err is in arcseconds, but we set it = 0 so it doesn't matter.
        #ra_fit_err cannot be zero or we get div-by-zero errors.
        #Also, there is a hard limit on association radii:
        #currently this defaults to 0.03 degrees == 108 arcseconds.
        src0 = db_subs.example_extractedsource_tuple(ra=10.00, dec=0.0,
                                             ra_fit_err=0.1, dec_fit_err=1.00,
                                             ra_sys_err=0.0, dec_sys_err=0.0)
        src1 = db_subs.example_extractedsource_tuple(ra=10.02, dec=0.0,
                                             ra_fit_err=0.1, dec_fit_err=1.00,
                                             ra_sys_err=0.0, dec_sys_err=0.0)
        src_list = [src0, src1]
        #NB dec_fit_err nonzero, but since delta_dec==0 this simplifies to:
        expected_DR_radius = math.sqrt((src1.ra - src0.ra) ** 2 /
                               (src0.ra_fit_err ** 2 + src1.ra_fit_err ** 2))
#        print "Expected DR", expected_DR_radius

        for idx in [0, 1]:
            image = tkp.db.Image(dataset=dataset,
                                data=im_params[idx])
            image.insert_extracted_sources([src_list[idx]])
            #Perform a very loose association since we just want to store the DR value.
            associate_extracted_sources(image.id, deRuiter_r=100)
        runcat = columns_from_table('runningcatalog', ['id'],
                                   where={'dataset':dataset.id})
#        print "***\nRESULTS:", runcat, "\n*****"
        self.assertEqual(len(runcat), 1)
        assoc = columns_from_table('assocxtrsource', ['r'],
                                   where={'runcat':runcat[0]['id']})
#        print "Got assocs:", assoc
        self.assertEqual(len(assoc), 2)
        self.assertAlmostEqual(assoc[1]['r'], expected_DR_radius)
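
The simplified expected_DR_radius above is valid because delta_dec is zero and the systematic errors are set to zero. For completeness, a sketch of the general dimensionless De Ruiter radius between two positions with independent errors (all angles and errors in degrees, as in the test; the cos(dec) convention is an assumption):

import math

def de_ruiter_radius(ra1, dec1, ra_err1, dec_err1,
                     ra2, dec2, ra_err2, dec_err2):
    """Dimensionless De Ruiter radius between two source positions.
    The RA offset is scaled by cos(dec) so it is a true angular offset;
    each squared offset is normalised by the combined positional errors."""
    cos_dec = math.cos(math.radians(0.5 * (dec1 + dec2)))
    dra = (ra1 - ra2) * cos_dec
    ddec = dec1 - dec2
    return math.sqrt(dra ** 2 / (ra_err1 ** 2 + ra_err2 ** 2) +
                     ddec ** 2 / (dec_err1 ** 2 + dec_err2 ** 2))

# With the two sources above (dec == 0, so cos_dec == 1, and delta_dec == 0)
# this reduces to the expected_DR_radius expression used in the assertion.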
Example 14
    def test_two_field_basic_case(self):
        """Here we create 2 disjoint image fields, with one source in each,
        and check that the second source inserted does not get flagged as transient.
        """
        n_images = 2
        xtr_radius = 1.5
        im_params = db_subs.example_dbimage_datasets(n_images,
                                                     xtr_radius=xtr_radius)
        im_params[1]['centre_decl'] += xtr_radius * 2 + 0.5

        imgs = []
        for idx in range(len(im_params)):
            imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))

        for idx in range(len(im_params)):
            central_src = db_subs.example_extractedsource_tuple(
                                    ra=im_params[idx]['centre_ra'],
                                    dec=im_params[idx]['centre_decl'])

            imgs[idx].insert_extracted_sources([central_src])
            imgs[idx].associate_extracted_sources(deRuiter_r=3.7)

        runcats = columns_from_table('runningcatalog',
                                where={'dataset':self.dataset.id})

        self.assertEqual(len(runcats), 2) #Just a sanity check.

        transients_qry = """\
        SELECT *
          FROM transient tr
              ,runningcatalog rc
        WHERE rc.dataset = %s
          AND tr.runcat = rc.id
        """
        self.database.cursor.execute(transients_qry, (self.dataset.id,))
        transients = get_db_rows_as_dicts(self.database.cursor)
        self.assertEqual(len(transients), 0)
Example 15
    def test_intermittentToMonitorlist(self):
        dataset = tkp.db.DataSet(database=self.database, data={'description': "Monlist:" + self._testMethodName})
        n_images = 3
        im_params = db_subs.example_dbimage_datasets(n_images)

        steady_srcs = []
        # We will work with 2 sources per image
        # one being detected in all images and not in the monlist
        # the second having a null-detection in the second image
        # and stored in the monlist
        n_steady_srcs = 2
        for i in range(n_steady_srcs):
            src = db_subs.example_extractedsource_tuple()
            src = src._replace(ra=src.ra + 2 * i)
            steady_srcs.append(src)

        for idx, im in enumerate(im_params):
            image = tkp.db.Image(database=self.database, dataset=dataset, data=im)

            if idx == 1:
                # The second image has a null detection, so only the first source is detected
                image.insert_extracted_sources(steady_srcs[0:1])
            else:
                image.insert_extracted_sources(steady_srcs)

            # First, we check for null detections
            nd = monitoringlist.get_nulldetections(image.id, deRuiter_r=3.717)

            if idx == 0:
                self.assertEqual(len(nd), 0)
            elif idx == 1:
                self.assertEqual(len(nd), 1)
                # The null detection is found,
                # We simulate the forced fit result back into extractedsource
                # Check that the null-detection ra is the ra of source two
                self.assertEqual(nd[0][0], steady_srcs[1].ra)
                #print "nd=",nd
                tuple_ff_nd = steady_srcs[1:2]
                monitoringlist.insert_forcedfits_into_extractedsource(image.id, tuple_ff_nd, 'ff_nd')
            elif idx == 2:
                self.assertEqual(len(nd), 0)

            # Secondly, we do the source association
            dbass.associate_extracted_sources(image.id, deRuiter_r=3.717)
            monitoringlist.add_nulldetections(image.id)
            # We also need to run the transient search in order to pick up the variable.
            # The arguments are eta_lim, V_lim, probability threshold and minpoints, respectively:
            transients = tr_search.multi_epoch_transient_search(image.id,
                                                     0.0,
                                                     0.0,
                                                     0.5,
                                                     1)

            # Adjust (insert/update/remove) transients in monlist as well
            monitoringlist.adjust_transients_in_monitoringlist(image.id,
                                                               transients)

        # So after the three images have been processed,
        # We should have the null-detection source in the monlist

        # Get the null detection in extractedsource
        # These are of extract_type = 1
        query = """\
        select x.id
          from extractedsource x
              ,image i
         where x.image = i.id
           and i.dataset = %s
           and x.extract_type = 1
        """
        self.database.cursor.execute(query, (dataset.id,))
        result = zip(*self.database.cursor.fetchall())
        null_det = result[0]
        self.assertEqual(len(null_det), 1)

        query = """\
        select a.runcat
              ,a.xtrsrc
              ,r.wm_ra
              ,r.wm_decl
          from assocxtrsource a
              ,extractedsource x
              ,image i
              ,runningcatalog r
         where a.xtrsrc = x.id
           and x.id = %s
           and x.image = i.id
           and i.dataset = %s
           and a.runcat = r.id
           and r.dataset = i.dataset
        """
        self.database.cursor.execute(query, (null_det[0], dataset.id,))
        result = zip(*self.database.cursor.fetchall())
        assocruncat = result[0]
        xtrsrc = result[1]
        wm_ra = result[2]
        wm_decl = result[3]
        self.assertEqual(len(assocruncat), 1)

        query = """\
        SELECT runcat
              ,ra
              ,decl
          FROM monitoringlist
         WHERE dataset = %s
        """
        self.database.cursor.execute(query, (dataset.id,))
        result = zip(*self.database.cursor.fetchall())
#        print "len(result)=",len(result)
        self.assertEqual(len(result), 3)
        monruncat = result[0]
        ra = result[1]
        decl = result[2]
        self.assertEqual(len(monruncat), 1)
        self.assertEqual(monruncat[0], assocruncat[0])
        self.assertEqual(ra[0], wm_ra[0])
        self.assertAlmostEqual(decl[0], wm_decl[0])
Example 16
    def test_many2manyflux_reduced_to_two_1to1(self):
        """
        (See also the association test test_many2many_reduced_to_two_1to1.)
        In this test case we cross-associate between a rhombus of sources spread
        about a central position: east-west in the first image,
        north-south in the second.

        The latter, north-south, pair are slightly offset towards positive RA
        and negative RA respectively.

        The result is that the candidate associations are pruned down to
        two one-to-one pairings.
        """
        dataset = tkp.db.DataSet(database=self.database, data={'description': 'flux test set: n-m, ' + self._testMethodName})
        n_images = 2
        im_params = db_subs.example_dbimage_datasets(n_images)
        centre_ra, centre_dec = 123., 10.5
        offset_deg = 20 / 3600. #20 arcsec
        tiny_offset_deg = 1 / 3600. #1 arcsec

        eastern_src = db_subs.example_extractedsource_tuple(
            ra=centre_ra + offset_deg,
            dec=centre_dec,
            peak = 1.5, peak_err = 1e-1,
            flux = 3.0, flux_err = 1e-1,)

        western_src = db_subs.example_extractedsource_tuple(
            ra=centre_ra - offset_deg,
            dec=centre_dec,
            peak = 1.7, peak_err = 1e-1,
            flux = 3.2, flux_err = 1e-1,)

        northern_source = db_subs.example_extractedsource_tuple(
            ra=centre_ra + tiny_offset_deg,
            dec=centre_dec + offset_deg,
            peak = 1.8, peak_err = 1e-1,
            flux = 3.3, flux_err = 1e-1,
            )

        southern_source = db_subs.example_extractedsource_tuple(
            ra=centre_ra - tiny_offset_deg,
            dec=centre_dec - offset_deg,
            peak = 1.4, peak_err = 1e-1,
            flux = 2.9, flux_err = 1e-1,)

        # image 1
        image1 = tkp.db.Image(database=self.database, dataset=dataset,
                              data=im_params[0])
        dbgen.insert_extracted_sources(
            image1.id, [eastern_src,western_src], 'blind')
        associate_extracted_sources(image1.id, deRuiter_r = 3.717)

        # image 2
        image2 = tkp.db.Image(database=self.database, dataset=dataset,
                              data=im_params[1])
        dbgen.insert_extracted_sources(
            image2.id, [northern_source, southern_source], 'blind')
        associate_extracted_sources(image2.id, deRuiter_r = 3.717)

        # Manually compose the lists of sources we expect to see associated
        # into runningcatalog entries:
        # NB the eastern/northern pair has the larger RA values.
        lightcurves_sorted_by_ra =[]
        lightcurves_sorted_by_ra.append( [western_src, southern_source])
        lightcurves_sorted_by_ra.append( [eastern_src, northern_source])

        #Check the summary statistics (avg flux, etc)
        query = """\
        SELECT rf.avg_f_int
              ,rf.avg_f_int_sq
              ,avg_weighted_f_int
              ,avg_f_int_weight
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset)s
           AND r.id = rf.runcat
        ORDER BY r.wm_ra, r.wm_decl
        """
        self.database.cursor.execute(query, {'dataset': dataset.id})
        runcat_flux_entries = get_db_rows_as_dicts(self.database.cursor)
        self.assertEqual(len(runcat_flux_entries), len(lightcurves_sorted_by_ra))

        for idx, flux_summary in enumerate(runcat_flux_entries):
            py_results = db_subs.lightcurve_metrics(lightcurves_sorted_by_ra[idx])
            for key in flux_summary.keys():
                self.assertAlmostEqual(flux_summary[key], py_results[-1][key])

        #Now check the per-timestep statistics (variability indices)
        sorted_runcat_ids = columns_from_table('runningcatalog',
                                               where={'dataset':dataset.id},
                                               order='wm_ra,wm_decl')
        sorted_runcat_ids = [entry['id'] for entry in sorted_runcat_ids]

        for idx, rcid in enumerate(sorted_runcat_ids):
            db_indices = db_queries.per_timestep_variability_indices(self.database,
                                                                   rcid)
            py_indices = db_subs.lightcurve_metrics(lightcurves_sorted_by_ra[idx])
            self.assertEqual(len(db_indices), len(py_indices))
            for nstep in range(len(db_indices)):
                for key in ('v_int', 'eta_int'):
                    self.assertAlmostEqual(db_indices[nstep][key],
                                           py_indices[nstep][key])
Example 17
    def test_one2manyflux(self):
        dataset = tkp.db.DataSet(database=self.database,
                                 data={'description': 'flux test set: 1-n'})
        n_images = 2
        im_params = db_subs.example_dbimage_datasets(n_images)
        central_ra, central_dec = 123.1235, 10.55
        position_offset_deg = 100./3600 #100 arcsec = 0.03 deg approx

        # image 1
        image = tkp.db.Image(database=self.database, dataset=dataset, data=im_params[0])
        imageid1 = image.id

        img1_srclist = []
        # 1 source
        img1_srclist.append(db_subs.example_extractedsource_tuple(central_ra, central_dec,
                                         peak = 1.5, peak_err = 5e-1,
                                         flux = 3.0, flux_err = 5e-1,
                                            ))

        dbgen.insert_extracted_sources(imageid1, img1_srclist, 'blind')
        associate_extracted_sources(imageid1, deRuiter_r=3.717)

        # image 2
        image = tkp.db.Image(database=self.database, dataset=dataset, data=im_params[1])
        imageid2 = image.id
        img2_srclist = []
        # 2 sources (both close to source 1, catching the 1-to-many case)
        img2_srclist.append(db_subs.example_extractedsource_tuple(
            central_ra,
            central_dec,
            peak = 1.6, peak_err = 5e-1,
            flux = 3.2, flux_err = 5e-1,
            ))
        img2_srclist.append(db_subs.example_extractedsource_tuple(
            central_ra + position_offset_deg,
            central_dec,
            peak = 1.9, peak_err = 5e-1,
            flux = 3.4, flux_err = 5e-1,
            ))

        dbgen.insert_extracted_sources(imageid2, img2_srclist, 'blind')
        associate_extracted_sources(imageid2, deRuiter_r=3.717)

        # Manually compose the lists of sources we expect to see associated
        # into runningcatalog entries:
        # NB img2_srclist[1] has larger RA value.
        lightcurves_sorted_by_ra =[]
        lightcurves_sorted_by_ra.append( [img1_srclist[0], img2_srclist[0]])
        lightcurves_sorted_by_ra.append( [img1_srclist[0], img2_srclist[1]])


        #Check the summary statistics (avg flux, etc)
        query = """\
        SELECT rf.avg_f_int
              ,rf.avg_f_int_sq
              ,avg_weighted_f_int
              ,avg_f_int_weight
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset)s
           AND r.id = rf.runcat
        ORDER BY r.wm_ra
        """
        self.database.cursor.execute(query, {'dataset': dataset.id})
        runcat_flux_entries = get_db_rows_as_dicts(self.database.cursor)
        self.assertEqual(len(runcat_flux_entries), 2)
        for idx, flux_summary in enumerate(runcat_flux_entries):
            py_results = db_subs.lightcurve_metrics(lightcurves_sorted_by_ra[idx])
            for key in flux_summary.keys():
                self.assertAlmostEqual(flux_summary[key], py_results[-1][key])


        #Now check the per-timestep statistics (variability indices)
        sorted_runcat_ids = columns_from_table('runningcatalog',
                                               where={'dataset':dataset.id},
                                               order='wm_ra')
        sorted_runcat_ids = [entry['id'] for entry in sorted_runcat_ids]

        for idx, rcid in enumerate(sorted_runcat_ids):
            db_indices = db_queries.per_timestep_variability_indices(self.database,
                                                                   rcid)
            py_indices = db_subs.lightcurve_metrics(lightcurves_sorted_by_ra[idx])
            self.assertEqual(len(db_indices), len(py_indices))
            for nstep in range(len(db_indices)):
                for key in ('v_int', 'eta_int'):
                    self.assertAlmostEqual(db_indices[nstep][key],
                                           py_indices[nstep][key])
Example 18
    def test_one2many(self):
        dataset = DataSet(data={'description': 'assoc test set: 1-n'})
        n_images = 2
        im_params = db_subs.example_dbimage_datasets(n_images)

        # image 1
        image = tkp.db.Image(dataset=dataset, data=im_params[0])
        imageid1 = image.id
        src = []
        # 1 source
        src.append(db_subs.example_extractedsource_tuple(ra=123.1235, dec=10.55,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 15e-3, peak_err = 5e-4,
                                                     flux = 15e-3, flux_err = 5e-4,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        results = []
        results.append(src[-1])
        dbgen.insert_extracted_sources(imageid1, results, 'blind')
        associate_extracted_sources(imageid1, deRuiter_r = 3.717)

        query = """\
        SELECT id
          FROM extractedsource
         WHERE image = %s
        """
        self.database.cursor.execute(query, (imageid1,))
        im1 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(im1), 0)
        im1src1 = im1[0]
        self.assertEqual(len(im1src1), 1)

        query = """\
        SELECT id
              ,xtrsrc
          FROM runningcatalog
         WHERE dataset = %s
        """
        self.database.cursor.execute(query, (dataset.id,))
        rc1 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(rc1), 0)
        runcat1 = rc1[0]
        xtrsrc1 = rc1[1]
        self.assertEqual(len(runcat1), len(src))
        self.assertEqual(xtrsrc1[0], im1src1[0])

        query = """\
        SELECT a.runcat
              ,a.xtrsrc
              ,a.type
          FROM assocxtrsource a
              ,runningcatalog r
         WHERE a.runcat = r.id
           AND r.dataset = %s
        """
        self.database.cursor.execute(query, (dataset.id,))
        assoc1 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(assoc1), 0)
        aruncat1 = assoc1[0]
        axtrsrc1 = assoc1[1]
        atype = assoc1[2]
        self.assertEqual(len(aruncat1), len(src))
        self.assertEqual(axtrsrc1[0], im1src1[0])
        self.assertEqual(axtrsrc1[0], xtrsrc1[0])
        self.assertEqual(atype[0], 4)
        #TODO: Add runcat_flux test

        # image 2
        image = tkp.db.Image(dataset=dataset, data=im_params[1])
        imageid2 = image.id
        src = []
        # 2 sources (located close to source 1, catching the 1-to-many case)
        src.append(db_subs.example_extractedsource_tuple(ra=123.12349, dec=10.549,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 15e-3, peak_err = 5e-4,
                                                     flux = 15e-3, flux_err = 5e-4,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        src.append(db_subs.example_extractedsource_tuple(ra=123.12351, dec=10.551,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 15e-3, peak_err = 5e-4,
                                                     flux = 15e-3, flux_err = 5e-4,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        results = []
        results.append(src[0])
        results.append(src[1])
        dbgen.insert_extracted_sources(imageid2, results, 'blind')
        associate_extracted_sources(imageid2, deRuiter_r = 3.717)

        query = """\
        SELECT id
          FROM extractedsource
         WHERE image = %s
        ORDER BY id
        """
        self.database.cursor.execute(query, (imageid2,))
        im2 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(im2), 0)
        im2src = im2[0]
        self.assertEqual(len(im2src), len(src))

        query = """\
        SELECT r.id
              ,r.xtrsrc
              ,x.image
              ,r.datapoints
              ,r.wm_ra
              ,r.wm_decl
              ,r.wm_ra_err
              ,r.wm_decl_err
          FROM runningcatalog r
              ,extractedsource x
         WHERE r.xtrsrc = x.id
           AND dataset = %s
        ORDER BY r.id
        """
        self.database.cursor.execute(query, (dataset.id,))
        rc2 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(rc2), 0)
        runcat2 = rc2[0]
        xtrsrc2 = rc2[1]
        image2 = rc2[2]
        self.assertEqual(len(runcat2), len(src))
        self.assertNotEqual(xtrsrc2[0], xtrsrc2[1])
        self.assertEqual(image2[0], image2[1])
        self.assertEqual(image2[0], imageid2)

        query = """\
        SELECT a.runcat
              ,a.xtrsrc
              ,a.type
              ,x.image
          FROM assocxtrsource a
              ,runningcatalog r
              ,extractedsource x
         WHERE a.runcat = r.id
           AND a.xtrsrc = x.id
           AND r.dataset = %s
        ORDER BY a.xtrsrc
                ,a.runcat
        """
        self.database.cursor.execute(query, (dataset.id,))
        assoc2 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(assoc2), 0)
        aruncat2 = assoc2[0]
        axtrsrc2 = assoc2[1]
        atype2 = assoc2[2]
        aimage2 = assoc2[3]
        self.assertEqual(len(aruncat2), 2*len(src))
        self.assertEqual(axtrsrc2[0], im1src1[0])
        self.assertEqual(axtrsrc2[1], im1src1[0])
        self.assertNotEqual(axtrsrc2[2], axtrsrc2[3])
        self.assertEqual(aimage2[2], aimage2[3])
        self.assertEqual(aimage2[2], imageid2)
        self.assertEqual(atype2[0], 6)
        self.assertEqual(atype2[1], 6)
        self.assertEqual(atype2[2], 2)
        self.assertEqual(atype2[3], 2)
        self.assertEqual(aruncat2[0], runcat2[0])
        self.assertEqual(aruncat2[1], runcat2[1])

        query = """\
        SELECT COUNT(*)
          FROM runningcatalog
         WHERE dataset = %s
           AND xtrsrc IN (SELECT id
                            FROM extractedsource
                           WHERE image = %s
                         )
        """
        self.database.cursor.execute(query, (dataset.id, imageid1))
        count = zip(*self.database.cursor.fetchall())
        self.assertEqual(count[0][0], 0)
Example 19
    def test_many2many(self):
        dataset = DataSet(data={'description': 'assoc test set: n-m'})
        n_images = 2
        im_params = db_subs.example_dbimage_datasets(n_images)

        # image 1
        image = tkp.db.Image(dataset=dataset, data=im_params[0])
        imageid1 = image.id
        src1 = []
        # 2 sources (located relatively close together, catching the many-to-one case in the next image)
        src1.append(db_subs.example_extractedsource_tuple(ra=122.985, dec=10.5,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 15e-3, peak_err = 5e-4,
                                                     flux = 15e-3, flux_err = 5e-4,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        src1.append(db_subs.example_extractedsource_tuple(ra=123.015, dec=10.5,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 15e-3, peak_err = 5e-4,
                                                     flux = 15e-3, flux_err = 5e-4,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        results = []
        results.append(src1[0])
        results.append(src1[1])
        dbgen.insert_extracted_sources(imageid1, results, 'blind')
        # We use a default value of 3.717
        associate_extracted_sources(imageid1, deRuiter_r = 3.717)

        query = """\
        SELECT id
          FROM extractedsource
         WHERE image = %s
        ORDER BY id
        """
        self.database.cursor.execute(query, (imageid1,))
        im1 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(im1), 0)
        im1src = im1[0]
        self.assertEqual(len(im1src), len(src1))

        # image 2
        image = tkp.db.Image(dataset=dataset, data=im_params[1])
        imageid2 = image.id
        src2 = []
        # 2 sources, where both can be associated with both from image 1
        src2.append(db_subs.example_extractedsource_tuple(ra=123.0, dec=10.485,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 15e-3, peak_err = 5e-4,
                                                     flux = 15e-3, flux_err = 5e-4,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        src2.append(db_subs.example_extractedsource_tuple(ra=123.0, dec=10.515,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 15e-3, peak_err = 5e-4,
                                                     flux = 15e-3, flux_err = 5e-4,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        results = []
        results.append(src2[0])
        results.append(src2[1])
        dbgen.insert_extracted_sources(imageid2, results, 'blind')
        associate_extracted_sources(imageid2, deRuiter_r = 3.717)

        query = """\
        SELECT id
          FROM extractedsource
         WHERE image = %s
        ORDER BY id
        """
        self.database.cursor.execute(query, (imageid2,))
        im2 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(im2), 0)
        im2src = im2[0]
        self.assertEqual(len(im2src), len(src2))

        query = """\
        SELECT id
              ,xtrsrc
              ,datapoints
          FROM runningcatalog
         WHERE dataset = %s
        ORDER BY xtrsrc
        """
        self.database.cursor.execute(query, (dataset.id,))
        rc2 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(rc2), 0)
        runcat2 = rc2[0]
        xtrsrc2 = rc2[1]
        datapoints = rc2[2]
        self.assertEqual(len(runcat2), 2)
        self.assertEqual(xtrsrc2[0], im1src[0])
        self.assertEqual(xtrsrc2[1], im1src[1])
        self.assertEqual(datapoints[0], datapoints[1])
        self.assertEqual(datapoints[0], 2)

        query = """\
        SELECT a.runcat
              ,r.xtrsrc as rxtrsrc
              ,a.xtrsrc as axtrsrc
              ,a.type
              ,x.image
          FROM assocxtrsource a
              ,runningcatalog r
              ,extractedsource x
         WHERE a.runcat = r.id
           AND a.xtrsrc = x.id
           AND r.dataset = %s
        ORDER BY r.xtrsrc
                ,a.xtrsrc
        """
        self.database.cursor.execute(query, (dataset.id,))
        assoc2 = zip(*self.database.cursor.fetchall())
        self.assertNotEqual(len(assoc2), 0)
        aruncat2 = assoc2[0]
        rxtrsrc2 = assoc2[1]
        axtrsrc2 = assoc2[2]
        atype2 = assoc2[3]
        aimage2 = assoc2[4]
        self.assertEqual(len(aruncat2), 4)
        # Same as in the many-to-one case:
        #self.assertEqual(aruncat2[0], aruncat2[3])
        #self.assertEqual(aruncat2[1], aruncat2[2])
        self.assertEqual(rxtrsrc2[0], rxtrsrc2[1])
        self.assertEqual(rxtrsrc2[2], rxtrsrc2[3])
        self.assertEqual(rxtrsrc2[0], axtrsrc2[0])
        self.assertEqual(rxtrsrc2[1], axtrsrc2[1] - 3)
        self.assertEqual(rxtrsrc2[2], axtrsrc2[2])
        self.assertEqual(rxtrsrc2[3], axtrsrc2[3] - 1)
        self.assertEqual(axtrsrc2[0], im1src[0])
        self.assertEqual(axtrsrc2[1], im2src[1])
        self.assertEqual(axtrsrc2[2], im1src[1])
        self.assertEqual(axtrsrc2[3], im2src[0])
        self.assertEqual(atype2[0], 4)
        self.assertEqual(atype2[1], 3)
        self.assertEqual(atype2[2], 4)
        self.assertEqual(atype2[3], 3)
        self.assertEqual(aimage2[0], imageid1)
        self.assertEqual(aimage2[1], imageid2)
        self.assertEqual(aimage2[2], imageid1)
        self.assertEqual(aimage2[3], imageid2)
Example 20
    def test_monitoringSource(self):
        data = {'description': "monitoringlist:" + self._testMethodName}
        dataset = DataSet(data=data)

        # Three timesteps, 1 band -> 3 images.
        taustart_tss = [datetime.datetime(2013, 8, 1),
                        datetime.datetime(2013, 9, 1),
                        datetime.datetime(2013, 10, 1)]
        #freq_effs = [124, 149, 156, 185]
        freq_effs = [124]
        freq_effs = [f * 1e6 for f in freq_effs]

        im_params = db_subs.example_dbimage_datasets(len(freq_effs)
                                                     * len(taustart_tss))
        timestamps = itertools.repeat(taustart_tss, len(freq_effs))

        for im, freq, ts in zip(im_params, itertools.cycle(freq_effs),
                                itertools.chain.from_iterable(zip(*timestamps))):
            im['freq_eff'] = freq
            im['taustart_ts'] = ts

        images = []
        for im in im_params:
            image = tkp.db.Image(dataset=dataset, data=im)
            images.append(image)

        # Arbitrary parameters, except that they fall inside our image.
        # We have one to be monitored source and one "normal" source
        src0 = db_subs.example_extractedsource_tuple(ra=122.5, dec=9.5)
        src1_mon = db_subs.example_extractedsource_tuple(ra=123.5, dec=10.5)

        # Group images in blocks of 1, corresponding to all frequency bands at
        # a given timestep.
        for images in zip(*(iter(images),) * len(freq_effs)):
            for image in images:
                # The "normal" source is seen at all timesteps
                dbgen.insert_extracted_sources(image.id, [src0], 'blind')

            for image in images:
                dbass.associate_extracted_sources(image.id, deRuiter_r=5.68)
                # The monitoring sources are the positional inputs for the
                # forced fits, which in turn return additional parameters,
                # e.g. the flux at that position.
                # src1_mon is the monitoring source at all timesteps.
                dbgen.insert_extracted_sources(image.id, [src1_mon], 'ff_ms')

                # And here we have to associate the monitoring sources with the
                # runcat sources...
                dbmon.associate_ms(image.id)

        query = """\
        SELECT id
              ,mon_src
          FROM runningcatalog r
         WHERE dataset = %(dataset_id)s
           AND datapoints = 3
        ORDER BY id
        """
        cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
        result = cursor.fetchall()

        # We should have two runningcatalog sources, one for the "normal"
        # source and one for the monitoring source.
        # Both should have three datapoints.
        print "dp_result:",result
        self.assertEqual(len(result), 2)
        # The first source is the "normal" one
        self.assertEqual(result[0][1], False)
        # The second source is the monitoring one
        self.assertEqual(result[1][1], True)

        query = """\
        SELECT r.id
              ,r.mon_src
              ,rf.f_datapoints
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset_id)s
           AND rf.runcat = r.id
        ORDER BY r.id
        """
        cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
        result = cursor.fetchall()

        # We should have two runningcatalog_flux entries,
        # one for every source, where every source has
        # three f_datapoints
        self.assertEqual(len(result), 2)

        # "Normal" source: three flux datapoints
        self.assertEqual(result[0][1], False)
        self.assertEqual(result[0][2], 3)
        # Monitoring source: three flux datapoints
        self.assertEqual(result[1][1], True)
        self.assertEqual(result[1][2], 3)

        # We should also have two lightcurves for both sources,
        # where both sources have three datapoints.
        # The association types of the "normal" source are
        # 4 (first datapoint) and 3 (later ones), while the monitoring source
        # association types are 8 (first) and 9 (later ones).
        query = """\
        SELECT a.runcat
              ,a.xtrsrc
              ,a.type
              ,i.taustart_ts
              ,r.mon_src
              ,x.extract_type
          FROM assocxtrsource a
              ,extractedsource x
              ,image i
              ,runningcatalog r
         WHERE a.xtrsrc = x.id
           AND x.image = i.id
           AND i.dataset = %(dataset_id)s
           AND a.runcat = r.id
        ORDER BY a.runcat
                ,i.taustart_ts
        """
        cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
        result = cursor.fetchall()

        # 3 + 3 entries for source 1 and 2 resp.
        self.assertEqual(len(result), 6)

        # The individual light-curve datapoints for the "normal" source
        # It was new at first timestep
        self.assertEqual(result[0][2], 4)
        self.assertEqual(result[0][3], taustart_tss[0])
        self.assertEqual(result[0][4], False)
        self.assertEqual(result[0][5], 0)

        # It was known at second timestep
        self.assertEqual(result[1][2], 3)
        self.assertEqual(result[1][3], taustart_tss[1])
        self.assertEqual(result[1][4], result[0][4])
        self.assertEqual(result[1][5], result[0][5])

        # It was known at third timestep
        self.assertEqual(result[2][2], result[1][2])
        self.assertEqual(result[2][3], taustart_tss[2])
        self.assertEqual(result[2][4], result[1][4])
        self.assertEqual(result[2][5], result[1][5])

        # The individual light-curve datapoints for the monitoring source
        # It was new at first timestep
        self.assertEqual(result[3][2], 8)
        self.assertEqual(result[3][3], taustart_tss[0])
        self.assertEqual(result[3][4], True)
        self.assertEqual(result[3][5], 2)

        # It was known at second timestep
        self.assertEqual(result[4][2], 9)
        self.assertEqual(result[4][3], taustart_tss[1])
        self.assertEqual(result[4][4], result[3][4])
        self.assertEqual(result[4][5], result[3][5])

        # It was known at third timestep
        self.assertEqual(result[5][2], result[4][2])
        self.assertEqual(result[5][3], taustart_tss[2])
        self.assertEqual(result[5][4], result[4][4])
        self.assertEqual(result[5][5], result[4][5])
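The zip(*(iter(images),) * len(freq_effs)) construct used to walk the image list per timestep, here and in the null-detection test below, is a compact grouper idiom. A minimal standalone sketch of its behaviour (plain Python, no TKP dependencies; the label strings are invented for illustration):

images = ['t0_band0', 't0_band1', 't0_band2', 't0_band3',
          't1_band0', 't1_band1', 't1_band2', 't1_band3']
n_bands = 4
# zip() consumes the same iterator n_bands times per output tuple, so it
# yields consecutive, non-overlapping chunks of n_bands images -- one chunk
# per timestep. Any trailing images that do not fill a chunk are dropped.
for timestep_block in zip(*(iter(images),) * n_bands):
    print timestep_block
# ('t0_band0', 't0_band1', 't0_band2', 't0_band3')
# ('t1_band0', 't1_band1', 't1_band2', 't1_band3')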
Example n. 21
0
    def setUp(self):
        self.dataset = DataSet(data={'description': "Src. assoc:" +
                                                    self._testMethodName})

        self.im_params = db_subs.example_dbimage_datasets(n_images=8)
        self.db_imgs=[]
Example n. 22
0
    def test_nullDetection(self):
        data = {'description': "null detection:" + self._testMethodName}
        dataset = DataSet(data=data)

        # Three timesteps, each with 4 bands -> 12 images.
        taustart_tss = [datetime.datetime(2013, 8, 1),
                        datetime.datetime(2013, 9, 1),
                        datetime.datetime(2013, 10, 1)]
        freq_effs = [124, 149, 156, 185]
        freq_effs = [f * 1e6 for f in freq_effs]

        im_params = db_subs.example_dbimage_datasets(len(freq_effs)
                                                     * len(taustart_tss))
        timestamps = itertools.repeat(taustart_tss, len(freq_effs))

        for im, freq, ts in zip(im_params, itertools.cycle(freq_effs),
                                itertools.chain.from_iterable(zip(*timestamps))):
            im['freq_eff'] = freq
            im['taustart_ts'] = ts

        images = []
        for im in im_params:
            image = tkp.db.Image(dataset=dataset, data=im)
            images.append(image)

        # Arbitrary parameters, except that they fall inside our image.
        src0 = db_subs.example_extractedsource_tuple(ra=122.5, dec=9.5)
        src1 = db_subs.example_extractedsource_tuple(ra=123.5, dec=10.5)

        # Group images in blocks of 4, corresponding to all frequency bands at
        # a given timestep.
        for images in zip(*(iter(images),) * len(freq_effs)):
            for image in images:
                # The first source is only seen at timestep 0, band 0.
                # The second source is only seen at timestep 1, band 3.
                if (image.taustart_ts == taustart_tss[0] and
                            image.freq_eff == freq_effs[0]):
                    dbgen.insert_extracted_sources(image.id, [src0], 'blind')
                elif (image.taustart_ts == taustart_tss[1] and
                      image.freq_eff == freq_effs[3]):
                    dbgen.insert_extracted_sources(image.id, [src1], 'blind')
                else:
                    pass

            for image in images:
                dbass.associate_extracted_sources(image.id, deRuiter_r=5.68)
                null_detections = dbnd.get_nulldetections(image.id)
                # The null_detections are the positional inputs for the forced
                # fits, which in turn return additional parameters,
                # e.g. for src0 and src1.
                if image.taustart_ts == taustart_tss[0]:
                    # There are no null detections at the first timestep
                    self.assertEqual(len(null_detections), 0)
                elif image.taustart_ts == taustart_tss[1]:
                    # src0 is a null detection at the second timestep
                    self.assertEqual(len(null_detections), 1)
                    dbgen.insert_extracted_sources(image.id, [src0], 'ff_nd')
                else:
                    # All other images have two null detections.
                    self.assertEqual(len(null_detections), 2)
                    dbgen.insert_extracted_sources(image.id, [src0, src1],
                                                   'ff_nd')

                # And here we have to associate the null detections with the
                # runcat sources...
                dbnd.associate_nd(image.id)

        query = """\
        SELECT id
              ,datapoints
        FROM runningcatalog r
        WHERE dataset = %(dataset_id)s
        ORDER BY datapoints
        """
        cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
        result = cursor.fetchall()

        # We should have two runningcatalog sources, with a datapoint for
        # every image in which the sources were seen.
        self.assertEqual(len(result), 2)

        query = """\
        SELECT r.id
              ,rf.band
              ,rf.f_datapoints
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset_id)s
           AND rf.runcat = r.id
        ORDER BY r.id
                ,rf.band
        """
        cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
        result = cursor.fetchall()

        # We should have eight runningcatalog_flux entries,
        # one for every source in every band, i.e. 2 x 4.
        # The number of flux datapoints differ per source, though
        self.assertEqual(len(result), 8)

        # Source 1: inserted into timestep 0, band 0.
        # Force-fits in band 0 images at next timesteps,
        # so 1+2 for band 0.
        self.assertEqual(result[0][2], 3)

        # Source 1: inserted into timestep 0, band 0.
        # Force-fits in bands 1,2,3 images at next timesteps,
        # so 0+2 for bands 1,2,3.
        self.assertEqual(result[1][2], 2)
        self.assertEqual(result[2][2], 2)
        self.assertEqual(result[3][2], 2)

        # Source 2: inserted into timestep 1, band 3.
        # Force-fits in band 0,1,2 images at next timestep,
        # so 1 for band 0,1,2
        self.assertEqual(result[4][2], 1)
        self.assertEqual(result[5][2], 1)
        self.assertEqual(result[6][2], 1)

        # Source 2: inserted into timestep 1, band 3.
        # Force-fit in band 3 image at next timestep,
        # so 1+1 for band 3
        self.assertEqual(result[7][2], 2)

        # We should also have a lightcurve for each of the two sources,
        # where source 1 has 3 datapoints in band0 (t1,t2,t3)
        # and 2 datapoints for the other three bands (t2,t3).
        # Source 2 has two datapoints for band3 (t2,t3) and
        # one for the other three bands (t3).
        query = """\
        SELECT a.runcat
              ,a.xtrsrc
              ,a.type
              ,i.band
              ,i.taustart_ts
          FROM assocxtrsource a
              ,extractedsource x
              ,image i
         WHERE a.xtrsrc = x.id
           AND x.image = i.id
           AND i.dataset = %(dataset_id)s
        ORDER BY a.runcat
                ,i.band
                ,i.taustart_ts
        """
        cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
        result = cursor.fetchall()

        # 9 + 5 entries for source 1 and 2 resp.
        self.assertEqual(len(result), 14)

        # The individual light-curve datapoints
        # Source1: new at t1, band0
        self.assertEqual(result[0][2], 4)
        self.assertEqual(result[0][4], taustart_tss[0])

        # Source1: Forced fit at t2, same band
        self.assertEqual(result[1][2], 7)
        self.assertEqual(result[1][3], result[0][3])
        self.assertEqual(result[1][4], taustart_tss[1])

        # Source1: Forced fit at t3, same band
        self.assertEqual(result[2][2], 7)
        self.assertEqual(result[2][3], result[1][3])
        self.assertEqual(result[2][4], taustart_tss[2])

        # Source1: Forced fit at t2, band1
        self.assertEqual(result[3][2], 7)
        self.assertTrue(result[3][3] > result[2][3])
        self.assertEqual(result[3][4], taustart_tss[1])

        # Source1: Forced fit at t3, band1
        self.assertEqual(result[4][2], 7)
        self.assertEqual(result[4][3], result[3][3])
        self.assertEqual(result[4][4], taustart_tss[2])

        # Source1: Forced fit at t2, band2
        self.assertEqual(result[5][2], 7)
        self.assertTrue(result[5][3] > result[4][3])
        self.assertEqual(result[5][4], taustart_tss[1])

        # Source1: Forced fit at t3, band2
        self.assertEqual(result[6][2], 7)
        self.assertEqual(result[6][3], result[5][3])
        self.assertEqual(result[6][4], taustart_tss[2])

        # Source1: Forced fit at t2, band3
        self.assertEqual(result[7][2], 7)
        self.assertTrue(result[7][3] > result[6][3])
        self.assertEqual(result[7][4], taustart_tss[1])

        # Source1: Forced fit at t3, band3
        self.assertEqual(result[8][2], 7)
        self.assertEqual(result[8][3], result[7][3])
        self.assertEqual(result[8][4], taustart_tss[2])

        # Source2: Forced fit at t3, band0
        self.assertEqual(result[9][2], 7)
        self.assertEqual(result[9][3], result[0][3])
        self.assertEqual(result[9][4], taustart_tss[2])

        # Source2: Forced fit at t3, band1
        self.assertEqual(result[10][2], 7)
        self.assertTrue(result[10][3] > result[9][3])
        self.assertEqual(result[10][4], taustart_tss[2])

        # Source2: Forced fit at t3, band2
        self.assertEqual(result[11][2], 7)
        self.assertTrue(result[11][3] > result[10][3])
        self.assertEqual(result[11][4], taustart_tss[2])

        # Source2: new at t2, band3
        self.assertEqual(result[12][2], 4)
        self.assertTrue(result[12][3] > result[11][3])
        self.assertEqual(result[12][4], taustart_tss[1])

        # Source2: Forced fit at t3, band3
        self.assertEqual(result[13][2], 7)
        self.assertEqual(result[13][3], result[12][3])
        self.assertEqual(result[13][4], taustart_tss[2])
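As a cross-check, the per-band flux datapoint counts asserted above can be reproduced with a few lines of plain Python; this is only a sketch of the counting logic, not pipeline code:

n_bands = 4
# src0: blind detection at timestep 0 in band 0, then one forced fit per
# band at timesteps 1 and 2.
src0_counts = [(1 if band == 0 else 0) + 2 for band in range(n_bands)]
# src1: blind detection at timestep 1 in band 3, then one forced fit per
# band at timestep 2.
src1_counts = [(1 if band == 3 else 0) + 1 for band in range(n_bands)]
print src0_counts                      # [3, 2, 2, 2]
print src1_counts                      # [1, 1, 1, 2]
print sum(src0_counts + src1_counts)   # 14 assocxtrsource rows in total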
Example n. 23
0
    def test_one2manyflux(self):
        dataset = tkp.db.DataSet(database=self.database, data={'description': 'flux test set: 1-n'})
        n_images = 2
        im_params = db_subs.example_dbimage_datasets(n_images)

        # image 1
        image = tkp.db.Image(database=self.database, dataset=dataset, data=im_params[0])
        imageid1 = image.id
        src = []
        # 1 source
        src.append(db_subs.example_extractedsource_tuple(ra=123.1235, dec=10.55,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 1.5, peak_err = 5e-1,
                                                     flux = 3.0, flux_err = 5e-1,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        results = []
        results.append(src[-1])
        dbgen.insert_extracted_sources(imageid1, results, 'blind')
        associate_extracted_sources(imageid1, deRuiter_r=3.717)

        # image 2
        image = tkp.db.Image(database=self.database, dataset=dataset, data=im_params[1])
        imageid2 = image.id
        src = []
        # 2 sources (located close to source 1), catching the 1-to-many case
        src.append(db_subs.example_extractedsource_tuple(ra=123.12349, dec=10.549,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 1.6, peak_err = 5e-1,
                                                     flux = 3.2, flux_err = 5e-1,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        src.append(db_subs.example_extractedsource_tuple(ra=123.12351, dec=10.551,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 1.9, peak_err = 5e-1,
                                                     flux = 3.4, flux_err = 5e-1,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        results = []
        results.append(src[0])
        results.append(src[1])
        dbgen.insert_extracted_sources(imageid2, results, 'blind')
        associate_extracted_sources(imageid2, deRuiter_r=3.717)

        query = """\
        SELECT rf.avg_f_int
              ,rf.avg_f_int_sq
              ,avg_weighted_f_int/avg_f_int_weight as wI
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset)s
           AND r.id = rf.runcat
        ORDER BY rf.avg_f_int
        """
        self.database.cursor.execute(query, {'dataset': dataset.id})
        result = zip(*self.database.cursor.fetchall())
        avg_f_int = result[0]
        avg_f_int_sq = result[1]
        wI = result[2]
        self.assertEqual(len(avg_f_int), 2)
        self.assertAlmostEqual(avg_f_int[0], 3.1)
        self.assertAlmostEqual(avg_f_int[1], 3.2)
        self.assertAlmostEqual(avg_f_int_sq[0], 9.62)
        self.assertAlmostEqual(avg_f_int_sq[1], 10.28)
        self.assertAlmostEqual(wI[0], 3.1)
        self.assertAlmostEqual(wI[1], 3.2)
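The expected values are consistent with the one-to-many association splitting the single runningcatalog entry in two, each new entry inheriting the image-1 flux datapoint and appending its own image-2 datapoint. A quick standalone check of that arithmetic (plain Python, fluxes taken from the test above):

# Flux histories implied by the assertions: each split entry keeps the
# original 3.0 datapoint and adds its own new flux.
for history in ([3.0, 3.2], [3.0, 3.4]):
    avg_f_int = sum(history) / len(history)
    avg_f_int_sq = sum(f * f for f in history) / len(history)
    print avg_f_int, avg_f_int_sq
# 3.1 9.62
# 3.2 10.28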
Example n. 24
0
    def setUp(self):
        self.fake_images = db_subs.example_dbimage_datasets(n_images=1)
        self.dataset = tkp.db.DataSet(data={'description':
                                            "Reject:" + self._testMethodName})
        self.image = tkp.db.Image(data=self.fake_images[0],
                                  dataset=self.dataset)
Example n. 25
0
    def test_new_runcat_insertion(self):
        """Here we test the association logic executed upon insertion of a
        new runningcatalog source.

        We add an empty image0, then proceed to image1,
        which is partially overlapping.
        We add one new overlapping source, and one source only in image1's skyrgn.
        Then we check that the back-associations to image0 are correct.
        """
        n_images = 6
        im_params = db_subs.example_dbimage_datasets(n_images)

        #We first create 2 overlapping images,
        #one above the other in dec by 1.0*xtr_radius
        idx = 0
        image0 = tkp.db.Image(dataset=self.dataset, data=im_params[idx])
        image0.update()

        #Bump up the centre of img1 to higher declination
        im_params[1]['centre_decl'] += im_params[1]['xtr_radius']
        #We place one source half-way between the field centres (i.e. in both)
        src_in_imgs_0_1 = db_subs.example_extractedsource_tuple(
                                    ra=im_params[1]['centre_ra'],
                                    dec=im_params[1]['centre_decl'] -
                                            im_params[1]['xtr_radius'] * 0.5)

        #And one source only in field 1
        src_in_img_1_only = db_subs.example_extractedsource_tuple(
                        ra=im_params[1]['centre_ra'],
                        dec=im_params[1]['centre_decl'] +
                            im_params[1]['xtr_radius'] * 0.5)

        ##First insert new sources in img1 and check association to parent field:
        ## (This is always asserted without calculation, for efficiency)
        image1 = tkp.db.Image(dataset=self.dataset, data=im_params[1])
        image1.insert_extracted_sources([src_in_imgs_0_1, src_in_img_1_only])
        image1.associate_extracted_sources(deRuiter_r=3.7)
        image1.update()

        runcats = columns_from_table('runningcatalog',
                        where={'dataset':self.dataset.id})

        #We now expect to see both runcat entries in the field of im1
        im1_assocs = columns_from_table('assocskyrgn',
                                    where={'skyrgn':image1._data['skyrgn']})
        self.assertEqual(len(im1_assocs), 2)
        runcat_ids = [r['id'] for r in  runcats]
        for assoc in im1_assocs:
            self.assertTrue(assoc['runcat'] in runcat_ids)

        #The new sources are *also checked against previous regions*
        #Only expect one in the field of im0 (the first source).
        im0_assocs = columns_from_table('assocskyrgn',
                                    where={'skyrgn':image0._data['skyrgn']})

        runcats_only_in_im0 = columns_from_table('runningcatalog',
                                        where={'dataset':self.dataset.id,
                                               'wm_decl':15})

        self.assertEqual(len(im0_assocs), 1)
        self.assertEqual(len(runcats_only_in_im0), 1)
        self.assertEqual(im0_assocs[0]['runcat'], runcats_only_in_im0[0]['id'])
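Back-association of a new runningcatalog source to earlier sky regions amounts to an angular-separation test against each region's extraction radius. A minimal sketch of that criterion (plain Python; the helper name is invented here, and the pipeline performs the equivalent test in SQL on the stored x, y, z coordinates):

import math

def within_skyregion(src_ra, src_decl, centre_ra, centre_decl, xtr_radius):
    """True if the source lies within xtr_radius degrees of the region centre."""
    def unit_vector(ra, decl):
        ra, decl = math.radians(ra), math.radians(decl)
        return (math.cos(decl) * math.cos(ra),
                math.cos(decl) * math.sin(ra),
                math.sin(decl))
    # Angular separation from the dot product of the two unit vectors,
    # clamped to [-1, 1] to guard against rounding error.
    dot = sum(a * b for a, b in zip(unit_vector(src_ra, src_decl),
                                    unit_vector(centre_ra, centre_decl)))
    separation = math.degrees(math.acos(max(-1.0, min(1.0, dot))))
    return separation <= xtr_radius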
Example n. 26
0
    def test_one2one(self):
        dataset = DataSet(data={'description': 'assoc test set: 1-1'})
        n_images = 8
        im_params = db_subs.example_dbimage_datasets(n_images)

        steady_srcs = []
        n_steady_srcs = 3
        for i in range(n_steady_srcs):
            src = db_subs.example_extractedsource_tuple()
            src = src._replace(ra=src.ra + 2 * i)
            steady_srcs.append(src)

        for im in im_params:
            image = tkp.db.Image(dataset=dataset, data=im)
            dbgen.insert_extracted_sources(image.id, steady_srcs, 'blind')
            associate_extracted_sources(image.id, deRuiter_r = 3.717)

        # Check runningcatalog, runningcatalog_flux, assocxtrsource.
        # Note that the order of insertions is not guaranteed, so we ORDER BY
        # wm_ra.
        query = """\
        SELECT datapoints
              ,wm_ra
              ,wm_decl
              ,wm_ra_err
              ,wm_decl_err
              ,x
              ,y
              ,z
          FROM runningcatalog
         WHERE dataset = %s
        ORDER BY wm_ra
        """
        cursor = tkp.db.execute(query, (dataset.id,))
        runcat = zip(*cursor.fetchall())
        self.assertNotEqual(len(runcat), 0)
        dp = runcat[0]
        wm_ra = runcat[1]
        wm_decl = runcat[2]
        wm_ra_err = runcat[3]
        wm_decl_err = runcat[4]
        x = runcat[5]
        y = runcat[6]
        z = runcat[7]
        # Check for one runningcatalog entry per steady source
        self.assertEqual(len(dp), len(steady_srcs))
        self.assertEqual(dp[0], n_images)
        self.assertAlmostEqual(wm_ra[0], steady_srcs[0].ra)
        self.assertAlmostEqual(wm_decl[0], steady_srcs[0].dec)
        self.assertAlmostEqual(wm_ra_err[0], math.sqrt(
                           1./ (n_images / ( (steady_srcs[0].ra_fit_err*3600.)**2 + (steady_srcs[0].ra_sys_err)**2))
                               ))
        self.assertAlmostEqual(wm_decl_err[0], math.sqrt(
                           1./ (n_images / ((steady_srcs[0].dec_fit_err*3600.)**2 + (steady_srcs[0].dec_sys_err)**2 ))
                               ))

        self.assertAlmostEqual(x[0],
                    math.cos(math.radians(steady_srcs[0].dec))*
                        math.cos(math.radians(steady_srcs[0].ra)))
        self.assertAlmostEqual(y[0],
                   math.cos(math.radians(steady_srcs[0].dec))*
                        math.sin(math.radians(steady_srcs[0].ra)))
        self.assertAlmostEqual(z[0], math.sin(math.radians(steady_srcs[0].dec)))

        # Check that xtrsrc ids in assocxtrsource are the ones from extractedsource
        query ="""\
        SELECT a.runcat
              ,a.xtrsrc
          FROM assocxtrsource a
              ,runningcatalog r
         WHERE a.runcat = r.id
           AND r.dataset = %s
        ORDER BY a.xtrsrc
        """
        cursor = tkp.db.execute(query, (dataset.id,))
        assoc = zip(*cursor.fetchall())
        self.assertNotEqual(len(assoc), 0)
        aruncat = assoc[0]
        axtrsrc = assoc[1]
        self.assertEqual(len(axtrsrc), n_images * n_steady_srcs)

        query = """\
        SELECT x.id
          FROM extractedsource x
              ,image i
         WHERE x.image = i.id
           AND i.dataset = %s
        ORDER BY x.id
        """
        cursor = tkp.db.execute(query, (dataset.id,))
        xtrsrcs = zip(*cursor.fetchall())
        self.assertNotEqual(len(xtrsrcs), 0)
        xtrsrc = xtrsrcs[0]
        self.assertEqual(len(xtrsrc), n_images * n_steady_srcs)

        for i in range(len(xtrsrc)):
            self.assertEqual(axtrsrc[i], xtrsrc[i])

        # Check runcat_fluxes
        query = """\
        SELECT rf.band
              ,rf.stokes
              ,rf.f_datapoints
              ,rf.avg_f_peak
              ,rf.avg_f_peak_weight
              ,rf.avg_f_int
              ,rf.avg_f_int_weight
          FROM runningcatalog_flux rf
              ,runningcatalog r
         WHERE r.id = rf.runcat
           AND r.dataset = %s
        """
        cursor = tkp.db.execute(query, (dataset.id,))
        fluxes = zip(*cursor.fetchall())
        self.assertNotEqual(len(fluxes), 0)
        f_datapoints = fluxes[2]
        avg_f_peak = fluxes[3]
        avg_f_peak_weight = fluxes[4]
        avg_f_int = fluxes[5]
        avg_f_int_weight = fluxes[6]
        self.assertEqual(len(f_datapoints), n_steady_srcs)
        self.assertEqual(f_datapoints[0], n_images)
        self.assertEqual(avg_f_peak[0], steady_srcs[0].peak)
        self.assertEqual(avg_f_peak_weight[0], 1./steady_srcs[0].peak_err**2)
        self.assertEqual(avg_f_int[0], steady_srcs[0].flux)
        self.assertEqual(avg_f_int_weight[0], 1./steady_srcs[0].flux_err**2)
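The expected wm_ra_err and wm_decl_err follow from inverse-variance weighting: every datapoint carries a weight of 1/((fit_err*3600)**2 + sys_err**2) with both terms in arcseconds, and n_images identical datapoints combine to an error of sqrt(1/(n_images*weight)), which is exactly what the assertions above evaluate. A standalone check with hypothetical error values (not necessarily the defaults of example_extractedsource_tuple):

import math

n_images = 8
ra_fit_err = 5. / 3600    # fitted positional error, in degrees
ra_sys_err = 20.          # systematic positional error, in arcseconds

# Per-datapoint weight in arcsec**-2, then the combined weighted-mean error.
weight = 1. / ((ra_fit_err * 3600.) ** 2 + ra_sys_err ** 2)
wm_ra_err = math.sqrt(1. / (n_images * weight))
print wm_ra_err   # shrinks as 1/sqrt(n_images) relative to a single datapoint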
Example n. 27
0
    def test_single_band_transient_search(self):
        """test_single_band_transient_search

            Test the simplest functional case - the only source is a transient source.
            This makes it simple to verify the database properties.

        """
        # We have to add a dataset and some images, each with some measurements.
        # After insertion and source association, we run the transient search.
        dataset = tkp.db.DataSet(database=self.database,
                                data={'description':"Trans:"
                                        + self._testMethodName})
        n_images = 4
        im_params = db_subs.example_dbimage_datasets(n_images)

        TransientSource = db_subs.MockSource(
             ex=db_subs.example_extractedsource_tuple(),
             lightcurve=[
                 db_subs.MockLCPoint(index=2, peak=20e-2, flux=20e-2, sigma=200),
                 db_subs.MockLCPoint(index=3, peak=1e-2, flux=1e-2, sigma=10),
                    ]
                 )

        measurements = TransientSource.synthesise_measurements(
                                               n_images,
                                               include_non_detections=True)

        images = []
        for idx in range(len(im_params)):
            image = tkp.db.Image(dataset=dataset, data=im_params[idx])
            images.append(image)
            if measurements[idx] is not None:
                image.insert_extracted_sources([ measurements[idx] ])
            image.associate_extracted_sources(deRuiter_r=3.7)
            freq_bands = dataset.frequency_bands()
            self.assertEqual(len(freq_bands), 1)
            transients = multi_epoch_transient_search(
                                            eta_lim=1,
                                            V_lim=0.1,
                                            probability_threshold=0.7,
                                            minpoints=1,
                                            image_id=image.id)

        # Check the number of detected transients
        self.assertEqual(len(transients), 1)

        # Check that the bands for the images are the same as the transient's band
        freq_bands = dataset.frequency_bands()
        self.assertEqual(len(freq_bands), 1)
        for tr in transients:
            self.assertEqual(freq_bands[0], tr['band'])

        runcats = dataset.runcat_entries()
        self.assertEqual(len(runcats), 1)
        for tr in transients:
            self.assertEqual(runcats[0]['runcat'], tr['runcat'])

        # Check that the trigger xtrsrc happened in the third image
        query = """\
        select taustart_ts
          from extractedsource x
              ,image i
         where x.image = i.id
           and x.id = (select trigger_xtrsrc
                         from transient t
                             ,runningcatalog r
                        where t.runcat = r.id
                          and r.dataset = %s
                      )
        """
        self.database.cursor.execute(query, (dataset.id,))
        taustart_ts = zip(*self.database.cursor.fetchall())[0]
        self.assertEqual(len(taustart_ts), 1)
        ts = taustart_ts[0]
        self.assertEqual(ts, images[2].taustart_ts)

        # Check the variability indices, first eta_int:
        #query = """\
        #select sum((f_int - 0.06) * (f_int-0.06) / (f_int_err * f_int_err)) / 3
        #  from assocxtrsource a
        #      ,extractedsource x
        # where a.xtrsrc = x.id
        #   and a.runcat = 7
        #"""

        #FIXME: I'm not sure what this query is for?
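        # (The commented-out query above appears to recompute the eta_int
        #  variability index by hand: the sum of squared deviations of the
        #  integrated fluxes from their mean -- hard-coded here as 0.06 --
        #  weighted by the flux errors, and divided by N - 1 = 3.)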
        query = """\
Example n. 28
0
    def test_many2manyflux(self):
        dataset = tkp.db.DataSet(database=self.database, data={'description': 'flux test set: n-m'})
        n_images = 2
        im_params = db_subs.example_dbimage_datasets(n_images)

        # image 1
        image = tkp.db.Image(database=self.database, dataset=dataset, data=im_params[0])
        imageid1 = image.id
        src1 = []
        # 2 sources (located relatively close together), catching the many-to-many case in the next image
        src1.append(db_subs.example_extractedsource_tuple(ra=122.985, dec=10.5,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 1.5, peak_err = 1e-1,
                                                     flux = 3.0, flux_err = 1e-1,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        src1.append(db_subs.example_extractedsource_tuple(ra=123.015, dec=10.5,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 1.7, peak_err = 1e-1,
                                                     flux = 3.2, flux_err = 1e-1,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        results = []
        results.append(src1[0])
        results.append(src1[1])
        dbgen.insert_extracted_sources(imageid1, results, 'blind')
        # We use a default value of 3.717
        associate_extracted_sources(imageid1, deRuiter_r = 3.717)

        # image 2
        image = tkp.db.Image(database=self.database, dataset=dataset, data=im_params[1])
        imageid2 = image.id
        src2 = []
        # 2 sources, where both can be associated with both from image 1
        src2.append(db_subs.example_extractedsource_tuple(ra=123.0, dec=10.485,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 1.8, peak_err = 1e-1,
                                                     flux = 3.3, flux_err = 1e-1,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        src2.append(db_subs.example_extractedsource_tuple(ra=123.0, dec=10.515,
                                                     ra_fit_err=5./3600, dec_fit_err=6./3600,
                                                     peak = 1.4, peak_err = 1e-1,
                                                     flux = 2.9, flux_err = 1e-1,
                                                     sigma = 15,
                                                     beam_maj = 100, beam_min = 100, beam_angle = 45,
                                                     ra_sys_err=20, dec_sys_err=20
                                                        ))
        results = []
        results.append(src2[0])
        results.append(src2[1])
        dbgen.insert_extracted_sources(imageid2, results, 'blind')
        associate_extracted_sources(imageid2, deRuiter_r = 3.717)

        query = """\
        SELECT rf.avg_f_int
              ,rf.avg_f_int_sq
              ,avg_weighted_f_int/avg_f_int_weight as wI
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset)s
           AND r.id = rf.runcat
        ORDER BY rf.avg_f_int
        """
        self.database.cursor.execute(query, {'dataset': dataset.id})
        result = zip(*self.database.cursor.fetchall())
        avg_f_int = result[0]
        avg_f_int_sq = result[1]
        wI = result[2]
        self.assertEqual(len(avg_f_int), 2)
        self.assertAlmostEqual(avg_f_int[0], 2.95)
        self.assertAlmostEqual(avg_f_int[1], 3.25)
        self.assertAlmostEqual(avg_f_int_sq[0], 8.705)
        self.assertAlmostEqual(avg_f_int_sq[1], 10.565)
        self.assertAlmostEqual(wI[0], 2.95)
        self.assertAlmostEqual(wI[1], 3.25)
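The expected values are consistent with both runningcatalog entries surviving the many-to-many association, each pairing its original image-1 flux with one of the image-2 fluxes. A quick standalone check of that arithmetic (plain Python, fluxes taken from the test above):

# Flux histories implied by the assertions.
for history in ([3.0, 2.9], [3.2, 3.3]):
    avg_f_int = sum(history) / len(history)
    avg_f_int_sq = sum(f * f for f in history) / len(history)
    print avg_f_int, avg_f_int_sq
# 2.95 8.705
# 3.25 10.565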