Beispiel #1
0
    def test_create(self):
        """Check ExtractedSource creation via image, database, and id.

        Covers: creating against an image, creating with an explicit
        database handle, loading by id, and the error raised when a
        required data key is missing.
        """
        dataset = DataSet(data={'description': 'dataset with images'},
                          database=self.database)
        image = Image(
                dataset=dataset,
                data=db_subs.example_dbimage_data_dict())
        extracted_source_data = dict(zone=13,
                    ra=12.12, decl=13.13, ra_err=21.1, decl_err=21.09,
                    ra_fit_err=1.12, decl_fit_err=1.23,
                    uncertainty_ew=0.1, uncertainty_ns=0.1,
                    ew_sys_err=20, ns_sys_err=20,
                    error_radius=10.0,
                    x=0.11, y=0.22, z=0.33,
                    racosdecl=0.44, det_sigma=10.)
        # Two constructions from the same data dict must produce two
        # distinct database rows (distinct ids).
        src1 = ExtractedSource(data=extracted_source_data, image=image)
        src2 = ExtractedSource(data=extracted_source_data, image=image,
                               database=self.database)
        self.assertNotEqual(src1.id, src2.id)

        # An image reference may also be given as an id inside the data dict.
        extracted_source_data['image'] = image.id
        src3 = ExtractedSource(data=extracted_source_data,
                               database=self.database)
        # When an existing id is passed, the stored row is loaded; the
        # changed 'ra' in the data dict presumably does not override the
        # stored value (src1.ra == src4.ra below) -- NOTE(review): confirm
        # against the ExtractedSource constructor.
        extracted_source_data['ra'] = 23.23
        src4 = ExtractedSource(data=extracted_source_data,
                               database=self.database, id=src1.id)
        self.assertEqual(src1.id, src4.id)
        self.assertAlmostEqual(src1.ra, src4.ra)
        # A missing required key must raise AttributeError.
        del extracted_source_data['x']
        # assertRaisesRegexp is a deprecated alias, removed in Python 3.12;
        # use assertRaisesRegex.
        self.assertRaisesRegex(
            AttributeError, "missing required data key: x",
            ExtractedSource, data=extracted_source_data, database=self.database)
Beispiel #2
0
    def test_infinite(self):
        """Check that database insertion doesn't choke on infinite errors.

        A source with infinite flux errors must be dropped (not inserted)
        and a warning must be logged.
        """
        # logging.StreamHandler writes text, so capture into a StringIO.
        # The original BytesIO raises TypeError under Python 3 and makes
        # the assertIn below compare str against bytes.
        from io import StringIO

        dataset = DataSet(data={'description': 'example dataset'},
                          database=self.database)
        image = Image(dataset=dataset, data=db_subs.example_dbimage_data_dict())

        # Inserting a standard example extractedsource should be fine.
        extracted_source = db_subs.example_extractedsource_tuple()
        image.insert_extracted_sources([extracted_source])
        inserted = columns_from_table('extractedsource',
                                      where={'image': image.id})
        self.assertEqual(len(inserted), 1)

        # But if the source has infinite errors we drop it and log a warning.
        extracted_source = db_subs.example_extractedsource_tuple(
            error_radius=float('inf'),
            peak_err=float('inf'),
            flux_err=float('inf'))

        # We will add a handler to the root logger which catches all log
        # output in a buffer.
        iostream = StringIO()
        hdlr = logging.StreamHandler(iostream)
        logging.getLogger().addHandler(hdlr)
        try:
            image.insert_extracted_sources([extracted_source])
        finally:
            # Always detach the handler so a failing insert doesn't leak
            # it into subsequent tests.
            logging.getLogger().removeHandler(hdlr)

        # We want to be sure that the error has been appropriately logged.
        self.assertIn("Dropped source fit with infinite flux errors",
                      iostream.getvalue())

        # The infinite-error source was dropped, so the row count is unchanged.
        inserted = columns_from_table('extractedsource',
                                      where={'image': image.id})
        self.assertEqual(len(inserted), 1)
Beispiel #3
0
    def test_update(self):
        """
        Check that ORM-updates work for the Image class.
        """
        dataset = DataSet(
            data={'description': 'dataset with changing images'},
            database=self.database)
        img_props = db_subs.example_dbimage_data_dict()

        img_a = Image(dataset=dataset, data=img_props)
        # A no-op update must leave the cached values untouched (sanity check).
        img_a.update()
        self.assertEqual(img_a.tau_time, img_props['tau_time'])

        # Prepare a ladder of distinct tau_time values and a new frequency.
        tau_a = img_props['tau_time'] + 100
        tau_b = tau_a + 300
        tau_c = tau_b + 300
        tau_d = tau_c + 300
        new_freq = img_props['freq_eff'] * 1.2

        # Update a single column through the ORM.
        img_a.update(tau_time=tau_a)
        self.assertEqual(img_a.tau_time, tau_a)

        # A second orm-object built from the same database id sees the
        # already-updated value.
        img_b = Image(dataset=dataset, id=img_a.id)
        self.assertAlmostEqual(img_b.tau_time, tau_a)
        self.assertAlmostEqual(img_b.freq_eff, img_props['freq_eff'])

        # Both objects share an id: a change made through one becomes
        # visible on the other -- but only after that other calls update().
        img_b.update(tau_time=tau_b)
        self.assertAlmostEqual(img_a.tau_time, tau_a)
        img_a.update()
        self.assertAlmostEqual(img_a.tau_time, tau_b)
        img_a.update(tau_time=tau_c)
        img_b.update()
        self.assertAlmostEqual(img_b.tau_time, tau_c)

        # Several columns may be updated in a single call.
        img_a.update(tau_time=tau_d, freq_eff=new_freq)
        self.assertAlmostEqual(img_a.tau_time, tau_d)
        self.assertAlmostEqual(img_a.freq_eff, new_freq)

        # Datetime values must survive the conversion roundtrip as well.
        ts_before = img_props['taustart_ts']
        ts_after = ts_before + datetime.timedelta(days=3)
        self.assertEqual(img_a.taustart_ts, ts_before)
        self.assertEqual(img_b.taustart_ts, ts_before)
        img_b.update(taustart_ts=ts_after)
        # img_a has not refreshed yet, so it still caches the old timestamp.
        self.assertEqual(img_a.taustart_ts, ts_before)
        self.assertEqual(img_b.taustart_ts, ts_after)
        img_a.update()
        self.assertEqual(img_a.taustart_ts, ts_after)
Beispiel #4
0
    def test_update(self):
        """
        Check that ORM-updates work for the Image class.
        """
        ds = DataSet(
            data={'description': 'dataset with changing images'},
            database=self.database)
        template = db_subs.example_dbimage_data_dict()

        first = Image(dataset=ds, data=template)
        # Sanity check: updating without arguments changes nothing.
        first.update()
        self.assertEqual(first.tau_time, template['tau_time'])

        # Now change something: four distinct integration times plus a
        # shifted effective frequency.
        t1 = template['tau_time'] + 100
        t2 = t1 + 300
        t3 = t2 + 300
        t4 = t3 + 300
        shifted_freq = template['freq_eff'] * 1.2

        first.update(tau_time=t1)
        self.assertEqual(first.tau_time, t1)

        # Fresh orm-object created from the database id of `first`; it
        # reflects the value already written above.
        second = Image(dataset=ds, id=first.id)
        self.assertAlmostEqual(second.tau_time, t1)
        self.assertAlmostEqual(second.freq_eff, template['freq_eff'])

        # Same id, so changing `second` changes `first` -- but *only*
        # after `first` calls update(), and vice versa.
        second.update(tau_time=t2)
        self.assertAlmostEqual(first.tau_time, t1)
        first.update()
        self.assertAlmostEqual(first.tau_time, t2)
        first.update(tau_time=t3)
        second.update()
        self.assertAlmostEqual(second.tau_time, t3)

        # Multiple columns in one update() call.
        first.update(tau_time=t4, freq_eff=shifted_freq)
        self.assertAlmostEqual(first.tau_time, t4)
        self.assertAlmostEqual(first.freq_eff, shifted_freq)

        # And datetime conversion roundtrips work correctly too.
        stamp_old = template['taustart_ts']
        stamp_new = stamp_old + datetime.timedelta(days=3)
        self.assertEqual(first.taustart_ts, stamp_old)
        self.assertEqual(second.taustart_ts, stamp_old)
        second.update(taustart_ts=stamp_new)
        # `first` is not yet refreshed and still caches the old value.
        self.assertEqual(first.taustart_ts, stamp_old)
        self.assertEqual(second.taustart_ts, stamp_new)
        first.update()
        self.assertEqual(first.taustart_ts, stamp_new)
Beispiel #5
0
    def test_create2(self):
        """Check per-image source bookkeeping for ExtractedSource.

        Loading by id alone attaches no new source to an image; loading
        by id *and* image creates a separate ('deep' copy) row.
        """
        dataset1 = DataSet(data={'description': 'dataset with images'},
                           database=self.database)
        self.assertEqual(dataset1.images, set())
        image_data = db_subs.example_dbimage_data_dict()
        image1 = Image(dataset=dataset1, data=image_data)
        image2 = Image(dataset=dataset1, data=image_data)

        extractedsource_data = {
            'ra': 123.123,
            'decl': 23.23,
            'ra_err': 21.1,
            'decl_err': 21.09,
            'ra_fit_err': 0.1,
            'decl_fit_err': 0.1,
            'uncertainty_ew': 0.1,
            'uncertainty_ns': 0.1,
            'zone': 1,
            'x': 0.11,
            'y': 0.22,
            'z': 0.33,
            'racosdecl': 0.44,
            'det_sigma': 10.0,
            'ew_sys_err': 20,
            'ns_sys_err': 20,
            'error_radius': 10.0
        }
        source1 = ExtractedSource(image=image1, data=extractedsource_data)
        extractedsource_data['ra'] = 45.45
        extractedsource_data['decl'] = 55.55
        source2 = ExtractedSource(image=image1, data=extractedsource_data)
        self.assertEqual(len(image1.sources), 2)
        # Source #3 points to the same source as #2
        source3 = ExtractedSource(id=source2.id, database=self.database)
        # Which means there are no extra sources for image1
        self.assertEqual(len(image1.sources), 2)
        # (A dead `srcids = ...` assignment was removed here: it was
        # recomputed below before ever being read.)
        # If, however, we create a new source with
        # an image reference in the constructor, we get a
        # 'deep' copy:
        source4 = ExtractedSource(image=image1, id=source2.id)
        # Now there's a new source for image1!
        self.assertEqual(len(image1.sources), 3)
        # But if we filter on the source ids,
        # we see there are really only two sources
        srcids = set(source.id for source in image1.sources)
        self.assertEqual(set([source1.id, source2.id]), srcids)

        extractedsource_data['ra'] = 89.89
        extractedsource_data['decl'] = 78.78
        source5 = ExtractedSource(image=image2, data=extractedsource_data)
        self.assertEqual(len(image2.sources), 1)
        self.assertEqual(image2.sources.pop().id, source5.id)
Beispiel #6
0
 def test_update(self):
     """Check that ExtractedSource.update() propagates between ORM objects.

     Objects sharing a database id only see each other's changes after
     calling update() themselves.
     """
     image_data = db_subs.example_dbimage_data_dict()
     dataset1 = DataSet(data={'description': 'dataset with images'},
                        database=self.database)
     self.assertEqual(dataset1.images, set())
     image1 = Image(dataset=dataset1, data=image_data)
     image2 = Image(dataset=dataset1, data=image_data)
     extractedsource_data = {
         'ra': 123.123,
         'decl': 23.23,
         'ra_err': 21.1,
         'decl_err': 21.09,
         'ra_fit_err': 0.1,
         'decl_fit_err': 0.1,
         'uncertainty_ew': 0.1,
         'uncertainty_ns': 0.1,
         'zone': 1,
         'x': 0.11,
         'y': 0.22,
         'z': 0.33,
         'racosdecl': 0.44,
         'det_sigma': 11.1,
         'ew_sys_err': 20,
         'ns_sys_err': 20,
         'error_radius': 10.0
     }
     source1 = ExtractedSource(image=image1, data=extractedsource_data)
     extractedsource_data['ra'] = 45.45
     extractedsource_data['decl'] = 55.55
     source2 = ExtractedSource(image=image1, data=extractedsource_data)
     self.assertEqual(len(image1.sources), 2)
     # Source #3 points to the same source as #2
     source3 = ExtractedSource(id=source2.id, database=self.database)
     # Update source3
     source3.update(decl=44.44)
     # But no change for #1 and #2
     self.assertAlmostEqual(source1.decl, 23.23)
     self.assertAlmostEqual(source2.decl, 55.55)
     self.assertAlmostEqual(source3.decl, 44.44)
     source1.update()  # nothing changes for #1
     self.assertAlmostEqual(source1.decl, 23.23)
     self.assertAlmostEqual(source2.decl, 55.55)
     self.assertAlmostEqual(source3.decl, 44.44)
     # Now we make sure source #2 takes note of the change done through #3
     source2.update()
     self.assertAlmostEqual(source1.decl, 23.23)
     # (A duplicated source1 assertion was here; check source3 instead so
     # all three objects are covered after the refresh.)
     self.assertAlmostEqual(source3.decl, 44.44)
     self.assertAlmostEqual(source2.decl, 44.44)
Beispiel #7
0
    def test_create(self):
        """Check ExtractedSource creation via image, database, and id.

        Covers: creating against an image, creating with an explicit
        database handle, loading by id, and the error raised when a
        required data key is missing.
        """
        dataset = DataSet(data={'description': 'dataset with images'},
                          database=self.database)
        image = Image(dataset=dataset,
                      data=db_subs.example_dbimage_data_dict())
        extracted_source_data = dict(zone=13,
                                     ra=12.12,
                                     decl=13.13,
                                     ra_err=21.1,
                                     decl_err=21.09,
                                     ra_fit_err=1.12,
                                     decl_fit_err=1.23,
                                     uncertainty_ew=0.1,
                                     uncertainty_ns=0.1,
                                     ew_sys_err=20,
                                     ns_sys_err=20,
                                     error_radius=10.0,
                                     x=0.11,
                                     y=0.22,
                                     z=0.33,
                                     racosdecl=0.44,
                                     det_sigma=10.)
        # Two constructions from the same data dict must produce two
        # distinct database rows (distinct ids).
        src1 = ExtractedSource(data=extracted_source_data, image=image)
        src2 = ExtractedSource(data=extracted_source_data,
                               image=image,
                               database=self.database)
        self.assertNotEqual(src1.id, src2.id)

        # An image reference may also be given as an id inside the data dict.
        extracted_source_data['image'] = image.id
        src3 = ExtractedSource(data=extracted_source_data,
                               database=self.database)
        # When an existing id is passed, the stored row is loaded; the
        # changed 'ra' in the data dict presumably does not override the
        # stored value (src1.ra == src4.ra below) -- NOTE(review): confirm
        # against the ExtractedSource constructor.
        extracted_source_data['ra'] = 23.23
        src4 = ExtractedSource(data=extracted_source_data,
                               database=self.database,
                               id=src1.id)
        self.assertEqual(src1.id, src4.id)
        self.assertAlmostEqual(src1.ra, src4.ra)
        # A missing required key must raise AttributeError.
        del extracted_source_data['x']
        # assertRaisesRegexp is a deprecated alias, removed in Python 3.12;
        # use assertRaisesRegex.
        self.assertRaisesRegex(AttributeError,
                               "missing required data key: x",
                               ExtractedSource,
                               data=extracted_source_data,
                               database=self.database)
Beispiel #8
0
    def test_create2(self):
        """Check per-image source bookkeeping for ExtractedSource.

        Loading by id alone attaches no new source to an image; loading
        by id *and* image creates a separate ('deep' copy) row.
        """
        dataset1 = DataSet(data={'description': 'dataset with images'},
                           database=self.database)
        self.assertEqual(dataset1.images, set())
        image_data = db_subs.example_dbimage_data_dict()
        image1 = Image(dataset=dataset1, data=image_data)
        image2 = Image(dataset=dataset1, data=image_data)

        extractedsource_data = {'ra': 123.123, 'decl': 23.23,
                'ra_err': 21.1, 'decl_err': 21.09,
                'ra_fit_err': 0.1, 'decl_fit_err': 0.1,
                'uncertainty_ew': 0.1, 'uncertainty_ns': 0.1,
                'zone': 1, 'x': 0.11, 'y': 0.22, 'z': 0.33,
                'racosdecl': 0.44,
                'det_sigma': 10.0,
                'ew_sys_err': 20, 'ns_sys_err': 20,
                'error_radius': 10.0}
        source1 = ExtractedSource(image=image1, data=extractedsource_data)
        extractedsource_data['ra'] = 45.45
        extractedsource_data['decl'] = 55.55
        source2 = ExtractedSource(
            image=image1, data=extractedsource_data)
        self.assertEqual(len(image1.sources), 2)
        # Source #3 points to the same source as #2
        source3 = ExtractedSource(id=source2.id, database=self.database)
        # Which means there are no extra sources for image1
        self.assertEqual(len(image1.sources), 2)
        # (A dead `srcids = ...` assignment was removed here: it was
        # recomputed below before ever being read.)
        # If, however, we create a new source with
        # an image reference in the constructor, we get a
        # 'deep' copy:
        source4 = ExtractedSource(image=image1, id=source2.id)
        # Now there's a new source for image1!
        self.assertEqual(len(image1.sources), 3)
        # But if we filter on the source ids,
        # we see there are really only two sources
        srcids = set(source.id for source in image1.sources)
        self.assertEqual(set([source1.id, source2.id]), srcids)

        extractedsource_data['ra'] = 89.89
        extractedsource_data['decl'] = 78.78
        source5 = ExtractedSource(image=image2, data=extractedsource_data)
        self.assertEqual(len(image2.sources), 1)
        self.assertEqual(image2.sources.pop().id, source5.id)
Beispiel #9
0
 def test_update(self):
     """Check that ExtractedSource.update() propagates between ORM objects.

     Objects sharing a database id only see each other's changes after
     calling update() themselves.
     """
     image_data = db_subs.example_dbimage_data_dict()
     dataset1 = DataSet(data={'description': 'dataset with images'},
                        database=self.database)
     self.assertEqual(dataset1.images, set())
     image1 = Image(dataset=dataset1, data=image_data)
     image2 = Image(dataset=dataset1, data=image_data)
     extractedsource_data = {'ra': 123.123, 'decl': 23.23,
             'ra_err': 21.1, 'decl_err': 21.09,
             'ra_fit_err': 0.1, 'decl_fit_err': 0.1,
             'uncertainty_ew': 0.1, 'uncertainty_ns': 0.1,
             'zone': 1, 'x': 0.11, 'y': 0.22, 'z': 0.33,
             'racosdecl': 0.44,
             'det_sigma': 11.1,
             'ew_sys_err': 20, 'ns_sys_err': 20,
             'error_radius': 10.0}
     source1 = ExtractedSource(image=image1, data=extractedsource_data)
     extractedsource_data['ra'] = 45.45
     extractedsource_data['decl'] = 55.55
     source2 = ExtractedSource(image=image1, data=extractedsource_data)
     self.assertEqual(len(image1.sources), 2)
     # Source #3 points to the same source as #2
     source3 = ExtractedSource(id=source2.id, database=self.database)
     # Update source3
     source3.update(decl=44.44)
     # But no change for #1 and #2
     self.assertAlmostEqual(source1.decl, 23.23)
     self.assertAlmostEqual(source2.decl, 55.55)
     self.assertAlmostEqual(source3.decl, 44.44)
     source1.update()  # nothing changes for #1
     self.assertAlmostEqual(source1.decl, 23.23)
     self.assertAlmostEqual(source2.decl, 55.55)
     self.assertAlmostEqual(source3.decl, 44.44)
     # Now we make sure source #2 takes note of the change done through #3
     source2.update()
     self.assertAlmostEqual(source1.decl, 23.23)
     # (A duplicated source1 assertion was here; check source3 instead so
     # all three objects are covered after the refresh.)
     self.assertAlmostEqual(source3.decl, 44.44)
     self.assertAlmostEqual(source2.decl, 44.44)
Beispiel #10
0
    def test_infinite(self):
        """Check that database insertion doesn't choke on infinite errors.

        A source with infinite flux errors must be dropped (not inserted)
        and a warning must be logged.
        """
        # logging.StreamHandler writes text, so capture into a StringIO.
        # The original BytesIO raises TypeError under Python 3 and makes
        # the assertIn below compare str against bytes.
        from io import StringIO

        dataset = DataSet(data={'description': 'example dataset'},
                          database=self.database)
        image = Image(dataset=dataset,
                      data=db_subs.example_dbimage_data_dict())

        # Inserting a standard example extractedsource should be fine.
        extracted_source = db_subs.example_extractedsource_tuple()
        image.insert_extracted_sources([extracted_source])
        inserted = columns_from_table('extractedsource',
                                      where={'image': image.id})
        self.assertEqual(len(inserted), 1)

        # But if the source has infinite errors we drop it and log a warning.
        extracted_source = db_subs.example_extractedsource_tuple(
            error_radius=float('inf'),
            peak_err=float('inf'),
            flux_err=float('inf'))

        # We will add a handler to the root logger which catches all log
        # output in a buffer.
        iostream = StringIO()
        hdlr = logging.StreamHandler(iostream)
        logging.getLogger().addHandler(hdlr)
        try:
            image.insert_extracted_sources([extracted_source])
        finally:
            # Always detach the handler so a failing insert doesn't leak
            # it into subsequent tests.
            logging.getLogger().removeHandler(hdlr)

        # We want to be sure that the error has been appropriately logged.
        self.assertIn("Dropped source fit with infinite flux errors",
                      iostream.getvalue())

        # The infinite-error source was dropped, so the row count is unchanged.
        inserted = columns_from_table('extractedsource',
                                      where={'image': image.id})
        self.assertEqual(len(inserted), 1)
Beispiel #11
0
 def setUp(self):
     """Open a database handle and prepare template data for each image."""
     import tkp.db.database
     self.database = tkp.db.database.Database()
     # Template dict reused by the individual tests when making images.
     self.image_data = db_subs.example_dbimage_data_dict()
Beispiel #12
0
 def setUp(self):
     """Open a database handle and prepare template data for each image."""
     import tkp.db.database
     self.database = tkp.db.database.Database()
     # Template dict reused by the individual tests when making images.
     self.image_data = db_subs.example_dbimage_data_dict()
Beispiel #13
0
    def test_lightsurface(self):
        """Populate a frequency/date grid of images and extract a light curve."""
        # Make 4 * 5 images covering five frequencies and four dates.
        images = []
        for freq in [80e6, 90e6, 100e6, 110e6, 120e6]:
            for day in [3, 4, 5, 6]:
                img_data = db_subs.example_dbimage_data_dict(
                    taustart_ts=datetime.datetime(2010, 3, day),
                    freq_eff=freq)
                images.append(Image(dataset=self.dataset, data=img_data))

        # Three sources per image, each with its own coordinates and flux.
        data_list = [
            {
                'ra': 111.111 + i,
                'decl': 11.11 + i,
                'ra_fit_err': 0.01,
                'decl_fit_err': 0.01,
                'i_peak': 10. * i,
                'i_peak_err': 0.1,
                'error_radius': 10.0,
                'fit_type': 1,
                #  x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
            }
            for i in range(1, 4)
        ]

        # Insert the three sources into every image (scaling the flux per
        # image) and associate each image's sources in turn.
        for idx, image in enumerate(images):
            # Build the "source finding results" for this image.
            sources = [
                db_subs.example_extractedsource_tuple(
                    ra=entry['ra'],
                    dec=entry['decl'],
                    ra_fit_err=entry['ra_fit_err'],
                    dec_fit_err=entry['decl_fit_err'],
                    peak=entry['i_peak'] * (1 + idx),
                    peak_err=entry['i_peak_err'],
                    flux=entry['i_peak'] * (1 + idx),
                    flux_err=entry['i_peak_err'],
                    fit_type=entry['fit_type'])
                for entry in data_list
            ]
            insert_extracted_sources(image._id, sources)
            associate_extracted_sources(image._id,
                                        deRuiter_r=3.7,
                                        new_source_sigma_margin=3)

        # Refresh the dataset and its set of images.
        self.dataset.update()
        self.dataset.update_images()

        # Refresh every image and its set of sources.
        for image in self.dataset.images:
            image.update()
            image.update_sources()

        # Pick any image (images is a set, so pop one), take the source
        # with the smallest RA and extract its light curve.
        # TODO: aaarch this is so ugly. Because this a set we need to pop it.
        sources = self.dataset.images.pop().sources
        ordered = sorted(sources, key=attrgetter('ra'))
        extracted_source = ordered[0].id
        lightcurve = tkp.db.general.lightcurve(extracted_source)
Beispiel #14
0
    def test_lightsurface(self):
        """Populate a frequency/date grid of images and extract a light curve."""
        # Make 4 * 5 images covering five frequencies and four dates.
        images = []
        for band in [80e6, 90e6, 100e6, 110e6, 120e6]:
            for day_of_month in [3, 4, 5, 6]:
                properties = db_subs.example_dbimage_data_dict(
                    taustart_ts=datetime.datetime(2010, 3, day_of_month),
                    freq_eff=band)
                images.append(Image(dataset=self.dataset, data=properties))

        # Three sources per image, each with its own coordinates and flux.
        data_list = []
        for n in range(1, 4):
            data_list.append({
                'ra': 111.111 + n,
                'decl': 11.11 + n,
                'ra_fit_err': 0.01,
                'decl_fit_err': 0.01,
                'i_peak': 10. * n,
                'i_peak_err': 0.1,
                'error_radius': 10.0,
                'fit_type': 1,
                #  x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
            })

        # Insert the three sources into every image, scaling the flux with
        # the image index, then run the association step per image.
        for img_no, image in enumerate(images):
            scale = 1 + img_no
            # Build the "source finding results" for this image.
            found = []
            for record in data_list:
                found.append(db_subs.example_extractedsource_tuple(
                    ra=record['ra'],
                    dec=record['decl'],
                    ra_fit_err=record['ra_fit_err'],
                    dec_fit_err=record['decl_fit_err'],
                    peak=record['i_peak'] * scale,
                    peak_err=record['i_peak_err'],
                    flux=record['i_peak'] * scale,
                    flux_err=record['i_peak_err'],
                    fit_type=record['fit_type']))
            insert_extracted_sources(image._id, found)
            associate_extracted_sources(image._id, deRuiter_r=3.7,
                                        new_source_sigma_margin=3)

        # Refresh the dataset and its set of images.
        self.dataset.update()
        self.dataset.update_images()

        # Refresh every image and its set of sources.
        for image in self.dataset.images:
            image.update()
            image.update_sources()

        # Pick any image (images is a set, so pop one), take the source
        # with the smallest RA and extract its light curve.
        # TODO: aaarch this is so ugly. Because this a set we need to pop it.
        sources = self.dataset.images.pop().sources
        by_ra = sorted(sources, key=attrgetter('ra'))
        extracted_source = by_ra[0].id
        lightcurve = tkp.db.general.lightcurve(extracted_source)