コード例 #1
0
def test_run_release_wrong_db(PRIVATE_INPUT_DIR, jobs, changes,
                              expect_returncode, expect_msg):
    """
    Run package release against a todo-file that was first modified via SQL.

    The ``changes`` SQL statement is applied to the database, after which the
    CLI run is expected to fail with the given return code and message.
    """
    todo_file = os.path.join(PRIVATE_INPUT_DIR, 'ready_for_release',
                             'todo-cbv.sqlite')
    print(todo_file)

    # Apply the requested modification to the todo-file before running:
    with closing(sqlite3.connect(todo_file)) as conn:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute(changes)
        conn.commit()
        cur.close()

    cli_args = [
        '--quiet',
        '--jobs={0:d}'.format(jobs),
        '--version=5',
        '--tpf=' + os.path.dirname(todo_file),
        todo_file,
    ]
    out, err, exitcode = capture_run_cli('run_package_release.py', cli_args)

    # The run should fail in the expected way:
    assert exitcode == expect_returncode
    assert expect_msg in out or expect_msg in err
コード例 #2
0
def test_todolist_run_create(PRIVATE_INPUT_DIR):
    """Create a todo-file from a private input folder via the CLI."""
    # Private folder containing the files to build the todo-file from:
    source_dir = os.path.join(PRIVATE_INPUT_DIR, 'create_todolist')

    # Run the CLI version of the create-todolist program:
    out, err, exitcode = conftest.capture_run_cli('run_create_todolist.py', source_dir)
    assert exitcode == 0, "run_create_todolist failed"
コード例 #3
0
def test_run_dataval_methods(PRIVATE_INPUT_DIR, method, expected_retcode):
    """
    Run DataValidation with a single selected method and check the exit code.
    """
    todo_file = os.path.join(PRIVATE_INPUT_DIR, 'with_corr', 'todo.sqlite')
    cli_args = ['--corrected', f'--method={method:s}', todo_file]
    out, err, exitcode = capture_run_cli('run_dataval.py', cli_args)
    assert exitcode == expected_retcode
コード例 #4
0
ファイル: test_run_training.py プロジェクト: tasoc/starclass
def test_run_training_invalid_testfraction(tf):
    """An out-of-range test-fraction should make the CLI exit with code 2."""
    with tempfile.TemporaryDirectory(prefix='testing-') as tmpdir:
        cli_args = [
            '--classifier=meta',
            '--trainingset=testing',
            '--level=L1',
            f'--testfraction={tf:f}',
            '--output=' + tmpdir,
        ]
        out, err, exitcode = capture_run_cli('run_training.py', cli_args)

        # argparse rejects the value before any training happens:
        assert exitcode == 2
        assert 'error: Testfraction must be between 0 and 1' in err
コード例 #5
0
def test_run_tesscorr(SHARED_INPUT_DIR, method, starid, cadence, var_goal,
                      rms_goal, ptp_goal):
    """Run the TESS correction CLI and check it completes without errors."""
    with tempfile.TemporaryDirectory() as outdir:
        cli_args = [
            '--overwrite', '--debug', '--plot',
            f'--starid={starid:d}',
            f'--method={method:s}',
            f'--cadence={cadence:d}',
            SHARED_INPUT_DIR,
            outdir,
        ]
        out, err, exitcode = capture_run_cli('run_tesscorr.py', cli_args)

    # No ERROR-level log lines and a clean exit:
    assert ' - ERROR - ' not in err
    assert exitcode == 0
コード例 #6
0
def test_run_release_wrong_file(SHARED_INPUT_DIR):
    """
    Run package release on a todo-file that does not exist; the CLI should
    report the missing input and exit with code 2.
    """
    missing_file = os.path.join(SHARED_INPUT_DIR, 'ready_for_release',
                                'todo-does-not-exist.sqlite')
    out, err, exitcode = capture_run_cli('run_package_release.py',
                                         ['--debug', missing_file])
    assert exitcode == 2
    assert 'Input file does not exist' in out
コード例 #7
0
def test_run_dataval(PRIVATE_INPUT_DIR, inp, corr, save):
    """
    Run DataValidation on different inputs; since the referenced data files
    are missing, the run should end in error-state (exit code 4).
    """
    todo_file = os.path.join(PRIVATE_INPUT_DIR, inp, 'todo.sqlite')

    cli_args = ['--quiet']
    if corr:
        cli_args.append('--corrected')
    if save:
        cli_args.append('--validate')
    cli_args.append(todo_file)
    print(cli_args)

    out, err, exitcode = capture_run_cli('run_dataval.py', cli_args)
    assert exitcode == 4  # Since the files are missing, this should result in error-state
コード例 #8
0
def test_run_tesscorr_invalid_cadence():
    """An unsupported --cadence value should be rejected by argparse."""
    expected = "error: argument --cadence: invalid choice: 15 (choose from 'ffi', 1800, 600, 120, 20)"
    out, err, exitcode = capture_run_cli(
        'run_tesscorr.py', "-t --starid=29281992 --cadence=15")
    assert exitcode == 2
    assert expected in err
コード例 #9
0
def test_run_tesscorr_invalid_sector():
    """A non-integer --sector value should be rejected by argparse."""
    cmdline = "-t --starid=29281992 --sector=invalid"
    out, err, exitcode = capture_run_cli('run_tesscorr.py', cmdline)
    assert exitcode == 2
    assert "error: argument --sector: invalid int value: 'invalid'" in err
コード例 #10
0
def test_run_tesscorr_invalid_method():
    """An unknown --method value should be rejected by argparse."""
    cmdline = "-t --starid=29281992 --method=invalid"
    out, err, exitcode = capture_run_cli('run_tesscorr.py', cmdline)
    assert exitcode == 2
    assert "error: argument -m/--method: invalid choice: 'invalid'" in err
コード例 #11
0
ファイル: test_classifiers.py プロジェクト: tasoc/starclass
def test_run_training(PRIVATE_INPUT_DIR, classifier):
    """
    Train a classifier through the CLI, then run classification with it
    (optionally under MPI) and deep-inspect the resulting todo-file.
    """
    tset = testing_tset(tf=0.2, random_seed=42)
    print(tset.StellarClasses)

    with tempfile.TemporaryDirectory(prefix='starclass-testing-') as tmpdir:
        logfile = os.path.join(tmpdir, 'training.log')
        todo_file = os.path.join(PRIVATE_INPUT_DIR, 'todo_run.sqlite')

        # Train the classifier:
        out, err, exitcode = capture_run_cli('run_training.py', [
            '--classifier=' + classifier, '--trainingset=testing',
            '--level=L1', '--testfraction=0.2', '--log=' + logfile,
            '--log-level=info', '--output=' + tmpdir
        ])
        assert exitcode == 0

        # Check that a log-file was indeed generated:
        assert os.path.isfile(logfile), "Log-file not generated"

        # We now have a trained classifier, so we should be able to run the classification:
        for mpi in ([False, True] if MPI_AVAILABLE else [False]):
            cli = 'run_starclass_mpi.py' if mpi else 'run_starclass.py'
            out, err, exitcode = capture_run_cli(cli, [
                '--debug', '--overwrite', '--classifier=' + classifier,
                '--trainingset=testing', '--level=L1', '--datadir=' + tmpdir,
                todo_file
            ],
                                                 mpiexec=mpi)
            assert exitcode == 0

            # Do a deep inspection of the todo-file (read-only connection):
            with closing(
                    sqlite3.connect('file:' + todo_file + '?mode=ro',
                                    uri=True)) as conn:
                conn.row_factory = sqlite3.Row
                cursor = conn.cursor()

                cursor.execute("SELECT * FROM starclass_settings;")
                row = cursor.fetchall()
                assert len(row) == 1, "Only one settings row should exist"
                settings = row[0]
                print(dict(settings))
                assert settings['tset'] == 'testtset'

                cursor.execute(
                    "SELECT * FROM starclass_diagnostics WHERE priority=17;")
                row = cursor.fetchone()
                print(dict(row))
                assert row['priority'] == 17
                assert row['classifier'] == classifier
                assert row['status'] == starclass.STATUS.OK.value
                assert row['errors'] is None

                cursor.execute("SELECT * FROM starclass_results;")
                results = cursor.fetchall()
                assert len(results) == len(tset.StellarClasses)
                # BUGFIX: iterate over the already-fetched rows. Calling
                # cursor.fetchall() a second time returns an empty list
                # (the cursor is exhausted), silently skipping every
                # per-row assertion below.
                for row in results:
                    print(dict(row))
                    assert row['priority'] == 17
                    assert row['classifier'] == classifier
                    tset.StellarClasses[
                        row['class']]  # Will result in KeyError of not correct
                    assert 0 <= row['prob'] <= 1, "Invalid probability"

                cursor.execute("SELECT * FROM starclass_features_common;")
                results = cursor.fetchall()
                assert len(results) == 1
                row = dict(results[0])
                print(row)
                assert row['priority'] == 17
                assert len(row) > 1

                if classifier != 'slosh':
                    cursor.execute(
                        f"SELECT * FROM starclass_features_{classifier:s};")
                    results = cursor.fetchall()
                    assert len(results) == 1
                    row = dict(results[0])
                    print(row)
                    assert row['priority'] == 17
                    assert len(row) > 1
                else:
                    # For SLOSH the features table should not exist.
                    # BUGFIX: the query string was missing its f-prefix, so
                    # '{classifier:s}' was searched for literally and the
                    # check trivially passed for any database contents.
                    cursor.execute(
                        f"SELECT name FROM sqlite_master WHERE type='table' AND name='starclass_features_{classifier:s}';"
                    )
                    assert len(cursor.fetchall()) == 0
コード例 #12
0
ファイル: test_run_cbvprep.py プロジェクト: tasoc/corrections
def test_run_cbvprep_invalid_cadence():
    """An unsupported --cadence value should be rejected by argparse."""
    expected = "error: argument --cadence: invalid choice: 15 (choose from 'ffi', 1800, 600, 120, 20)"
    out, err, exitcode = capture_run_cli('run_cbvprep.py', "--cadence=15")
    assert exitcode == 2
    assert expected in err
コード例 #13
0
ファイル: test_run_cbvprep.py プロジェクト: tasoc/corrections
def test_run_cbvprep(SHARED_INPUT_DIR):
    """
    Run CBV preparation through the CLI and validate the generated HDF5 and
    FITS files for each produced cadence (1800s FFI and 120s TPF).
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        # Run CLI program:
        params = [
            '--version=17', '--sector=1', '--area=114', '--output=' + tmpdir,
            SHARED_INPUT_DIR
        ]
        out, err, exitcode = capture_run_cli('run_cbvprep.py', params)
        assert ' - ERROR - ' not in err
        assert exitcode == 0

        # Check that the plots directory was created:
        print(os.listdir(tmpdir))
        assert os.path.isdir(os.path.join(
            tmpdir, 'plots')), "Plots directory does not exist"

        # This should create CBVs for several cadences:
        for cadence in (1800, 120):

            # The CBV file should now exist:
            cbvfile = os.path.join(tmpdir,
                                   f'cbv-s0001-c{cadence:04d}-a114.hdf5')
            assert os.path.isfile(
                cbvfile), f"HDF5 file does not exist ({cadence:d}s)"

            # Create CBV object:
            cbv = corrections.CBV(cbvfile)

            # Check the returned object:
            assert cbv.cbv_area == 114
            # BUGFIX: the conditional expression must be parenthesized.
            # Without parentheses this parsed as
            # "(cbv.datasource == 'ffi') if cadence == 1800 else 'tpf'",
            # which for the 120s cadence asserted the truthy string 'tpf'
            # and never actually checked the datasource.
            assert cbv.datasource == ('ffi' if cadence == 1800 else 'tpf')
            assert cbv.cadence == cadence
            assert cbv.sector == 1
            assert cbv.camera == 1
            assert cbv.ccd == 1
            assert cbv.data_rel == 1
            assert cbv.ncomponents == 16
            assert cbv.threshold_variability == 1.3
            assert cbv.threshold_correlation == 0.5
            assert cbv.threshold_snrtest == 5.0
            assert cbv.threshold_entropy == -0.5

            # Check that the version has been set correctly:
            assert cbv.version == corrections.__version__

            # The file point to the one we use as input:
            assert cbv.filepath == cbvfile

            # Check arrays stored in CBV object:
            N = len(cbv.time)
            assert N > 0, "Time column is length 0"
            assert len(cbv.cadenceno) == N
            assert cbv.cbv.shape == (N, cbv.ncomponents)
            assert cbv.cbv_s.shape == (N, cbv.ncomponents)

            # Check the FITS file exists:
            fitsfile = os.path.join(
                tmpdir,
                f'tess-s0001-c{cadence:04d}-a114-v17-tasoc_cbv.fits.gz')
            assert os.path.isfile(
                fitsfile), f"FITS file does not exist ({cadence:d}s)"

            # Open FITS file and check headers and data:
            with fits.open(fitsfile, mode='readonly') as hdu:
                # Primary header:
                hdr = hdu[0].header
                assert hdr['CADENCE'] == cadence
                assert hdr['SECTOR'] == cbv.sector
                assert hdr['CAMERA'] == cbv.camera
                assert hdr['CCD'] == cbv.ccd
                assert hdr['CBV_AREA'] == cbv.cbv_area
                assert hdr['DATA_REL'] == cbv.data_rel
                assert hdr['VERSION'] == 17
                assert hdr['PROCVER'] == cbv.version

                # Extension headers must carry the same identification:
                for k in range(1, len(hdu)):
                    hdr1 = hdu[k].header
                    assert hdr1['CAMERA'] == cbv.camera
                    assert hdr1['CCD'] == cbv.ccd
                    assert hdr1['CBV_AREA'] == cbv.cbv_area
                    assert hdr1['THR_COR'] == cbv.threshold_correlation
                    assert hdr1['THR_VAR'] == cbv.threshold_variability
                    assert hdr1['THR_SNR'] == cbv.threshold_snrtest
                    assert hdr1['THR_ENT'] == cbv.threshold_entropy

                # Data:
                assert hdu['CBV.SINGLE-SCALE.114'].data.shape[0] == N
                assert hdu['CBV.SPIKE.114'].data.shape[0] == N
コード例 #14
0
ファイル: test_run_cbvprep.py プロジェクト: tasoc/corrections
def test_run_cbvprep_invalid_ccd():
    """An out-of-range --ccd value should be rejected by argparse."""
    expected = 'error: argument --ccd: invalid choice: 14 (choose from 1, 2, 3, 4)'
    out, err, exitcode = capture_run_cli('run_cbvprep.py', "--ccd=14")
    assert exitcode == 2
    assert expected in err
コード例 #15
0
ファイル: test_run_cbvprep.py プロジェクト: tasoc/corrections
def test_run_cbvprep_invalid_camera():
    """An out-of-range --camera value should be rejected by argparse."""
    expected = 'error: argument --camera: invalid choice: 5 (choose from 1, 2, 3, 4)'
    out, err, exitcode = capture_run_cli('run_cbvprep.py', "--camera=5")
    assert exitcode == 2
    assert expected in err
コード例 #16
0
def test_run_tesscorr_invalid_ccd():
    """An out-of-range --ccd value should be rejected by argparse."""
    cmdline = "-t --starid=29281992 --ccd=14"
    out, err, exitcode = capture_run_cli('run_tesscorr.py', cmdline)
    assert exitcode == 2
    assert 'error: argument --ccd: invalid choice: 14 (choose from 1, 2, 3, 4)' in err
コード例 #17
0
def test_run_release(PRIVATE_INPUT_DIR, jobs, corrector):
    """
    Run package release end-to-end and validate the resulting release file.

    Checks the settings and release tables of the generated SQLite file,
    verifies every released lightcurve on disk (hash, size, dependencies,
    FITS headers, WCS consistency for 120s data), and finally checks
    re-run behaviour (no-op on same version, error on different version).

    Todo-files for the tests can be produced by taking the final
    TASOC_DR05/S06 todo-XX.sqlite files and trimming them using the
    following SQL commands:

    .. code-block:: SQL

        DROP TABLE datavalidation_raw;
        DROP TABLE diagnostics;
        DROP TABLE photometry_skipped;
        DELETE FROM todolist WHERE starid >= 5000000;
        DELETE FROM todolist WHERE priority NOT IN (SELECT priority FROM todolist ORDER BY priority LIMIT 20);
        VACUUM;
        ANALYZE;
        VACUUM;

    .. codeauthor:: Rasmus Handberg <*****@*****.**>
    """

    input_file = os.path.join(PRIVATE_INPUT_DIR, 'ready_for_release',
                              'todo-{0:s}.sqlite'.format(corrector))
    tpf_rootdir = os.path.dirname(input_file)

    params = [
        '--jobs={0:d}'.format(jobs), '--version=5', '--tpf=' + tpf_rootdir,
        input_file
    ]
    out, err, exitcode = capture_run_cli('run_package_release.py', params)
    assert exitcode == 0

    # It should have created a release file:
    release_file = os.path.join(PRIVATE_INPUT_DIR, 'ready_for_release',
                                'release-{0:s}.sqlite'.format(corrector))
    assert os.path.isfile(release_file), "Release file does not exist"

    with closing(sqlite3.connect(release_file)) as conn:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()

        # Settings table should record the dataval version, corrector
        # and the release version passed on the command line:
        cursor.execute("SELECT * FROM settings;")
        row = cursor.fetchone()
        assert row['dataval_version'] == __version__
        assert row['corrector'] == corrector
        assert row['version'] == 5

        # Number of released lightcurves depends on the corrector:
        cursor.execute("SELECT COUNT(*) FROM release;")
        antal = cursor.fetchone()[0]
        if corrector == 'cbv':
            assert antal == 19
        else:
            assert antal == 12

        # For the CBV corrector, the prepared CBV FITS files themselves
        # should also be listed (one per cadence):
        if corrector == 'cbv':
            cursor.execute("SELECT * FROM release_cbv ORDER BY cadence DESC;")
            cbvs = cursor.fetchall()
            assert len(cbvs) == 2
            row = dict(cbvs[0])
            assert row[
                'path'] == 'cbv-prepare/tess-s0006-c1800-a114-v5-tasoc_cbv.fits.gz'
            assert row['sector'] == 6
            assert row['camera'] == 1
            assert row['ccd'] == 1
            assert row['cadence'] == 1800
            assert row['cbv_area'] == 114
            assert row['datarel'] == 8

            row = dict(cbvs[1])
            assert row[
                'path'] == 'cbv-prepare/tess-s0006-c0120-a114-v5-tasoc_cbv.fits.gz'
            assert row['sector'] == 6
            assert row['camera'] == 1
            assert row['ccd'] == 1
            assert row['cadence'] == 120
            assert row['cbv_area'] == 114
            assert row['datarel'] == 8

        # Validate every released lightcurve file on disk:
        cursor.execute("SELECT * FROM release;")
        for row in cursor.fetchall():
            fpath = os.path.join(PRIVATE_INPUT_DIR, 'ready_for_release',
                                 row['lightcurve'])
            print("-" * 30)
            print(fpath)

            # File must exist and match the recorded hash and size:
            assert os.path.isfile(fpath), "File does not exist"
            assert get_filehash(fpath) == row['filehash']
            assert os.path.getsize(fpath) == row['filesize']
            assert row['filesize'] > 0

            # Test the dependency:
            # Only short-cadence (<200s) lightcurves depend on a TPF.
            if row['cadence'] > 200:
                assert row['dependency_tpf'] is None
            else:
                assert row['dependency_tpf'] is not None
                if row['starid'] == 4256961:  # This is a secondary target
                    assert row['dependency_tpf'] == 4255638
                else:  # These are "main" targets:
                    assert row['dependency_tpf'] == row['starid']

            with fits.open(fpath, mode='readonly', memmap=True) as hdu:
                # Primary header must agree with the release-table row:
                hdr = hdu[0].header
                assert hdr['DATAVAL'] == row['dataval']
                assert hdr['DATA_REL'] == row['datarel']
                assert hdr['TICID'] == row['starid']
                assert hdr['CAMERA'] == row['camera']
                assert hdr['CCD'] == row['ccd']
                assert hdr['SECTOR'] == row['sector']

                # Cadence derived from the TIMEDEL keyword (days -> seconds):
                assert row['cadence'] == int(
                    np.round(hdu[1].header['TIMEDEL'] * 86400))

                # Check that no header keywords are duplicated:
                for k, h in enumerate(hdu):
                    keys = list(h.header.keys())
                    nonunique_keys = set(
                        [r for r in keys if keys.count(r) > 1])
                    assert len(
                        nonunique_keys
                    ) == 0, f"Non-unique keys found in header #{k:d}: {nonunique_keys}"

                # Check the fix of invalid header in ENSEMBLE extension:
                if corrector == 'ensemble':
                    # Check the fix of invalid header in ENSEMBLE extension:
                    assert hdu['ENSEMBLE'].header['TDISP2'] != 'E'

                    # Check that the stars used to build ensemble were stored:
                    dependency_lc = set(
                        [int(t) for t in row['dependency_lc'].split(',')])
                    assert set(hdu['ENSEMBLE'].data['TIC']) == dependency_lc

                elif corrector == 'cbv':
                    assert hdu[1].header['CBV_AREA'] == row['cbv_area']

                # Check the modification of the WCS solution in 120s data:
                if row['cadence'] == 120:
                    tpf_file = find_tpf_files(tpf_rootdir,
                                              starid=row['dependency_tpf'],
                                              sector=row['sector'],
                                              camera=row['camera'],
                                              ccd=row['ccd'],
                                              cadence=row['cadence'])
                    print(tpf_file)

                    with warnings.catch_warnings():
                        warnings.filterwarnings('ignore',
                                                category=FITSFixedWarning)

                        # World Coordinate System from the original Target Pixel File:
                        wcs_tpf = WCS(header=fits.getheader(
                            tpf_file[0], extname='APERTURE'),
                                      relax=True)

                        # World coordinate systems from the final FITS lightcurve files:
                        wcs_aperture = WCS(header=hdu['APERTURE'].header,
                                           relax=True)
                        wcs_sumimage = WCS(header=hdu['SUMIMAGE'].header,
                                           relax=True)
                        #wcs_tpf.printwcs()
                        #wcs_aperture.printwcs()
                        #wcs_sumimage.printwcs()

                    # Try calculating the pixel-coordinate of the target star in the three WCS:
                    radec = [[hdr['RA_OBJ'], hdr['DEC_OBJ']]]
                    pix_tpf = wcs_tpf.all_world2pix(radec, 0)
                    pix_aperture = wcs_aperture.all_world2pix(radec, 0)
                    pix_sumimage = wcs_sumimage.all_world2pix(radec, 0)

                    # They should give exactly the same results:
                    np.testing.assert_allclose(pix_aperture, pix_tpf)
                    np.testing.assert_allclose(pix_sumimage, pix_tpf)

        cursor.close()

    # Re-running should not process anything:
    out, err, exitcode = capture_run_cli('run_package_release.py', params)
    assert exitcode == 0
    assert 'Nothing to process' in out

    # Re-running with different VERSION should trigger error:
    params = [
        '--jobs={0:d}'.format(jobs), '--version=17', '--tpf=' + tpf_rootdir,
        input_file
    ]
    out, err, exitcode = capture_run_cli('run_package_release.py', params)
    assert exitcode == 2
    assert 'Inconsistent VERSION provided' in out
コード例 #18
0
ファイル: test_run_cbvprep.py プロジェクト: tasoc/corrections
def test_run_cbvprep_invalid_sector():
    """A non-integer --sector value should be rejected by argparse."""
    expected = "error: argument --sector: invalid int value: 'invalid'"
    out, err, exitcode = capture_run_cli('run_cbvprep.py', "--sector=invalid")
    assert exitcode == 2
    assert expected in err