Example #1
def test_pool(tmpdir):
    pool = AssociationPool.read(POOL_FILE)
    assert len(pool) == 636

    tmp_pool = str(tmpdir.mkdir(__name__).join('tmp_pool.csv'))
    pool.write(tmp_pool)

    roundtrip = AssociationPool.read(tmp_pool)
    assert len(pool) == len(roundtrip)
    assert set(pool.colnames) == set(roundtrip.colnames)
Example #2
def full_pool_rules(request):
    """Setup to use the full example pool and registry"""
    pool_fname = t_path('data/mega_pool.csv')
    pool = AssociationPool.read(pool_fname)
    rules = AssociationRegistry()

    return (pool, rules, pool_fname)
Example #3
def test_simple():
    """Test generate on simple registry"""
    registry = AssociationRegistry([t_path('data/rules_basic.py')],
                                   include_default=False)
    pool = AssociationPool()
    pool['value'] = ['row1', 'row2']

    asns = generate(pool, registry)
    assert len(asns) == 1
    assert len(asns[0]['members']) == 2
Example #4
def test_level35_names(pool_file):
    rules = registry_level3_only()
    pool = AssociationPool.read(pool_file)
    asns = generate(pool, rules)
    for asn in asns:
        product_name = asn['products'][0]['name']
        if asn['asn_rule'] == 'Asn_IFU':
            m = re.match(LEVEL3_PRODUCT_NAME_NO_OPTELEM_REGEX, product_name)
        else:
            m = re.match(LEVEL3_PRODUCT_NAME_REGEX, product_name)
        assert m is not None
Example #5
def test_level3_names(pool_file, global_constraints):
    rules = registry_level3_only(
        global_constraints=global_constraints
    )
    pool = AssociationPool.read(pool_file)
    asns = generate(pool, rules)
    for asn in asns:
        product_name = asn['products'][0]['name']
        if asn['asn_rule'] == 'Asn_Lv3MIRMRS':
            m = re.match(LEVEL3_PRODUCT_NAME_NO_OPTELEM_REGEX, product_name)
        else:
            m = re.match(LEVEL3_PRODUCT_NAME_REGEX, product_name)
        assert m is not None
        assert m.groupdict()['acid'] == 'o002'
Example #6
def test_duplicate_names():
    """
    For Level 3 association, there should be no association
    with the same product name. Generation should produce
    log messages indicating when duplicate names have been found.
    """
    pool = AssociationPool.read(t_path('data/jw00632_dups.csv'))
    constrain_all_candidates = constrain_on_candidates(None)
    rules = registry_level3_only(global_constraints=constrain_all_candidates)

    with pytest.warns(RuntimeWarning):
        asns = generate(pool, rules)

    # There should only be one association left.
    assert len(asns) == 1
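
For reference, a hedged sketch of inspecting those duplicate-name warnings directly with the standard-library warnings module instead of pytest.warns:

import warnings

# Sketch only: record the RuntimeWarnings that generate() emits
# when duplicate product names are found, and inspect the messages.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    asns = generate(pool, rules)
duplicate_messages = [
    str(w.message) for w in caught
    if issubclass(w.category, RuntimeWarning)
]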
Example #7
def test_multiple_optelems(pool_file):
    rules = registry_level3_only()
    pool = AssociationPool.read(pool_file)
    asns = generate(pool, rules)
    for asn in asns:
        product_name = asn['products'][0]['name']
        if asn['asn_rule'] != 'Asn_Lv3MIRMRS':
            m = re.match(LEVEL3_PRODUCT_NAME_REGEX, product_name)
            assert m is not None
            try:
                value = '-'.join(asn.constraints['opt_elem2'].found_values)
            except KeyError:
                value = None
            if value in EMPTY:
                assert '-' not in m.groupdict()['opt_elem']
            else:
                assert '-' in m.groupdict()['opt_elem']
Example #8
def combine_pools(pools, **pool_kwargs):
    """Combine pools into a single pool

    Parameters
    ----------
    pools: str, astropy.table.Table, [str|Table, ...]
        The pools to combine. Either a single pool or an
        iterable of pools may be passed.
        The entries themselves can be either a file path
        or an astropy.table.Table-like object.

    pool_kwargs: dict
        Other keyword arguments to pass to AssociationPool.read

    Returns
    -------
    AssociationPool|astropy.table.Table
        The combined pool
    """
    if not is_iterable(pools):
        pools = [pools]
    just_pools = []
    for pool in pools:
        if not isinstance(pool, Table):
            pool = AssociationPool.read(pool, **pool_kwargs)
        just_pools.append(pool)
    if len(just_pools) > 1:
        mega_pool = vstack(just_pools, metadata_conflicts='silent')
    else:
        mega_pool = just_pools[0].copy(copy_data=True)

    # Replace OBS_NUM and ASN_CANDIDATE_ID with actual numbers, if
    # necessary
    expnum = Counter(start=0)
    obsnum = Counter(start=0)
    acid = Counter(start=999)
    local_env = locals()
    global_env = globals()
    for row in mega_pool:
        mega_pool[row.index] = [
            parse_value(v, global_env=global_env, local_env=local_env)
            for v in row
        ]

    return mega_pool
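
A hedged usage sketch for combine_pools ('pool_a.csv' is a placeholder path): either a single pool or an iterable mixing file paths and Table objects may be passed, with keyword arguments forwarded to AssociationPool.read:

# Sketch only: the inputs below are illustrative.
single = combine_pools('pool_a.csv', delimiter='|')

extra = Table()
extra['value'] = ['row1', 'row2']
merged = combine_pools(['pool_a.csv', extra], delimiter='|')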
Example #9
def test_niriss_wfss():
    """Test association properties for NIRISS WFSS"""

    pool = AssociationPool.read(
        t_path(path.join('data', 'jw87800_20180412T163456_pool.csv'))
    )
    cmd_args = [
        '--dry-run',
        '-D'
    ]
    results = Main(
        cmd_args,
        pool=pool
    )
    asns = results.associations

    # Expect 12 associations covering the four types: image2, spec2, image3, spec3
    assert len(asns) == 12
    asn_types = [
        asn['asn_type']
        for asn in asns
    ]
    assert REQUIRED_ASN_TYPES == set(asn_types)

    # Arrange associations by type
    asn_by_type = {
        asn['asn_type']: asn
        for asn in asns
    }

    # Ensure catalog and segmentation map names are correct in the spec2 associations
    l3name = asn_by_type['image3']['products'][0]['name']
    source_cat = l3name + '_cat.ecsv'
    segmap = l3name + '_segm.fits'
    for product in asn_by_type['spec2']['products']:
        members_by_type = {
            member['exptype']: member
            for member in product['members']
        }
        assert members_by_type['sourcecat']['expname'] == source_cat
        assert members_by_type['segmap']['expname'] == segmap
Example #10
def test_duplicate_generate():
    """Test for duplicate/overwrite association

    The pool has two exposures, one without a valid `asn_candidate`,
    and one with a valid observation `asn_candidate`.
    When set with the "all candidates" constraint, only one association
    should be made.

    The prompt for this test was that three associations were being created,
    two of which were the observation candidate, with the second
    being a duplicate of the first. The third was an extraneous
    discovered candidate.
    """
    pool = AssociationPool.read(t_path('data/pool_duplicate.csv'))
    constrain_all_candidates = constrain_on_candidates(None)
    rules = registry_level3_only(global_constraints=constrain_all_candidates)
    asns = generate(pool, rules)
    assert len(asns) == 1
    asn = asns[0]
    assert asn['asn_type'] == 'image3'
    assert asn['asn_id'] == 'o029'
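
By contrast, a hedged sketch of constraining generation to specific candidate IDs, mirroring the constrain_on_candidates call used by Main below:

# Sketch only: constrain to the single observation candidate 'o029'.
rules_o029 = registry_level3_only(
    global_constraints=constrain_on_candidates(['o029'])
)
asns_o029 = generate(pool, rules_o029)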
Example #11
def test_toomanyoptions(args):
    """Test argument parsing for failures"""
    pool = AssociationPool()

    with pytest.raises(SystemExit):
        Main(args, pool=pool)
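
For context, a hedged sketch of one failing combination: --discover and --all-candidates sit in the same mutually exclusive group of Main's parser (next example), so argparse exits with an error:

# Sketch only: mutually exclusive flags trigger SystemExit.
with pytest.raises(SystemExit):
    Main(['--discover', '--all-candidates'], pool=AssociationPool())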
Example #12
    def __init__(self, args=None, pool=None):

        if args is None:
            args = sys.argv[1:]
        if isinstance(args, str):
            args = args.split(' ')

        parser = argparse.ArgumentParser(
            description='Generate Association Data Products',
            usage='asn_generate pool'
        )
        if pool is None:
            parser.add_argument(
                'pool', type=str, help='Association Pool'
            )
        op_group = parser.add_mutually_exclusive_group()
        op_group.add_argument(
            '-i', '--ids', nargs='+',
            dest='asn_candidate_ids',
            help='space-separated list of association candidate IDs to operate on.'
        )
        op_group.add_argument(
            '--discover',
            action='store_true',
            help='Produce discovered associations'
        )
        op_group.add_argument(
            '--all-candidates',
            action='store_true', dest='all_candidates',
            help='Produce all association candidate-specific associations'
        )
        parser.add_argument(
            '-p', '--path', type=str,
            default='.',
            help='Folder to save the associations to. Default: "%(default)s"'
        )
        parser.add_argument(
            '--save-orphans', dest='save_orphans',
            nargs='?', const='orphaned.csv', default=False,
            help='Save orphaned items into the specified table. Default: "%(default)s"'
        )
        parser.add_argument(
            '--version-id', dest='version_id',
            nargs='?', const=True, default=None,
            help=(
                'Version tag to add into association name and products.'
                ' If not specified, no version will be used.'
                ' If specified without a value, the current time is used.'
                ' Otherwise, the specified string will be used.'
            )
        )
        parser.add_argument(
            '-r', '--rules', action='append',
            help='Association Rules file.'
        )
        parser.add_argument(
            '--ignore-default', action='store_true',
            help='Do not include default rules. -r should be used if set.'
        )
        parser.add_argument(
            '--dry-run',
            action='store_true', dest='dry_run',
            help='Execute but do not save results.'
        )
        parser.add_argument(
            '-d', '--delimiter', type=str,
            default='|',
            help='''Delimiter
            to use if pool files are comma-separated-value
            (csv) type files. Default: "%(default)s"
            '''
        )
        parser.add_argument(
            '--pool-format', type=str,
            default='ascii',
            help=(
                'Format of the pool file.'
                ' Any format allowed by the astropy'
                ' Unified File I/O interface is allowed.'
                ' Default: "%(default)s"'
            )
        )
        parser.add_argument(
            '-v', '--verbose',
            action='store_const', dest='loglevel',
            const=logging.INFO, default=logging.NOTSET,
            help='Output progress and results.'
        )
        parser.add_argument(
            '-D', '--debug',
            action='store_const', dest='loglevel',
            const=logging.DEBUG,
            help='Output detailed debugging information.'
        )
        parser.add_argument(
            '--DMS',
            action='store_true', dest='DMS_enabled',
            help='Running under DMS workflow conditions.'
        )
        parser.add_argument(
            '--format',
            default='json',
            help='Format of the association files. Default: "%(default)s"'
        )
        parser.add_argument(
            '--version', action='version',
            version='%(prog)s {}'.format(__version__),
            help='Version of the generator.'
        )
        parser.add_argument(
            '--no-merge', action='store_true',
            help='Do not merge Level2 associations into one'
        )

        parsed = parser.parse_args(args=args)

        # Configure logging
        config = None
        if parsed.DMS_enabled:
            config = DMS_config
        logger = log_config(name=__package__, config=config)
        logger.setLevel(parsed.loglevel)

        # Preamble
        logger.info('Command-line arguments: {}'.format(args))
        logger.context.set('asn_candidate_ids', parsed.asn_candidate_ids)

        if pool is None:
            logger.info('Reading pool {}'.format(parsed.pool))
            self.pool = AssociationPool.read(
                parsed.pool, delimiter=parsed.delimiter,
                format=parsed.pool_format,
            )
        else:
            self.pool = pool

        # DMS: Add further info to logging.
        try:
            logger.context.set('program', self.pool[0]['PROGRAM'])
        except KeyError:
            pass

        # Determine mode of operation. Options are
        #  1) Only specified candidates
        #  2) Only discovered associations that do not match
        #     candidate associations
        #  3) Both discovered and all candidate associations.
        logger.info('Reading rules.')
        if (not parsed.discover
                and not parsed.all_candidates
                and parsed.asn_candidate_ids is None):
            parsed.discover = True
            parsed.all_candidates = True
        if parsed.discover or parsed.all_candidates:
            global_constraints = constrain_on_candidates(None)
        elif parsed.asn_candidate_ids is not None:
            global_constraints = constrain_on_candidates(
                parsed.asn_candidate_ids
            )

        self.rules = AssociationRegistry(
            parsed.rules,
            include_default=not parsed.ignore_default,
            global_constraints=global_constraints,
            name=CANDIDATE_RULESET
        )

        if parsed.discover:
            self.rules.update(
                AssociationRegistry(
                    parsed.rules,
                    include_default=not parsed.ignore_default,
                    name=DISCOVER_RULESET
                )
            )

        logger.info('Generating associations.')
        self.associations = generate(
            self.pool, self.rules, version_id=parsed.version_id
        )

        if parsed.discover:
            logger.debug(
                '# asns found before discover filtering={}'.format(
                    len(self.associations)
                )
            )
            self.associations = filter_discovered_only(
                self.associations,
                DISCOVER_RULESET,
                CANDIDATE_RULESET,
                keep_candidates=parsed.all_candidates,
            )
            self.rules.Utility.resequence(self.associations)

        # Do a grand merging. This is done particularly for
        # Level2 associations.
        if not parsed.no_merge:
            try:
                self.associations = self.rules.Utility.merge_asns(self.associations)
            except AttributeError:
                pass

        logger.info(self.__str__())

        if not parsed.dry_run:
            self.save(
                path=parsed.path,
                format=parsed.format,
                save_orphans=parsed.save_orphans
            )