Example #1
def overwrite_synphot_config(root):
    """Silently overwrite ``synphot`` configurable items to point to
    the given root directory.

    Parameters
    ----------
    root : str
        Root directory name.

    """
    subdir_keys = ['calspec', 'extinction', 'nonhst']

    # Need this for Windows support
    if root.startswith('http') or root.startswith('ftp'):
        sep = '/'
    else:
        sep = os.sep  # Can be / or \

    for cfgitem in _get_synphot_cfgitems():
        path, fname = os.path.split(cfgitem())

        i = np.where(list(map(path.__contains__, subdir_keys)))[0]
        if len(i) == 0:
            continue

        subdir = subdir_keys[i[0]]

        if subdir == 'nonhst':
            cfgval = sep.join([root, 'comp', subdir, fname])
        else:
            cfgval = sep.join([root, subdir, fname])

        cfgitem.set(cfgval)
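
A minimal usage sketch (the root paths below are hypothetical; the function assumes ``os``, ``numpy as np``, and a module-level ``_get_synphot_cfgitems()`` helper are in scope):

# Hypothetical local mirror of the synphot data tree.
overwrite_synphot_config('/data/synphot_root')

# A remote root also works; 'http'/'ftp' prefixes force '/' as the
# separator, which is what keeps this correct on Windows.
overwrite_synphot_config('https://example.org/synphot-data')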
Example #2
    def interpret_array(self, data):
        """Interpret DQ values for an array.

        .. warning::

            If the array is large and has a lot of flagged elements,
            this can be resource intensive.

        Parameters
        ----------
        data : ndarray
            DQ values.

        Returns
        -------
        dqs_by_flag : dict
            Dictionary mapping each interpreted DQ value to indices
            of affected array elements.

        """
        data = np.asarray(data, dtype=int)  # Ensure int array
        dqs_by_flag = {}

        def _one_flag(vf):
            dqs_by_flag[vf] = np.where((data & vf) != 0)

        # Skip good flag
        list(map(_one_flag, self._valid_flags[1:]))

        return dqs_by_flag
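
The core of ``interpret_array`` is the bitwise test ``(data & vf) != 0``. A tiny standalone sketch of the same logic (the flag values here are illustrative, not from any real DQ definition):

import numpy as np

data = np.array([[0, 1],
                 [4, 5]])       # DQ values; 5 means flags 1 and 4 are both set
valid_flags = [0, 1, 2, 4]      # first entry is the 'good' flag, skipped below

dqs_by_flag = {}
for vf in valid_flags[1:]:
    dqs_by_flag[vf] = np.where((data & vf) != 0)

print(dqs_by_flag[1])  # (array([0, 1]), array([1, 1])) -> elements (0,1), (1,1)
print(dqs_by_flag[4])  # (array([1, 1]), array([0, 1])) -> elements (1,0), (1,1)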
Example #3
    def interpret_array(self, data, verbose=True):
        """Interpret DQ values for an array.

        .. warning::

            If the array is large and has a lot of flagged elements,
            this can be resource intensive.

        Parameters
        ----------
        data : ndarray
            DQ values.

        verbose : bool
            Print info to screen.

        Returns
        -------
        dqs_by_flag : dict
            Dictionary mapping each interpreted DQ value to indices
            of affected array elements.

        """
        if verbose:
            print('Parsing DQ flag(s)...')
            t_beg = time.time()

        data = np.asarray(data, dtype=int)  # Ensure int array
        dqs_by_flag = {}

        def _one_flag(vf):
            dqs_by_flag[vf] = np.where((data & vf) != 0)

        # Skip good flag
        list(map(_one_flag, self._valid_flags[1:]))

        if verbose:
            t_end = time.time()
            nbad = np.sum(data != self._okflag)
            ntot = data.size
            pbad = 100.0 * nbad / ntot
            print('Done!\nRun time: {0:.3f} s\nN_FLAGGED: {1}/{2} '
                  '({3:.3f}%)'.format(t_end - t_beg, nbad, ntot, pbad))
            for key in sorted(dqs_by_flag):
                nbad = len(dqs_by_flag[key][0])
                pbad = 100.0 * nbad / ntot
                print('FLAG={0:<5d}: {1} ({2:.3f}%)'.format(key, nbad, pbad))

        return dqs_by_flag
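
The verbose summary reduces to simple counting; the same arithmetic on a toy array (values are illustrative):

import numpy as np

data = np.array([0, 1, 4, 5, 0, 0])
okflag = 0

nbad = np.sum(data != okflag)      # 3 flagged elements
pbad = 100.0 * nbad / data.size    # 50.000%
print('N_FLAGGED: {0}/{1} ({2:.3f}%)'.format(nbad, data.size, pbad))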
Example #4
def _get_valid_indices(shape, ix0, ix1, iy0, iy1):
    """Give array shape and desired indices, return indices that are
    correctly bounded by the shape."""
    ymax, xmax = shape

    if ix0 < 0:
        ix0 = 0
    if ix1 > xmax:
        ix1 = xmax
    if iy0 < 0:
        iy0 = 0
    if iy1 > ymax:
        iy1 = ymax

    if iy1 <= iy0 or ix1 <= ix0:
        raise IndexError('array[{0}:{1},{2}:{3}] is invalid'.format(
            iy0, iy1, ix0, ix1))

    return list(map(int, [ix0, ix1, iy0, iy1]))
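
Clamping behavior on assumed inputs, with the function above in scope; note that a slice lying entirely outside the array cannot be repaired and raises ``IndexError``:

import numpy as np

arr = np.zeros((50, 100))  # shape is (ny, nx)

# The requested slice spills over both x edges; it is clamped to the array.
ix0, ix1, iy0, iy1 = _get_valid_indices(arr.shape, -5, 120, 10, 60)
print(ix0, ix1, iy0, iy1)  # 0 100 10 50

# This one lies fully outside the array in x and raises IndexError:
# _get_valid_indices(arr.shape, 200, 300, 10, 60)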
Example #5
    def query_object_async(self,
                           wavelength_range=None,
                           wavelength_type='',
                           wavelength_accuracy=None,
                           element_spectrum=None,
                           minimal_abundance=None,
                           depl_factor=None,
                           lower_level_energy_range=None,
                           upper_level_energy_range=None,
                           nmax=None,
                           multiplet=None,
                           transitions=None,
                           show_fine_structure=None,
                           show_auto_ionizing_transitions=None):
        """
        Returns
        -------
        response : `requests.Response`
            The HTTP response returned from the service.
        """
        if self._default_form_values is None:
            response = self._request("GET",
                                     url=self.FORM_URL,
                                     data={},
                                     timeout=self.TIMEOUT)
            bs = BeautifulSoup(response.text, 'html.parser')
            form = bs.find('form')
            self._default_form_values = self._get_default_form_values(form)
        default_values = self._default_form_values
        wltype = (wavelength_type or default_values.get('air', '')).lower()
        if wltype in ('air', 'vacuum'):
            air = wltype.capitalize()
        else:
            raise ValueError('parameter wavelength_type must be either "air" '
                             'or "vacuum".')
        wlrange = wavelength_range or []
        if len(wlrange) not in (0, 2):
            raise ValueError('Length of `wavelength_range` must be 2 or 0, '
                             'but is: {}'.format(len(wlrange)))
        if not is_valid_transitions_param(transitions):
            raise ValueError(
                'Invalid parameter "transitions": {0!r}'.format(transitions))
        if transitions is None:
            _type = self._default_form_values.get('type')
            type2 = self._default_form_values.get('type2')
        else:
            s = str(transitions)
            if len(s.split(',')) > 1:
                _type = 'Sel'
                type2 = s.split(',')
            else:
                _type = s
                type2 = ''
        # convert wavelengths in incoming wavelength range to Angstroms
        wlrange_in_angstroms = (wl.to(u.Angstrom,
                                      equivalencies=u.spectral()).value
                                for wl in wlrange)

        lower_level_erange = lower_level_energy_range
        if lower_level_erange is not None:
            lower_level_erange = lower_level_erange.to(
                u.cm**-1, equivalencies=u.spectral()).value
        upper_level_erange = upper_level_energy_range
        if upper_level_erange is not None:
            upper_level_erange = upper_level_erange.to(
                u.cm**-1, equivalencies=u.spectral()).value
        payload = {
            'wavl': '-'.join(map(str, wlrange_in_angstroms)),
            'wave': 'Angstrom',
            'air': air,
            'wacc': wavelength_accuracy,
            'elmion': element_spectrum,
            'abun': minimal_abundance,
            'depl': depl_factor,
            'elo': lower_level_erange,
            'ehi': upper_level_erange,
            'ener': 'cm^-1',
            'nmax': nmax,
            'term': multiplet,
            'type': _type,
            'type2': type2,
            'hydr': show_fine_structure,
            'auto': show_auto_ionizing_transitions
        }
        response = self._submit_form(payload)
        return response
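
The ``transitions`` handling above splits a comma-separated specification into the form's ``type``/``type2`` fields; a tiny sketch of that branch in isolation (the transition names are illustrative):

def _parse_transitions(transitions):
    # Multiple comma-separated transition types select 'Sel' plus the
    # individual values; a single value passes through unchanged.
    s = str(transitions)
    if len(s.split(',')) > 1:
        return 'Sel', s.split(',')
    return s, ''

print(_parse_transitions('E1'))     # ('E1', '')
print(_parse_transitions('E1,M1'))  # ('Sel', ['E1', 'M1'])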
Example #6
    def query_object_async(self, wavelength_range=None, wavelength_type='',
                           wavelength_accuracy=None, element_spectrum=None,
                           minimal_abundance=None, depl_factor=None,
                           lower_level_energy_range=None,
                           upper_level_energy_range=None, nmax=None,
                           multiplet=None, transitions=None,
                           show_fine_structure=None,
                           show_auto_ionizing_transitions=None,
                           output_columns=('spec', 'type', 'conf',
                                           'term', 'angm', 'prob',
                                           'ener')):
        """
        Returns
        -------
        response : `requests.Response`
            The HTTP response returned from the service.
        """
        if self._default_form_values is None:
            response = self._request("GET", url=self.FORM_URL, data={},
                                     timeout=self.TIMEOUT)
            bs = BeautifulSoup(response.text, 'html.parser')
            form = bs.find('form')
            self._default_form_values = self._get_default_form_values(form)
        default_values = self._default_form_values
        wltype = (wavelength_type or default_values.get('air', '')).lower()
        if wltype in ('air', 'vacuum'):
            air = wltype.capitalize()
        else:
            raise ValueError('parameter wavelength_type must be either "air" '
                             'or "vacuum".')
        wlrange = wavelength_range or []
        if len(wlrange) not in (0, 2):
            raise ValueError('Length of `wavelength_range` must be 2 or 0, '
                             'but is: {}'.format(len(wlrange)))
        if not is_valid_transitions_param(transitions):
            raise ValueError('Invalid parameter "transitions": {0!r}'
                             .format(transitions))
        if transitions is None:
            _type = self._default_form_values.get('type')
            type2 = self._default_form_values.get('type2')
        else:
            s = str(transitions)
            if len(s.split(',')) > 1:
                _type = 'Sel'
                type2 = s.split(',')
            else:
                _type = s
                type2 = ''
        # convert wavelengths in incoming wavelength range to Angstroms
        wlrange_in_angstroms = (wl.to(u.Angstrom,
                                      equivalencies=u.spectral()).value
                                for wl in wlrange)

        lower_level_erange = lower_level_energy_range
        if lower_level_erange is not None:
            lower_level_erange = lower_level_erange.to(
                u.cm ** -1, equivalencies=u.spectral()).value
        upper_level_erange = upper_level_energy_range
        if upper_level_erange is not None:
            upper_level_erange = upper_level_erange.to(
                u.cm ** -1, equivalencies=u.spectral()).value
        payload = {
            'wavl': '-'.join(map(str, wlrange_in_angstroms)),
            'wave': 'Angstrom',
            'air': air,
            'wacc': wavelength_accuracy,
            'elmion': element_spectrum,
            'abun': minimal_abundance,
            'depl': depl_factor,
            'elo': lower_level_erange,
            'ehi': upper_level_erange,
            'ener': 'cm^-1',
            'nmax': nmax,
            'term': multiplet,
            'type': _type,
            'type2': type2,
            'hydr': show_fine_structure,
            'auto': show_auto_ionizing_transitions,
            'form': output_columns,
            'tptype': 'as_a'}
        response = self._submit_form(payload)
        return response
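
A hedged usage sketch: ``AtomicLineList`` is assumed to be the class exposing this method, and the argument values are illustrative. Wavelengths and energies must be astropy quantities so the ``.to()`` conversions above succeed:

import astropy.units as u

alq = AtomicLineList()  # assumed class exposing query_object_async()

response = alq.query_object_async(
    wavelength_range=(1 * u.angstrom, 100 * u.nm),  # converted to Angstrom
    wavelength_type='vacuum',                       # must be 'air' or 'vacuum'
    element_spectrum='C II-IV',
    lower_level_energy_range=u.Quantity(
        (600.0 * u.cm**-1, 1000.0 * u.cm**-1)))
print(response.status_code)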
Example #7
File: mosaic.py Project: rij/jwst
    def make_mosaic(self,
                    images,
                    outpath='',
                    outsuffix='mosaic',
                    clobber=False,
                    debug=False):
        """Construct one mosaic for each dataset, for multiple datasets.

        Images are sorted into datasets by JWST naming convention,
        ``jw<PPPPP><OOO><VVV>_<GGSAA>_<EEEEE>_<detector>_<suffix>.fits``,
        where the ROOTNAME is defined as
        ``jw<PPPPP><OOO><VVV>_<GGSAA>_<EEEEE>``.
        Each mosaic is saved as ``ROOTNAME_<outsuffix>.fits``,
        a single-extension FITS image.

        Parameters
        ----------
        images : list
            List of filenames.

        outpath : str
            Output directory. If not given, it is the current
            working directory.

        outsuffix : str
            Output suffix.

        clobber : bool
            If `True`, overwrite existing mosaic file(s).

        debug : bool
            If `True`, print extra information to screen.

        Returns
        -------
        mosaiclist : list
            List of mosaic filenames.

        """
        # Separate different datasets
        root_list = {}
        for im in images:
            rootname = os.path.basename('_'.join(im.split('_')[:-2]))
            if rootname not in root_list:
                root_list[rootname] = [im]
            else:
                root_list[rootname].append(im)

        # Process each dataset
        def _mosaic_one(rootname):
            imlist = root_list[rootname]
            outname = os.path.join(outpath,
                                   '{0}_{1}.fits'.format(rootname, outsuffix))

            # Avoid regenerating the mosaic if it already exists.
            # This also avoids crashing at the very end.
            if not clobber and os.path.exists(outname):
                if debug:
                    print('Using existing {0}'.format(outname))
                return outname

            mosaic = self.get_single_mosaic_array(imlist)
            if mosaic is None:
                if debug:
                    print('No mosaic for {0}'.format(imlist))
                return ''

            hdu = fits.PrimaryHDU(mosaic)

            # Inherit some keywords from the primary header of the first image.
            prihdr = fits.getheader(imlist[0])
            for key in ('ROOTNAME', 'TARGNAME', 'INSTRUME', 'FILTER', 'PUPIL',
                        'DATE-OBS', 'TIME-OBS'):
                if key not in prihdr:
                    continue
                hdu.header[key] = prihdr[key]

            hdu.header.add_history('Mosaic from {0}'.format(','.join(imlist)))
            hdu.writeto(outname, overwrite=clobber)
            return outname

        mosaiclist = sorted(map(_mosaic_one, list(root_list.keys())))
        return [m for m in mosaiclist if m]
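
A usage sketch with hypothetical filenames; ``Mosaic`` stands in for whatever class defines ``make_mosaic``:

from glob import glob

# Hypothetical: per-detector JWST exposures for one or more datasets.
images = sorted(glob('jw*_cal.fits'))

m = Mosaic()  # assumed container class
mosaiclist = m.make_mosaic(images, outpath='/tmp/mosaics',
                           clobber=True, debug=True)
print(mosaiclist)  # one ROOTNAME_mosaic.fits per dataset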
Example #8
def check_conesearch_sites(destdir=os.curdir, verbose=True, parallel=True,
                           url_list='default'):
    """
    Validate Cone Search Services.

    .. note::

        URLs are unescaped prior to validation.

        Only queries with ``<testQuery>`` parameters are checked;
        meta-data and erroneous queries are not performed.

    Parameters
    ----------
    destdir : str, optional
        Directory to store output files. Will be created if does
        not exist. Existing files with these names will be deleted
        or replaced:

            * conesearch_good.json
            * conesearch_warn.json
            * conesearch_exception.json
            * conesearch_error.json

    verbose : bool, optional
        Print extra info to log.

    parallel : bool, optional
        Enable multiprocessing.

    url_list : list of string, optional
        Only check these access URLs against
        ``astroquery.vo_conesearch.validator.conf.conesearch_master_list``
        and ignore the others, which will not appear in output files.
        By default, check those in
        ``astroquery.vo_conesearch.validator.conf.conesearch_urls``.
        If `None`, check everything.

    Raises
    ------
    IOError
        Invalid destination directory.

    timeout
        URL request timed out.

    ValidationMultiprocessingError
        Multiprocessing failed.

    """
    if url_list == 'default':
        url_list = conf.conesearch_urls

    if (not isinstance(destdir, six.string_types) or len(destdir) == 0 or
            os.path.exists(destdir) and not os.path.isdir(destdir)):
        raise IOError('Invalid destination directory')  # pragma: no cover

    if not os.path.exists(destdir):
        os.mkdir(destdir)

    # Output dir created by votable.validator
    out_dir = os.path.join(destdir, 'results')

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    # Output files
    db_file = OrderedDict()
    db_file['good'] = os.path.join(destdir, 'conesearch_good.json')
    db_file['warn'] = os.path.join(destdir, 'conesearch_warn.json')
    db_file['excp'] = os.path.join(destdir, 'conesearch_exception.json')
    db_file['nerr'] = os.path.join(destdir, 'conesearch_error.json')

    # JSON dictionaries for output files
    js_tree = {}
    for key in db_file:
        js_tree[key] = VOSDatabase.create_empty()

        # Delete existing files, if any, to be on the safe side.
        # Else can cause confusion if program exited prior to
        # new files being written but old files are still there.
        if os.path.exists(db_file[key]):  # pragma: no cover
            os.remove(db_file[key])
            if verbose:
                log.info('Existing file {0} deleted'.format(db_file[key]))

    # Master VO database from registry. Silence all the warnings.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        js_mstr = VOSDatabase.from_registry(
            conf.conesearch_master_list, encoding='binary',
            show_progress=verbose)

    # Validate only a subset of the services.
    if url_list is not None:
        # Make sure URL is unique and fixed.
        url_list = set(map(
            unescape_all,
            [cur_url.encode('utf-8') if isinstance(cur_url, str) else cur_url
             for cur_url in url_list]))
        uniq_rows = len(url_list)
        url_list_processed = []  # To track if given URL is valid in registry
        if verbose:
            log.info('Only {0}/{1} site(s) are validated'.format(uniq_rows,
                                                                 len(js_mstr)))
    # Validate all services.
    else:
        uniq_rows = len(js_mstr)

    key_lookup_by_url = {}

    # Process each catalog in the registry.
    for cur_key, cur_cat in js_mstr.get_catalogs():
        cur_url = cur_cat['url'].encode('utf-8')

        # Skip if:
        #   a. not a Cone Search service
        #   b. not in given subset, if any
        if ((cur_cat['cap_type'] != b'conesearch') or
                (url_list is not None and cur_url not in url_list)):
            continue

        # Use testQuery to return non-empty VO table with max verbosity.
        testquery_pars = parse_cs(cur_cat['ivoid'], cur_cat['cap_index'])
        cs_pars_arr = ['{}={}'.format(key, testquery_pars[key]).encode('utf-8')
                       for key in testquery_pars]
        cs_pars_arr += [b'VERB=3']

        # Track the service.
        key_lookup_by_url[cur_url + b'&'.join(cs_pars_arr)] = cur_key
        if url_list is not None:
            url_list_processed.append(cur_url)

    # Give warning if any of the user given subset is not in the registry.
    if url_list is not None:
        url_list_skipped = url_list - set(url_list_processed)
        n_skipped = len(url_list_skipped)
        if n_skipped > 0:
            warn_str = '{0} not found in registry! Skipped:\n'.format(
                n_skipped)
            for cur_url in url_list_skipped:
                warn_str += '\t{0}\n'.format(cur_url)
            warnings.warn(warn_str, AstropyUserWarning)

    all_urls = list(key_lookup_by_url)
    timeout = data.conf.remote_timeout
    map_args = [(out_dir, url, timeout) for url in all_urls]

    # Validate URLs
    if parallel:
        pool = multiprocessing.Pool()
        try:
            mp_list = pool.map(_do_validation, map_args)
        except Exception as exc:  # pragma: no cover
            raise ValidationMultiprocessingError(
                'An exception occurred during parallel processing '
                'of validation results: {0}'.format(exc))
    else:
        mp_list = map(_do_validation, map_args)

    # Categorize validation results
    for r in mp_list:
        db_key = r['out_db_name']
        cat_key = key_lookup_by_url[r.url]
        cur_cat = js_mstr.get_catalog(cat_key)
        _copy_r_to_cat(r, cur_cat)
        js_tree[db_key].add_catalog(cat_key, cur_cat)

    # Write to HTML
    html_subsets = result.get_result_subsets(mp_list, out_dir)
    html.write_index(html_subsets, all_urls, out_dir)
    if parallel:
        html_subindex_args = [(out_dir, html_subset, uniq_rows)
                              for html_subset in html_subsets]
        pool.map(_html_subindex, html_subindex_args)
    else:
        for html_subset in html_subsets:
            _html_subindex((out_dir, html_subset, uniq_rows))

    # Write to JSON
    n = {}
    n_tot = 0
    for key in db_file:
        n[key] = len(js_tree[key])
        n_tot += n[key]
        js_tree[key].to_json(db_file[key], overwrite=True)
        if verbose:
            log.info('{0}: {1} catalog(s)'.format(key, n[key]))

    # Checksum
    if verbose:
        log.info('total: {0} out of {1} catalog(s)'.format(n_tot, uniq_rows))

    if n['good'] == 0:  # pragma: no cover
        warnings.warn(
            'No good sites available for Cone Search.', AstropyUserWarning)
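
A minimal invocation sketch (the output directory is hypothetical, and running this performs real network requests against the VO registry):

# Validate the default subset of Cone Search URLs without multiprocessing;
# JSON databases and HTML reports end up under ./vo_results.
check_conesearch_sites(destdir='vo_results', verbose=True, parallel=False)

# To validate every service in the master registry (much slower):
# check_conesearch_sites(destdir='vo_results', url_list=None)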
Example #9
def shrink_input_images(images, outpath='', new_width=100, **kwargs):
    """Shrink input images for mosaic, if necessary.

    The shrunken images are not deleted on exit;
    the user has to remove them manually.

    Parameters
    ----------
    images : list
        List of input image files.

    outpath : str
        Output directory. This must be different from input
        directory because image names remain the same.

    new_width : int
        Width of the shrunken image. Height will be scaled accordingly.
        Because this will be converted into a zoom factor for
        :func:`~stginga.utils.scale_image`, the requested width might not
        be exactly what you get, but it should be close.

    kwargs : dict
        Optional keywords for :func:`~stginga.utils.scale_image`.

    Returns
    -------
    outlist : list
        List of images to use. If shrunken, the list will include
        the new image in the ``outpath`` (same filename).
        If the input is already small enough, the shrinking process is
        skipped and the list will contain the input image instead.

    """
    from stginga.utils import scale_image

    outpath = os.path.abspath(outpath)
    debug = kwargs.get('debug', False)

    # Use the same extension as scale_image.
    if 'ext' in kwargs:
        ext = kwargs['ext']
    else:
        ext = ('SCI', 1)
        kwargs['ext'] = ext

    def _shrink_one(infile):
        with fits.open(infile) as pf:
            old_width = pf[ext].data.shape[1]  # (ny, nx)

        # Shrink it.
        if old_width > new_width:
            path, fname = os.path.split(infile)

            # Skip instead of just returning the input image because
            # we want to avoid mosaicking large images.
            if os.path.abspath(path) == outpath:
                print('Input and output directories are the same: '
                      '{0}; Skipping {1}'.format(outpath, fname))
                outfile = ''
            else:
                outfile = os.path.join(outpath, fname)
                zoom_factor = new_width / old_width
                scale_image(infile, outfile, zoom_factor, **kwargs)

        # Input already small enough.
        else:
            outfile = infile
            if debug:
                print('{0} has width {1} <= {2}; Using input '
                      'file'.format(infile, old_width, new_width))

        return outfile

    outlist = [s for s in map(_shrink_one, images) if s]
    return outlist
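
A usage sketch with assumed paths; inputs are expected to carry a ``('SCI', 1)`` extension unless ``ext`` is overridden through ``kwargs``:

# Hypothetical inputs. The output directory must differ from the input
# directory because the shrunken files keep their original names.
images = ['/data/big/image1.fits', '/data/big/image2.fits']
outlist = shrink_input_images(images, outpath='/data/small',
                              new_width=100, debug=True)
print(outlist)  # shrunken copies, or the originals if already small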