Example #1
    def set_lines(self, verbose=True, use_cache=True, use_ISM_table=True):
        """ Parse the lines of interest for the current line list.

        Fills ``self._data`` with the subset of ``self._fulltable`` selected
        by ``self.list`` and stores the result in the module-level CACHE.

        Parameters
        ----------
        verbose : bool, optional
          Print a message for set entries not found in the data tables.
        use_cache : bool, optional
          Cache the linelist for faster repeat performance.
        use_ISM_table : bool, optional
          For speed, use a saved ISM table instead of reading from
          original source files.
        """
        import warnings

        global CACHE
        key = self.list
        # Fast paths: previously-cached result, or the pre-built ISM table
        if use_cache and key in CACHE['data']:
            self._data = CACHE['data'][key]
            return
        elif use_ISM_table and self.list in ('ISM', 'Strong', 'EUV', 'HI'):
            data = QTable(Table.read(lt_path + '/data/lines/ISM_table.fits'))
            if self.list != 'ISM':
                # Sub-lists are flagged via boolean 'is_<list>' columns
                cond = data['is_' + self.list]
                self._data = data[cond]
            else:
                self._data = data
            CACHE['data'][key] = self._data
            return

        indices = []    # row indices into self._fulltable (scalars or arrays)
        set_flags = []  # flag-column names in the line-set file

        # Map the requested list onto molecule rows or set flags
        if self.list in ['H2', 'CO']:
            gdi = np.where(self._fulltable['mol'] == self.list)[0]
            if len(gdi) == 0:
                raise IndexError(
                    'set_lines: Found no {:s} molecules! Read more data'.format(self.list))
            indices.append(gdi)
        elif self.list == 'ISM':
            set_flags.append('fISM')
        elif self.list == 'Strong':
            set_flags.append('fSI')
        elif self.list == 'HI':
            set_flags.append('fHI')
        elif self.list == 'EUV':
            set_flags.append('fEUV')
        elif self.list == 'Galaxy':
            set_flags.append('fgE')
            set_flags.append('fgA')
        else:
            raise ValueError(
                'set_lines: Not ready for this: {:s}'.format(self.list))

        # Deal with Defined sets: match each flagged set entry to the full
        # table by rest wavelength
        if len(set_flags) > 0:
            # Read standard file of line sets
            set_data = lilp.read_sets()
            # Hoist the bare ndarray once for speed (assuming Angstroms)
            wrest = self._fulltable['wrest'].value
            for sflag in set_flags:
                gdset = np.where(set_data[sflag] == 1)[0]
                # Match to wavelengths within a tight tolerance
                for igd in gdset:
                    mt = np.where(
                        np.abs(set_data[igd]['wrest'] - wrest) < 9e-5)[0]
                    if len(mt) == 1:
                        # Over-ride name!  Column-first indexing writes
                        # through to the table unambiguously.
                        self._fulltable['name'][mt[0]] = set_data[igd]['name']
                        indices.append(mt[0])
                    elif len(mt) > 1:
                        wmsg = 'WARNING: Multiple lines with wrest={:g}'.format(
                            set_data[igd]['wrest'])
                        warnings.warn(wmsg)
                        warnings.warn(
                            'Taking the first entry. Maybe use higher precision.')
                        indices.append(mt[0])
                    else:
                        if verbose:
                            print('set_lines: Did not find {:s} in data Tables'.format(
                                set_data[igd]['name']))

        # Collate unique indices.  np.concatenate (rather than np.array)
        # copes with a mix of scalar matches and index arrays (H2/CO), and
        # the explicit int fallback keeps an empty result usable as a table
        # index instead of a float empty array.
        if len(indices) > 0:
            all_idx = np.unique(
                np.concatenate([np.atleast_1d(idx) for idx in indices]))
        else:
            all_idx = np.array([], dtype=int)

        # Parse and sort (consider masking instead)
        tmp_tab = self._fulltable[all_idx]
        tmp_tab.sort('wrest')

        self._data = tmp_tab
        CACHE['data'][key] = self._data
Example #2
    def set_lines(self, verbose=True, use_cache=True, use_ISM_table=True):
        """ Parse the lines of interest for the current line list.

        Fills ``self._data`` with the subset of ``self._fulltable`` selected
        by ``self.list`` and stores the result in the module-level CACHE.

        Parameters
        ----------
        verbose : bool, optional
          Print a message for set entries not found in the data tables.
        use_cache : bool, optional
          Cache the linelist for faster repeat performance.
        use_ISM_table : bool, optional
          For speed, use a saved ISM table instead of reading from
          original source files.
        """
        # Local import so this method stands alone (warnings is used below)
        import warnings

        global CACHE
        key = self.list
        # Fast paths: previously-cached result, or the pre-built ISM table
        if use_cache and key in CACHE['data']:
            self._data = CACHE['data'][key]
            return
        elif use_ISM_table and self.list in ('ISM', 'Strong', 'HI'):
            data = Table.read(lt_path + '/data/lines/ISM_table.fits')
            if self.list != 'ISM':
                # Sub-lists are flagged via boolean 'is_<list>' columns
                cond = data['is_' + self.list]
                self._data = data[cond]
            else:
                self._data = data
            CACHE['data'][key] = self._data
            return

        indices = []    # row indices into self._fulltable (scalars or arrays)
        set_flags = []  # flag-column names in the line-set file

        # Map the requested list onto molecule rows or set flags
        if self.list in ['H2', 'CO']:
            gdi = np.where(self._fulltable['mol'] == self.list)[0]
            if len(gdi) == 0:
                raise IndexError(
                    'set_lines: Found no {:s} molecules! Read more data'.
                    format(self.list))
            indices.append(gdi)
        elif self.list == 'ISM':
            set_flags.append('fISM')
        elif self.list == 'Strong':
            set_flags.append('fSI')
        elif self.list == 'HI':
            set_flags.append('fHI')
        elif self.list == 'EUV':
            set_flags.append('fEUV')
        elif self.list == 'Galaxy':
            set_flags.append('fgE')
            set_flags.append('fgA')
        else:
            raise ValueError('set_lines: Not ready for this: {:s}'.format(
                self.list))

        # Deal with Defined sets: match each flagged set entry to the full
        # table by rest wavelength
        if len(set_flags) > 0:
            # Read standard file of line sets
            set_data = lilp.read_sets()
            # Hoist the column once for speed; values assumed in Angstroms
            # NOTE(review): siblings use .value here — confirm whether this
            # table's 'wrest' column carries units.
            wrest = self._fulltable['wrest']
            for sflag in set_flags:
                gdset = np.where(set_data[sflag] == 1)[0]
                # Match to wavelengths within a tight tolerance
                for igd in gdset:
                    mt = np.where(
                        np.abs(set_data['wrest'][igd] - wrest) < 9e-5)[0]
                    if len(mt) == 1:
                        # Over-ride name with the set's preferred label
                        self._fulltable['name'][mt[0]] = set_data['name'][igd]
                        indices.append(mt[0])
                    elif len(mt) > 1:
                        wmsg = 'WARNING: Multiple lines with wrest={:g}'.format(
                            set_data['wrest'][igd])
                        warnings.warn(wmsg)
                        warnings.warn(
                            'Taking the first entry. Maybe use higher precision.'
                        )
                        indices.append(mt[0])
                    else:
                        if verbose:
                            print(
                                'set_lines: Did not find {:s} in data Tables'.
                                format(set_data[igd]['name']))

        # Collate unique indices.  np.concatenate (rather than np.array)
        # copes with a mix of scalar matches and index arrays (H2/CO), and
        # the explicit int fallback keeps an empty result usable as a table
        # index instead of a float empty array.
        if len(indices) > 0:
            all_idx = np.unique(
                np.concatenate([np.atleast_1d(idx) for idx in indices]))
        else:
            all_idx = np.array([], dtype=int)

        # Parse and sort (consider masking instead)
        tmp_tab = self._fulltable[all_idx]
        tmp_tab.sort('wrest')

        self._data = tmp_tab
        CACHE['data'][key] = self._data
Example #3
File: linelist.py — Project: nhmc/linetools
    def set_lines(self, verbose=True, use_cache=True):
        """ Parse the lines of interest.

        Fills ``self._data`` with the rows of ``self._fulltable`` selected
        by the lists in ``self.lists``, caching the result keyed on
        ``tuple(self.lists)``.

        Parameters
        ----------
        verbose : bool, optional
          Print a message for set entries not found in the data tables.
        use_cache : bool, optional
          Cache the linelist for faster repeat performance.
        """
        import warnings

        global CACHE
        key = tuple(self.lists)
        if use_cache and key in CACHE['data']:
            self._data = CACHE['data'][key]
            return

        indices = []    # row indices into self._fulltable (scalars or arrays)
        set_flags = []  # flag-column names in the line-set file

        # Map each requested list onto molecule rows or set flags
        for llist in self.lists:
            if llist in ['H2', 'CO']:
                gdi = np.where(self._fulltable['mol'] == llist)[0]
                if len(gdi) == 0:
                    raise IndexError(
                        'set_lines: Found no {:s} molecules! Read more data'.format(llist))
                indices.append(gdi)
            elif llist == 'ISM':
                set_flags.append('fISM')
            elif llist == 'Strong':
                set_flags.append('fSI')
            elif llist == 'HI':
                set_flags.append('fHI')
            elif llist == 'EUV':
                set_flags.append('fEUV')
            else:
                raise ValueError('set_lines: Not ready for this: {:s}'.format(llist))

        # Deal with Defined sets: match each flagged set entry to the full
        # table by rest wavelength
        if len(set_flags) > 0:
            # Read standard file of line sets
            set_data = lilp.read_sets()
            # Hoist the bare ndarray once for speed (assuming Angstroms)
            wrest = self._fulltable['wrest'].value
            for sflag in set_flags:
                gdset = np.where(set_data[sflag] == 1)[0]
                # Match to wavelengths within a tight tolerance
                for igd in gdset:
                    mt = np.where(
                        np.abs(set_data[igd]['wrest'] - wrest) < 9e-5)[0]
                    if len(mt) == 1:
                        # Over-ride name!  Column-first indexing writes
                        # through to the table unambiguously.
                        self._fulltable['name'][mt[0]] = set_data[igd]['name']
                        indices.append(mt[0])
                    elif len(mt) > 1:
                        wmsg = 'WARNING: Multiple lines with wrest={:g}'.format(
                            set_data[igd]['wrest'])
                        warnings.warn(wmsg)
                        warnings.warn('Taking the first entry. Maybe use higher precision.')
                        indices.append(mt[0])
                    else:
                        if verbose:
                            print('set_lines: Did not find {:s} in data Tables'.format(
                                set_data[igd]['name']))

        # Collate unique indices.  self.lists may mix molecule lists (which
        # append index ARRAYS) with set lists (which append SCALARS), so
        # np.array(indices) would build a ragged/object array; concatenate
        # the pieces instead.  The int fallback keeps an empty result usable
        # as a table index.
        if len(indices) > 0:
            all_idx = np.unique(
                np.concatenate([np.atleast_1d(itt) for itt in indices]))
        else:
            all_idx = np.array([], dtype=int)

        # Parse and sort (consider masking instead)
        tmp_tab = self._fulltable[all_idx]
        tmp_tab.sort('wrest')

        self._data = tmp_tab
        CACHE['data'][key] = self._data