Example #1
 def concatImages(self, sweepType, removeBkg=True):
     """
     This won't work well if the background level or QE of the pixel changes between sweeps...
     Should remove this first
     """
     sweepType = sweepType.lower()
     assert sweepType in ('x', 'y')
     imageList = None
     for s in self.config.beammap.sweep.sweeps:
         if s.sweeptype in sweepType:
             getLogger('beammap').info('loading: ' + str(s))
             imList = self.loadSweepImgs(s).astype(float)
             if removeBkg:
                 bkgndList = np.median(imList, axis=0)
                 imList -= bkgndList
             direction = -1 if s.sweepdirection == '-' else 1
             if imageList is None:
                 imageList = imList[::direction, :, :]
             else:
                 imageList = np.concatenate(
                     (imageList, imList[::direction, :, :]), axis=0)
     if sweepType == 'x':
         self.x_images = imageList
     else:
         self.y_images = imageList
     return imageList
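A quick illustration of the background-removal step above (synthetic data, illustrative only): the per-pixel median over the frame axis estimates the static background that a moving source never dominates.

import numpy as np

imList = np.random.poisson(5.0, size=(100, 10, 10)).astype(float)  # (frame, y, x)
bkgndList = np.median(imList, axis=0)  # per-pixel background estimate, shape (10, 10)
imList -= bkgndList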
Example #2
    def __init__(self, config=None):
        self.config_file = DEFAULT_CONFIG_FILE if config is None else config

        self.cfg = mkidpipeline.config.load_task_config(self.config_file)
        self.use_wavecal = self.cfg.flatcal.use_wavecal
        self.dataDir = self.cfg.paths.data
        self.out_directory = self.cfg.paths.out

        self.intTime = self.cfg.flatcal.chunk_time

        self.dark_start = []
        self.dark_int = []

        self.xpix = self.cfg.beammap.ncols
        self.ypix = self.cfg.beammap.nrows
        self.deadtime = self.cfg.instrument.deadtime

        self.wvlStart = self.cfg.instrument.wvl_start
        self.wvlStop = self.cfg.instrument.wvl_stop

        self.countRateCutoff = self.cfg.flatcal.rate_cutoff
        self.fractionOfChunksToTrim = self.cfg.flatcal.trim_fraction
        self.timeSpacingCut = None

        self.obs = None
        self.beamImage = None
        self.wvlFlags = None
        self.wvlBinEdges = None
        self.wavelengths = None

        if self.use_wavecal:
            sol_file = self.cfg.wavcal  #TODO make sure this works then utilize this in speccal as well
            sol = mkidpipeline.calibration.wavecal.Solution(sol_file[0])
            r, resid = sol.find_resolving_powers(cache=True)
            self.r_list = np.nanmedian(r, axis=0)
        else:
            self.r_list = None

        self.save_plots = self.cfg.flatcal.plots.lower() == 'all'
        self.summary_plot = self.cfg.flatcal.plots.lower() in ('all',
                                                               'summary')
        if self.save_plots:
            getLogger(__name__).warning(
                "Commanded to save debug plots; this will add ~30 min to runtime."
            )

        self.spectralCubes = None
        self.cubeEffIntTimes = None
        self.countCubes = None
        self.cubeWeights = None
        self.weightErr = None
        self.totalCube = None
        self.totalFrame = None
        self.flatWeights = None
        self.countCubesToSave = None
        self.flatWeightErr = None
        self.flatFlags = None
        self.flatWeightsforplot = None
        self.plotName = None
        self.fig = None
Example #3
    def getAbsOffset(self, shiftedTimes, auto=True, locLimit=None):
        """
        The autocorrelation function can only calculate relative time differences
        between pixels. This function defines the absolute time reference (ie. the
        location of the peak)

        INPUTS:
            shiftedTimes: a list of pixel time streams shifted to match up
            auto: if False then ask user to click on a plot
        """
        if locLimit is None or not np.isfinite(
                locLimit) or locLimit < 0 or locLimit >= len(shiftedTimes):
            locLimit = -1
        offset = np.argmax(np.sum(shiftedTimes[:locLimit], axis=0))
        if auto: return offset

        getLogger('beammap').info("Please click the correct peak")
        fig, ax = plt.subplots()
        for p_i in range(len(shiftedTimes)):
            ax.plot(shiftedTimes[p_i])
        ax.plot(np.sum(shiftedTimes, axis=0), 'k-')
        ln = ax.axvline(offset, c='b')

        def onclick(event):
            nonlocal offset  # propagate the clicked location to the enclosing scope
            if fig.canvas.manager.toolbar._active is None:
                offset = event.xdata
                getLogger('beammap').info(offset)
                ln.set_xdata(offset)
                plt.draw()

        cid = fig.canvas.mpl_connect('button_press_event', onclick)
        plt.show()
        return offset
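A minimal synthetic check of the peak-location logic (the data here are illustrative only): summing aligned time streams and taking the argmax recovers the shared pulse location.

import numpy as np

streams = np.zeros((3, 100))  # three toy 'shifted' time streams
streams[:, 40] = 1.0          # common peak at index 40
offset = np.argmax(np.sum(streams, axis=0))  # -> 40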
Example #4
    def from_yaml(cls, loader, node):
        d = dict(loader.construct_pairs(node))
        if 'approximate_time' in d:
            d.pop('file', None)
            return cls(d.pop('name'),
                       d.pop('wavecal', None),
                       d.pop('flatcal', None),
                       d.pop('wcscal'),
                       d.pop('speccal', None),
                       byTimestamp=d.pop('approximate_time'),
                       use=d.pop('use', None),
                       _common=d)

        if not os.path.isfile(d['file']):
            getLogger(__name__).info(
                'Treating {} as relative dither path.'.format(d['file']))
            d['file'] = os.path.join(config.paths.dithers, d['file'])
        return cls(d.pop('name'),
                   d.pop('wavecal', None),
                   d.pop('flatcal', None),
                   d.pop('wcscal'),
                   d.pop('speccal', None),
                   byLegacyFile=d.pop('file'),
                   use=d.pop('use', None),
                   _common=d)
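For context, this is the standard shape of a PyYAML constructor callback. A minimal self-contained sketch of the pattern, with a toy class and a hypothetical '!obs' tag (not the pipeline's actual registration):

import yaml

class Obs:
    @classmethod
    def from_yaml(cls, loader, node):
        d = dict(loader.construct_pairs(node))  # mapping node -> plain dict
        o = cls()
        o.__dict__.update(d)
        return o

yaml.add_constructor('!obs', Obs.from_yaml)
obs = yaml.load("!obs {name: target0, approximate_time: 1602049166}",
                Loader=yaml.FullLoader)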
Example #5
def buildtables(timeranges,
                config=None,
                ncpu=1,
                asynchronous=False,
                remake=False,
                **kwargs):
    timeranges = list(set(timeranges))

    b2h_configs = gen_configs(timeranges, config)

    builders = [HDFBuilder(c, force=remake, **kwargs) for c in b2h_configs]

    if ncpu == 1:
        for b in builders:
            try:
                b.run(**kwargs)
            except MemoryError:
                getLogger(__name__).error(
                    'Insufficient memory to process {}'.format(b.h5file))
        return timeranges

    pool = mp.Pool(min(ncpu, mp.cpu_count()))

    if asynchronous:
        getLogger(__name__).debug('Running async on {} builders'.format(
            len(builders)))
        async_res = pool.map_async(runbuilder, builders)
        pool.close()
        return timeranges, async_res
    else:
        pool.map(runbuilder, builders)
        pool.close()
        pool.join()
        return timeranges
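A usage sketch (the Unix timestamps are hypothetical): synchronous mode blocks until every H5 file is built, while asynchronous mode returns immediately with an AsyncResult to poll.

timeranges = [(1545542463, 1545542493), (1545542520, 1545542550)]  # hypothetical
buildtables(timeranges, ncpu=4)  # blocks until done

tr, res = buildtables(timeranges, ncpu=4, asynchronous=True)
res.wait()  # or res.ready() to poll without blocking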
Example #6
 def fetch_spectra(self):
     '''
     Called from get(). Searches a URL, the ESO catalog, or (via astroquery.SDSS) the SDSS
     catalog, and puts the retrieved spectrum in a '/spectrum/' folder in self.save_dir.
     :return: the spectrum as a two-column (wavelength, flux) array
     '''
     if self.std_path is not None:
         if self.std_path.endswith('.txt'):
             data = np.loadtxt(self.std_path)
         else:
             self.spectrum_file = fetch_spectra_URL(object_name=self.object_name, url_path=self.std_path,
                                                    save_dir=self.save_dir)
             data = np.loadtxt(self.spectrum_file)
         return data
     else:
         self.spectrum_file = fetch_spectra_ESO(object_name=self.object_name, save_dir=self.save_dir)
         if not self.spectrum_file:
             self.spectrum_file = fetch_spectra_SDSS(object_name=self.object_name, save_dir=self.save_dir,
                                                     coords=self.coords)
             try:
                 data = np.loadtxt(self.spectrum_file)
                 return data
             except ValueError:
                 getLogger(__name__).error(
                     'Could not find standard spectrum for this object, please find a spectrum and point to it in '
                     'the standard_path in your pipe.yml')
                 sys.exit()
         data = np.loadtxt(self.spectrum_file)
          # ESO spectra are tabulated in units of 1e-16 erg s-1 cm-2 AA-1; convert
          data[:, 1] = data[:, 1] * 10**(-16)
         return data
Example #7
    def handle_existing(self):
        """ Handles existing h5 files, deleting them if appropriate"""
        if os.path.exists(self.cfg.h5file):

            if self.force:
                getLogger(__name__).info('Remaking {} forced'.format(
                    self.cfg.h5file))
                done = False
            else:
                try:
                    done = Photontable(
                        self.cfg.h5file).duration >= self.cfg.inttime
                    if not done:
                        getLogger(__name__).info(
                            ('{} does not contain full duration, '
                             'will remove and rebuild').format(
                                 self.cfg.h5file))
                except Exception:
                    done = False
                    getLogger(__name__).info(
                        ('{} presumed corrupt,'
                         ' will remove and rebuild').format(self.cfg.h5file),
                        exc_info=True)
            if not done:
                try:
                    os.remove(self.cfg.h5file)
                    getLogger(__name__).info('Deleted {}'.format(
                        self.cfg.h5file))
                except FileNotFoundError:
                    pass
            else:
                getLogger(__name__).info(
                    'H5 {} already built. Remake not requested. Done.'.format(
                        self.cfg.h5file))
                self.done = True
Example #8
 def extend_spectrum(self, x, y):
     """
     BB Fit to extend standard spectrum to 1500 nm and to convolve it with a gaussian kernel corresponding to the
     energy resolution of the detector
     """
     r = np.median(np.nanmedian(self.cfg.r_list, axis=0))
     if np.round(x[-1]) < self.cfg.wvlStop:
         fraction = 1.0 / 3.0
         nirX = np.arange(int(x[int((1.0 - fraction) * len(x))]), self.cfg.wvlStop)
         T, nirY = fitBlackbody(x, y, fraction=fraction, newWvls=nirX)
         if np.any(x >= self.cfg.wvlStop):
             self.bb_wvls = x
             self.bb_flux = y
         else:
             self.bb_wvls = np.concatenate((x, nirX[nirX > max(x)]))
             self.bb_flux = np.concatenate((y, nirY[nirX > max(x)]))
         # Gaussian convolution to smooth std spectrum to MKIDs median resolution
         newX, newY = gaussianConvolution(self.bb_wvls, self.bb_flux, xEnMin=self.cfg.energyStop,
                                          xEnMax=self.cfg.energyStart, fluxUnits="lambda", r=r, plots=False)
     else:
         getLogger(__name__).info('Standard Spectrum spans whole energy range - no need to perform blackbody fit')
         # Gaussian convolution to smooth std spectrum to MKIDs median resolution
         std_stop = (c.h * c.c) / (self.std_wvls[0] * 10**(-10) * c.e)
         std_start = (c.h * c.c) / (self.std_wvls[-1] * 10**(-10) * c.e)
         newX, newY = gaussianConvolution(x, y, xEnMin=std_start, xEnMax=std_stop, fluxUnits="lambda", r=r,
                                          plots=False)
     return newX, newY
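The conversions above implement E = hc/(lambda * e) with lambda in angstroms (hence the factor of 1e-10). A standalone check, assuming `c` is scipy.constants as the attribute names suggest:

import scipy.constants as c

wvl_angstrom = 9800.0  # hypothetical NIR wavelength
energy_ev = (c.h * c.c) / (wvl_angstrom * 1e-10 * c.e)  # ~1.27 eV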
Example #9
    def write(self, file):
        dirname = self.datadir
        if not glob(os.path.join(dirname, '*.bin')):
            dirname = os.path.join(
                self.datadir,
                datetime.utcfromtimestamp(self.starttime).strftime('%Y%m%d'))
        else:
            getLogger(__name__).debug(
                'bin files found in data directory. Will not append YYYYMMDD')

        try:
            # file may already be an open file-like object...
            file.write(
                self._template.format(datadir=dirname,
                                      starttime=self.starttime,
                                      inttime=self.inttime,
                                      beamfile=self.beamfile,
                                      outdir=self.outdir,
                                      x=self.x,
                                      y=self.y))
        except AttributeError:
            # ...or a path to open ourselves
            with open(file, 'w') as wavefile:
                wavefile.write(
                    self._template.format(datadir=dirname,
                                          starttime=self.starttime,
                                          inttime=self.inttime,
                                          beamfile=self.beamfile,
                                          outdir=self.outdir,
                                          x=self.x,
                                          y=self.y))
Example #10
    def get_dark_frame(self):
        '''
        Takes however many dark files are specified in the pipe.yml and computes counts/pixel/sec
        for the sum of all the dark obs, stitching together one long effective dark from the
        smaller obs given. This is useful for legacy data where there may be no dedicated dark
        observation, only parts of observations where the filter wheel was closed.

        If self.use_wavecal is True then a dark is not subtracted, since the subtraction only
        accounts for total counts and not energy information.

        :return: expected dark counts for each pixel over a flat observation
        '''
        if not self.dark_h5_file_names:
            dark_frame = np.zeros_like(self.spectralCubes[0][:, :, 0])
        else:
            self.dark_start = list(self.cfg.flatcal.dark_data['start_times'])
            self.dark_int = list(self.cfg.flatcal.dark_data['int_times'])
            self.dark_h5_file_names = [
                os.path.join(self.h5_directory,
                             str(t) + '.h5') for t in self.dark_start
            ]
            frames = np.zeros((140, 146, len(self.dark_start)))  # hard-coded detector shape (nominally MEC's 140x146 array)
            getLogger(__name__).info('Loading dark frames for Laser flat')

            for i, file in enumerate(self.dark_h5_file_names):
                obs = Photontable(file)
                frame = obs.getPixelCountImage(
                    integrationTime=self.dark_int[i])['image']
                frames[:, :, i] = frame
            total_counts = np.sum(frames, axis=2)
            total_int_time = float(np.sum(self.dark_int))
            counts_per_sec = total_counts / total_int_time
            dark_frame = counts_per_sec
        return dark_frame
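A toy check of the stitching arithmetic (synthetic numbers): summing counts over all darks and dividing by the total integration time yields counts/pixel/sec.

import numpy as np

frames = np.dstack([np.full((2, 2), 10.0), np.full((2, 2), 20.0)])  # counts per dark obs
dark_int = [5.0, 5.0]  # seconds
counts_per_sec = frames.sum(axis=2) / float(np.sum(dark_int))  # 3.0 everywhere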
Example #11
 def allowed(self, key, value):
     if key + '._a' in self:
         getLogger(__name__).warning(
             str(key) + ' has restrictions on allowed values, but checking '
             'has not yet been implemented')
     return True
Example #12
    def saveInferenceFile(self):
        metadatafile = self.inferenceFile.rpartition('.')[0] + '_metadata.txt'

        try:
            flNum = int(
                re.search(r'fl\d', self.inferenceFile,
                          re.IGNORECASE).group()[-1])
        except AttributeError:
            getLogger(__name__).warning(
                'Could not guess feedline from filename.')
            flNum = 0

        ws_good_inds = self.goodPeakIndices
        ws_bad_inds = self.badPeakIndices
        freqs = np.append(self.inferenceData.freqs[ws_good_inds],
                          self.inferenceData.freqs[ws_bad_inds])
        sort_inds = np.argsort(freqs)
        resIds = np.arange(freqs.size) + flNum * 10000

        flag = np.full(freqs.size, sweepdata.ISBAD)
        flag[:len(ws_good_inds)] = sweepdata.ISGOOD  # good freqs occupy the leading entries pre-sort
        smd = sweepdata.SweepMetadata(resid=resIds,
                                      flag=flag[sort_inds],
                                      wsfreq=freqs[sort_inds],
                                      file=metadatafile)
        smd.save()
Example #13
 def read_from_file(self, filename):
     with open(filename, 'r') as f:
         old = ''
         for line in f:
             line = line.strip()
             if len(line) == 0 or line[0] == '#':
                 continue
             line = line.split('#')[0]  # drop any trailing comment
             s = line.split('\\')
             if len(s) > 1:  # backslash continuation: buffer and join with the next line
                 old = ' '.join([old, s[0]])
                 continue
             line = ' '.join([old, s[0]]).lstrip()
             old = ''
             s = line.split('=')
             if len(s) != 2:
                 getLogger(__name__).warning(
                     "Error parsing line:\n\t'{}'".format(line))
                 continue
             key = s[0].strip()
             val = eval(s[1].strip())  # XXX: make safer
             self[key] = val
Example #14
    def __init__(self, yml):
        self.yml = yml
        self.meta = mkidcore.config.load(yml)
        names = [d.name for d in self.meta]
        if len(names) != len(set(names)):
            msg = 'Duplicate names not allowed in {}.'.format(yml)
            getLogger(__name__).critical(msg)
            raise ValueError(msg)

        wcdict = {w.name: w for w in self.wavecals}
        fcdict = {f.name: f for f in self.flatcals}
        wcsdict = {w.name: w for w in self.wcscals}
        scdict = {s.name: s for s in self.spectralcals}

        for o in self.all_observations:
            o.wavecal = wcdict.get(o.wavecal, o.wavecal)
            o.speccal = scdict.get(o.speccal, o.speccal)
            o.flatcal = fcdict.get(o.flatcal, o.flatcal)
            o.wcscal = wcsdict.get(o.wcscal, o.wcscal)

        for o in self.science_observations:
            o.flatcal = fcdict.get(o.flatcal, o.flatcal)
            o.wcscal = wcsdict.get(o.wcscal, o.wcscal)
            o.speccal = scdict.get(o.speccal, o.speccal)

        def _remap(obj, attr, mapping):
            # Swap a calibration name for its definition where the attribute exists
            try:
                setattr(obj, attr, mapping.get(getattr(obj, attr), getattr(obj, attr)))
            except AttributeError:
                pass

        for fc in self.flatcals:
            _remap(fc, 'wavecal', wcdict)

        for sc in self.spectralcals:
            for d in sc.data:
                _remap(d, 'wavecal', wcdict)
                _remap(d, 'flatcal', fcdict)
                _remap(d, 'wcscal', wcsdict)

        for d in self.dithers:
            _remap(d, 'wavecal', wcdict)
            _remap(d, 'flatcal', fcdict)
            _remap(d, 'wcscal', wcsdict)
Example #15
 def _load(self, filename):
     """
     Loads beammap data from filename
     """
     self.file = filename
     getLogger(__name__).debug('Reading {}'.format(self.file))
     self.resIDs, self.flags, self.xCoords, self.yCoords = np.loadtxt(
         filename, unpack=True)
Example #16
def runbuilder(b):
    getLogger(__name__).debug('Calling run on {}'.format(b.cfg.h5file))
    try:
        b.run()
    except Exception:
        getLogger(__name__).critical(
            'Caught exception during run of {}'.format(b.cfg.h5file),
            exc_info=True)
Example #17
 def loadData(self):
     '''
     Placeholder to maintain the universal interface of the makeCalibration function.
     '''
     getLogger(__name__).info(
         'No need to load wavecal solution data for a Laser Flat, passing through'
     )
Example #18
 def saveRoughBeammap(self):
      getLogger('beammap').info('Saving: {}'.format(self.outputBeammapFn))
     allResIDs = self.resIDsMap.flatten()
     flags = self.flagMap.flatten()
     x = self.x_loc.flatten()
     y = self.y_loc.flatten()
     args = np.argsort(allResIDs)
     data = np.asarray([allResIDs[args], flags[args], x[args], y[args]]).T
     np.savetxt(self.outputBeammapFn, data, fmt='%7d %3d %7f %7f')
Example #19
def satellite_spot_contrast(lam):
    """

    :param lam: wavelength in angstroms
    :return: contrast
    """
    getLogger(__name__).info('Using satellite spot contrast for a 25 nm astrogrid')
    ref = 1.55*10**4
    contrast = 2.72e-3*(ref / lam)**2 # 2.72e-3 number from Currie et. al. 2018b
    return contrast
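A quick check of the (ref/lam)**2 scaling (wavelengths in angstroms):

c_h = satellite_spot_contrast(1.55e4)  # 2.72e-3 at the 1.55 um reference wavelength
c_j = satellite_spot_contrast(1.25e4)  # ~4.18e-3; contrast grows toward shorter wavelengths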
Example #20
def create_spectra_directory(save_dir):
    """
    creates a spectrum directory in the save directory to put the spectra. If not called then the spectrum will
    just be saved in save_path
    """
    if os.path.exists(save_dir + '/spectra/'):
        getLogger(__name__).info('Spectrum directory already exists in {}, not going to make a new one'.format(save_dir))
    else:
        os.mkdir(save_dir + '/spectra/')
    return save_dir + '/spectra/'
Example #21
def _makeimage(xs, ys, img_shape, verbose=False):
    """See documentation in ParseBin.image() for best info"""
    tic = time.time()
    ret = np.zeros(tuple(img_shape))
    np.add.at(ret, (ys, xs), 1)
    toc = time.time()
    if verbose:
        getLogger('binparse').debug(
            "Time to make image is {:4.2f} seconds".format(toc - tic))
    return ret
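The np.add.at call is the load-bearing choice here: unlike ret[ys, xs] += 1, it accumulates correctly when (y, x) pairs repeat. A minimal demonstration:

import numpy as np

xs = np.array([0, 1, 1, 1])
ys = np.array([0, 2, 2, 2])
img = _makeimage(xs, ys, img_shape=(3, 3))
assert img[2, 1] == 3  # plain fancy-indexed += would record only 1 here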
Example #22
 def __getitem__(self, key):
     if key not in self:
         if self.ask:
             getLogger(__name__).info("Parameter '%s' not found" % key)
             val = ask_for(key)
             getLogger(__name__).info("Setting '%s' = %s" %
                                      (key, repr(val)))
             dict.__setitem__(self, key, val)
         else:
             return None
     return dict.__getitem__(self, key)
Example #23
def validate_metadata(md, warn=True, error=False):
    fail = False
    for k in REQUIRED_KEYS:
        if k not in md:
            msg = '{} missing from {}'.format(k, md)
            if error:
                raise KeyError(msg)
            fail = True
            if warn:
                getLogger(__name__).warning(msg)
    return fail
Example #24
 def save(self):
     """Save the time-stream data to the object's file path."""
     try:
         np.savez(self.file_path, self.phase)
     except IOError:
         directory = os.path.dirname(self.file_path)
         if not directory:
             raise
         getLogger(__name__).info('Making directory: ' + directory)
         os.mkdir(directory)
         np.savez(self.file_path, self.phase)
Example #25
def load_observing_metadata(files=tuple(), include_database=True):
    """Return a list of mkidcore.config.ConfigThings with the contents of the metadata from observing"""
    global config
    files = list(files)
    if config is not None and include_database:
        files += glob(os.path.join(config.paths.database, 'obslog*.json'))
    elif include_database:
        getLogger(__name__).warning('No pipeline database configured.')
    metadata = []
    for f in files:
        metadata += parse_obslog(f)
    return metadata
Example #26
 def save(self, save_name=None):
     """Save the solution to a file. The directory is given by the configuration."""
     if save_name is None:
         save_path = os.path.join(self.cfg.save_directory, self.name)
     else:
         save_path = os.path.join(self.cfg.save_directory, save_name)
     if not save_path.endswith('.npz'):
         save_path += '.npz'
     getLogger(__name__).info("Saving spectrophotometric response curve to {}".format(save_path))
     np.savez(save_path, curve=self.curve, wvl_bin_widths=self.wvl_bin_widths, wvl_bin_centers=self.wvl_bin_centers,
              cube=self.cube, errors=self.errors, configuration=self.cfg)
     self._file_path = save_path  # new file_path for the solution
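A sketch of reading a saved solution back (the file name is hypothetical; allow_pickle is needed because the configuration object is pickled into the archive):

import numpy as np

sol = np.load('speccal_solution.npz', allow_pickle=True)
curve = sol['curve']
centers, widths = sol['wvl_bin_centers'], sol['wvl_bin_widths']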
Example #27
    def __init__(self, file):

        self.count_images = {}

        with open(file, 'rb') as f:
            self._r = pickle.load(f)

        try:
            with open(file + '.count_images', 'rb') as f:
                self.count_images = pickle.load(f)
        except IOError:
            getLogger(__name__).info(
                'No count image file found. Data will need to be regenerated.')

        settings = [
            dict(index=ndx,
                 chunkshape=cshape,
                 timesort=tsort,
                 shuffle=shuffle,
                 bitshuffle=bshuffle,
                 ndx_bitshuffle=ndx_bshuffle,
                 ndx_shuffle=ndx_shuffle)
            for ndx in [(k, l) for k in ('full', 'medium', 'ultralight')
                        for l in (3, 6, 9)] for tsort in (False, True)
            for cshape in (20, 100, 180, 260, 340, 500, 1000, 5000, 10000,
                           None) for ndx_shuffle in (True, False)
            for ndx_bshuffle in (True, False) for shuffle in (True, False)
            for bshuffle in (True, False)
        ]

        for k, v in self._r.items():
            for s in settings:
                if setting_id(s) in k:
                    v['kwargs'] = repr(s)
                    break

        for k, v in self._r.items():
            if 'kwargs' not in v:
                print(v['table_nfo'], k)
                raise ValueError('no settings entry matches result key {}'.format(k))

        for k in self._r:
            matches = re.search(r"chunkshape := \((\d+),\)",
                                self._r[k]['table_nfo'])
            self._r[k]['chunkshape'] = int(matches.group(1))

        if not all([d in self.count_images for d in self.datasets]):
            print('Will determine count images, may take some time.')
            self.determine_mean_photperRID()
            with open(file + '.count_images', 'wb') as f:
                pickle.dump(self.count_images, f)
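A design note: the nested comprehension above enumerates a full parameter grid. An equivalent itertools.product form makes that structure explicit:

from itertools import product

indexes = [(k, l) for k in ('full', 'medium', 'ultralight') for l in (3, 6, 9)]
settings = [dict(index=ndx, chunkshape=cshape, timesort=tsort, shuffle=shuffle,
                 bitshuffle=bshuffle, ndx_bitshuffle=ndx_bshuffle, ndx_shuffle=ndx_shuffle)
            for ndx, tsort, cshape, ndx_shuffle, ndx_bshuffle, shuffle, bshuffle
            in product(indexes, (False, True),
                       (20, 100, 180, 260, 340, 500, 1000, 5000, 10000, None),
                       (True, False), (True, False), (True, False), (True, False))]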
Example #28
    def run(self, save=True, plot=None):
        """
        Compute the spectrophotometric calibration for the data specified in the configuration
        object. This method runs load_relative_spectrum() and load_sky_spectrum() or load_absolute_sepctrum(),
        load_standard_spectrum(), and calculate_specweights() sequentially.

        Args:
            save: a boolean specifying if the result will be saved.
            plot: a boolean specifying if a summary plot for the computation will be
                  saved.
        """

        try:
            getLogger(__name__).info("Loading Spectrum from MEC")
            self.load_absolute_spectrum()
            getLogger(__name__).info("Loading Standard Spectrum")
            self.load_standard_spectrum()
            getLogger(__name__).info("Calculating Spectrophotometric Response Curve")
            self.calculate_response_curve()
            self.solution = ResponseCurve(configuration=self.cfg, curve=self.curve, wvl_bin_widths=self.wvl_bin_widths,
                                          wvl_bin_centers=self.wvl_bin_centers, cube=self.cube,
                                          solution_name=self.solution_name, errors=self.errors)
            if save:
                self.solution.save(save_name=self.solution_name if isinstance(self.solution_name, str) else None)
            if plot or (plot is None and self.cfg.summary_plot):
                save_name = self.solution_name.rpartition(".")[0] + ".pdf"
                self.plot_summary(save_name=save_name)
        except KeyboardInterrupt:
            getLogger(__name__).info("Keyboard shutdown requested ... exiting")
Example #29
def guessFeedline(filename):
    # TODO generalize and find a home for this function
    try:
        flNum = int(re.search(r'fl\d', filename, re.IGNORECASE).group()[-1])
    except AttributeError:
        try:
            ip = int(os.path.splitext(filename)[0][-3:])
            flNum = int(MEC_NUM_FL_MAP[ip][:-1])
        except (KeyError, ValueError, IndexError):
            getLogger(__name__).warning('Could not guess feedline from filename {}.'.format(filename))
            raise ValueError('Unable to guess feedline')

    getLogger(__name__).debug('Guessing FL{} for filename {}'.format(flNum, os.path.basename(filename)))
    return flNum
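For illustration, both hypothetical filenames below resolve to feedline 7 through the regex branch (the match is case-insensitive and the trailing digit is extracted):

guessFeedline('mec_fl7a_data.npz')  # 'fl7' -> 7
guessFeedline('FL7_sweep.txt')      # 'FL7' -> 7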
Example #30
def fetch_spectra_SDSS(object_name, save_dir, coords):
    """
    saves a textfile in self.save_dir where the first column is the wavelength in angstroms and the second
    column is flux in erg cm-2 s-1 AA-1
    :return: the path to the saved spectrum file
    """
    if os.path.exists(save_dir + object_name + 'spectrum.dat'):
        getLogger(__name__).info('Spectrum already loaded, will not be reloaded')
        spectrum_file = save_dir + object_name + 'spectrum.dat'
        return spectrum_file
    getLogger(__name__).info('Looking for {} spectrum in SDSS catalog'.format(object_name))
    result = SDSS.query_region(coords, spectro=True)
    if not result:
        getLogger(__name__).warning('Could not find spectrum for {} at {},{} in SDSS catalog'.format(object_name, coords.ra, coords.dec))
        spectrum_file = None
        return spectrum_file
    spec = SDSS.get_spectra(matches=result)
    data = spec[0][1].data
    lamb = 10**data['loglam'] * u.AA
    flux = data['flux'] * 10 ** -17 * u.Unit('erg cm-2 s-1 AA-1')
    spectrum = Spectrum1D(spectral_axis=lamb, flux=flux)
    res = np.array([spectrum.spectral_axis, spectrum.flux])
    res = res.T
    spectrum_file = save_dir + object_name + 'spectrum.dat'
    np.savetxt(spectrum_file, res, fmt='%1.4e')
    getLogger(__name__).info('Spectrum loaded for {} from SDSS catalog'.format(object_name))
    return spectrum_file
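A hedged usage sketch (the target name, directory, and coordinates are hypothetical):

from astropy.coordinates import SkyCoord

coords = SkyCoord(ra=229.5244, dec=42.7811, unit='deg')
path = fetch_spectra_SDSS('target0', '/tmp/spectra/', coords)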