Code example #1
File: images.py Project: barentsen/iphas-dr2
def prepare_images(clusterview):
    # Make sure the output directory exists
    util.setup_dir(constants.PATH_IMAGES)
    metadata = []
    for band in ['halpha', 'r', 'i']:
        log.info('Starting with band {0}'.format(band))
        # Retrieve the list of runs
        if band == 'halpha':
            idx_band = 'ha'
        else:
            idx_band = band
        # [constants.IPHASQC_COND_RELEASE]
        runs = constants.IPHASQC['run_'+idx_band]
        # Prepare each run
        result = clusterview.map(prepare_one, runs, block=True)
        metadata.extend(result)

    # Write the metadata to a table
    mycolumns = (str('filename'), str('run'), str('ccd'),
                 str('in_dr2'),
                 str('ra'), str('dec'),
                 str('ra_min'), str('ra_max'),
                 str('dec_min'), str('dec_max'),
                 str('band'),
                 str('utstart'), str('exptime'),
                 str('seeing'), str('elliptic'),
                 str('skylevel'), str('skynoise'),
                 str('airmass'), str('photzp'),
                 str('confmap'))
    rows = list(itertools.chain.from_iterable(metadata))  # flatten list
    t = table.Table(rows, names=mycolumns)
    table_filename = os.path.join(constants.PATH_IMAGES, 'iphas-images.fits')
    t.write(table_filename, format='fits', overwrite=True)
Code example #2
File: core.py Project: AlexaVillaume/astroquery
    def _login(self, username=None, store_password=False):
        if username is None:
            if self.USERNAME == "":
                raise LoginError("If you do not pass a username to login(), you should configure a default one!")
            else:
                username = self.USERNAME
        # Get password from keyring or prompt
        password_from_keyring = keyring.get_password("astroquery:www.eso.org", username)
        if password_from_keyring is None:
            if system_tools.in_ipynb():
                log.warn("You may be using an ipython notebook:"
                         " the password form will appear in your terminal.")
            password = getpass.getpass("{0}, enter your ESO password:\n".format(username))
        else:
            password = password_from_keyring
        # Authenticate
        log.info("Authenticating {0} on www.eso.org...".format(username))
        # Do not cache pieces of the login process
        login_response = self._request("GET", "https://www.eso.org/sso/login", cache=False)
        login_result_response = self._activate_form(login_response,
                                                    form_index=-1,
                                                    inputs={'username': username,
                                                            'password': password})
        root = BeautifulSoup(login_result_response.content, 'html5lib')
        authenticated = not root.select('.error')
        if authenticated:
            log.info("Authentication successful!")
        else:
            log.exception("Authentication failed!")
        # When authenticated, save password in keyring if needed
        if authenticated and password_from_keyring is None and store_password:
            keyring.set_password("astroquery:www.eso.org", username, password)
        return authenticated
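A minimal usage sketch, assuming the public login() wrapper forwards its arguments to _login() as in astroquery's QueryWithLogin base class (the username below is a placeholder):

from astroquery.eso import Eso

eso = Eso()
# Prompts for the ESO password (or reads it from the keyring) and, on success,
# optionally stores it for later sessions.
eso.login("my_username", store_password=True)  # "my_username" is a placeholder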
Code example #3
    def plot_area_vs_energy(self, filename=None, show_save_energy=True):
        """
        Plot effective area vs. energy.
        """
        import matplotlib.pyplot as plt

        energy_hi = self.energy_hi.value
        effective_area = self.effective_area.value
        plt.plot(energy_hi, effective_area)
        if show_save_energy:
            plt.vlines(self.energy_thresh_hi.value, 1E3, 1E7, 'k', linestyles='--')
            plt.text(self.energy_thresh_hi.value - 1, 3E6,
                     'Safe energy threshold: {0:3.2f}'.format(
                         self.energy_thresh_hi),
                     ha='right')
            plt.vlines(self.energy_thresh_lo.value, 1E3, 1E7, 'k', linestyles='--')
            plt.text(self.energy_thresh_lo.value + 0.1, 3E3,
                     'Safe energy threshold: {0:3.2f}'.format(self.energy_thresh_lo))
        plt.xlim(0.1, 100)
        plt.ylim(1E3, 1E7)
        plt.loglog()
        plt.xlabel('Energy [TeV]')
        plt.ylabel('Effective Area [m^2]')
        if filename is not None:
            plt.savefig(filename)
            log.info('Wrote {0}'.format(filename))
Code example #4
File: calibrators.py Project: ctrichard/ctapipe
def calibration_parameters(excess_args, origin, calib_help=False):
    """
    Obtain the calibration parameters.

    Parameters
    ----------
    excess_args : list
        List of arguments left over after initial parsing.
    origin : str
        Origin of data file e.g. hessio.
    calib_help : bool
        Print help message for calibration arguments.

    Returns
    -------
    params : dict
        Calibration parameter dict.
    unknown_args : list
        List of leftover cmdline arguments after parsing for calibration
        arguments.
    """

    parser, ns = calibration_parser(origin)

    if calib_help:
        parser.print_help()
        parser.exit()

    args, unknown_args = parser.parse_known_args(excess_args, ns)

    params = vars(args)
    for key, value in params.items():
        log.info("[{}] {}".format(key, value))

    return params, unknown_args
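The leftover-argument pattern above can be illustrated with a small self-contained argparse sketch; the option names here are hypothetical and not the actual ctapipe calibration parser:

import argparse

parser = argparse.ArgumentParser(description="calibration options (illustrative)")
parser.add_argument("--integrator", default="simple")       # hypothetical option
parser.add_argument("--window-width", type=int, default=7)  # hypothetical option

# parse_known_args() consumes what it recognises and hands everything else back,
# so several parsers can share one command line.
args, unknown = parser.parse_known_args(["--integrator", "local", "--verbose"])
print(vars(args))   # {'integrator': 'local', 'window_width': 7}
print(unknown)      # ['--verbose']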
Code example #5
File: core.py Project: martindurant/astroquery
    def retrieve_data_from_uid(self, uids, cache=True):
        """
        Stage & Download ALMA data.  Will print out the expected file size
        before attempting the download.

        Parameters
        ----------
        uids : list or str
            A list of valid UIDs or a single UID.
            UIDs should have the form: 'uid://A002/X391d0b/X7b'
        cache : bool
            Whether to cache the downloads.

        Returns
        -------
        downloaded_files : list
            A list of the downloaded file paths
        """
        if isinstance(uids, six.string_types):
            uids = [uids]
        if not isinstance(uids, (list, tuple, np.ndarray)):
            raise TypeError("Datasets must be given as a list of strings.")

        files = self.stage_data(uids)
        file_urls = files['URL']
        totalsize = files['size'].sum() * files['size'].unit

        # each_size, totalsize = self.data_size(files)
        log.info("Downloading files of size {0}...".format(totalsize.to(u.GB)))
        # TODO: Add cache=cache keyword here.  Currently would have no effect.
        downloaded_files = self.download_files(file_urls)
        return downloaded_files
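A hedged usage sketch; the UID below is the format example from the docstring, not a dataset guaranteed to exist, and it assumes an astroquery version that still provides retrieve_data_from_uid:

from astroquery.alma import Alma

alma = Alma()
# Stages the requested dataset on the archive side, logs its total size,
# and then downloads the staged files.
files = alma.retrieve_data_from_uid('uid://A002/X391d0b/X7b')
print(files)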
Code example #6
    def use_output_config(self, filename):
        '''
        Use output configuration from an existing output or input file

        Parameters
        ----------
        filename : str
            The file to read the parameters from. This can be either the input
            or output file from a radiation transfer run.
        '''

        logger.info("Retrieving output configuration from %s" % filename)

        # Open existing file
        f = h5py.File(filename, 'r')

        # Get a pointer to the group with the sources
        if 'Output' in f:
            g_output = f['/Output/']
        else:
            g_output = f['/Input/Output/']

        # Read in output configuration
        self.conf.output.read(g_output)

        # Close the file
        f.close()
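The group-selection pattern above (the output group sits either at the top level or under '/Input/') can also be written with a context manager so the file is closed even on error; a minimal sketch with a hypothetical file name:

import h5py

with h5py.File("previous_run.rtout", "r") as f:  # hypothetical file name
    # Check both possible locations for the output configuration group.
    group = f["/Output/"] if "Output" in f else f["/Input/Output/"]
    print(list(group))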
Code example #7
File: counts_spectrum.py Project: JouvinLea/gammapy
    def plot(self, ax=None, filename=None, **kwargs):
        """
        Plot counts vector

        kwargs are forwarded to matplotlib.pyplot.hist

        Parameters
        ----------
        ax : `~matplotlib.axis` (optional)
            Axis instance to be used for the plot
        filename : str (optional)
            File to save the plot to

        Returns
        -------
        ax: `~matplotlib.axis`
            Axis instance used for the plot
        """
        import matplotlib.pyplot as plt

        ax = plt.gca() if ax is None else ax

        plt.hist(self.energy.value, len(self.energy.value),
                 weights=self.counts, **kwargs)
        plt.xlabel('Energy [{0}]'.format(self.energy.unit))
        plt.ylabel('Counts')
        if filename is not None:
            plt.savefig(filename)
            log.info('Wrote {0}'.format(filename))

        return ax
Code example #8
    def use_run_config(self, filename):
        '''
        Use runtime configuration from an existing output or input file

        Parameters
        ----------
        filename : str
            The file to read the parameters from. This can be either the input
            or output file from a radiation transfer run.
        '''

        # need to do this here because n_photons will depend on monochromatic vs not
        self.use_monochromatic_config(filename)

        logger.info("Retrieving runtime configuration from %s" % filename)

        # Open existing file
        f = h5py.File(filename, 'r')

        # Get a pointer to the group with the sources
        if 'Input' in f:
            g_par = f['/Input/']
        else:
            g_par = f

        # Read in runtime configuration
        self.read_run_conf(g_par)

        # Close the file
        f.close()
Code example #9
    def use_image_config(self, filename):
        '''
        Use image configuration from an existing output or input file

        Parameters
        ----------
        filename : str
            The file to read the parameters from. This can be either the input
            or output file from a radiation transfer run.
        '''

        # need to do this here because image wavelength interval will depend on monochromatic vs not
        self.use_monochromatic_config(filename)

        logger.info("Retrieving image configuration from %s" % filename)

        # Open existing file
        f = h5py.File(filename, 'r')

        # Get a pointer to the group with the sources
        if 'Output' in f:
            g_image = f['/Output/']
        else:
            g_image = f['/Input/Output/']

        # Read in binned images
        if 'n_theta' in g_image['Binned']:
            self.binned_output = BinnedImageConf.read(g_image['Binned'])

        # Read in peeled images
        for peeled in g_image['Peeled']:
            self.peeled_output.append(PeeledImageConf.read(g_image['Peeled'][peeled]))

        # Close the file
        f.close()
Code example #10
    def use_sources(self, filename):
        '''
        Use sources from an existing output file

        Parameters
        ----------
        filename : str
            The file to read the sources from. This should be the input or
            output file of a radiation transfer run.
        '''

        logger.info("Retrieving sources from %s" % filename)

        # Open existing file
        f = h5py.File(filename, 'r')

        # Get a pointer to the group with the sources
        if 'Sources' in f:
            g_sources = f['/Sources/']
        else:
            g_sources = f['/Input/Sources/']

        # Loop over sources
        for source in g_sources:
            self.add_source(read_source(g_sources[source]))

        # Close the file
        f.close()
Code example #11
    def use_monochromatic_config(self, filename):
        '''
        Use monochromatic configuration from an existing output file.

        Parameters
        ----------
        filename : str
            The file to read the configuration from. This should be the input or
            output file of a radiation transfer run.
        '''

        logger.info("Retrieving monochromatic configuration from %s" % filename)

        # Open existing file
        f = h5py.File(filename, 'r')

        # Get a pointer to the group with the sources
        if 'Input' in f:
            g = f['/Input']
        else:
            g = f

        # Read in monochromatic configuration
        self._read_monochromatic(g)

        # Close the file
        f.close()
Code example #12
File: ytcube.py Project: bsipocz/spectral-cube
def _make_movie(moviepath, prefix="", filename='out.mp4', overwrite=True):
    """
    Use ffmpeg to generate a movie from the image series
    """

    outpath = os.path.join(moviepath, filename)

    if os.path.exists(outpath) and overwrite:
        command = ['ffmpeg', '-y', '-r','5','-i',
                   os.path.join(moviepath,prefix+'%04d.png'),
                   '-r','30','-pix_fmt', 'yuv420p',
                   outpath]
    elif os.path.exists(outpath):
        log.info("File {0} exists - skipping".format(outpath))
        # Return early: without this, `command` would be undefined below
        return None
    else:
        command = ['ffmpeg', '-r', '5', '-i',
                   os.path.join(moviepath,prefix+'%04d.png'),
                   '-r','30','-pix_fmt', 'yuv420p',
                   outpath]

    pipe = subprocess.Popen(command, stdout=subprocess.PIPE, close_fds=True)

    pipe.wait()

    return pipe
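For reference, a standalone sketch of the same call using subprocess.run; the flags mirror the overwrite branch above, and the directory and prefix are placeholders:

import os
import subprocess

moviepath, prefix = "frames", ""  # hypothetical paths
outpath = os.path.join(moviepath, "out.mp4")
# Read the PNG series at 5 fps, encode at 30 fps in yuv420p, overwrite the output.
subprocess.run(["ffmpeg", "-y", "-r", "5",
                "-i", os.path.join(moviepath, prefix + "%04d.png"),
                "-r", "30", "-pix_fmt", "yuv420p", outpath],
               check=True)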
Code example #13
def run_notebooks(selected_nb_re=None):
    """ Run the tutorial notebooks. """
    from runipy.notebook_runner import NotebookRunner

    _orig_path = os.getcwd()

    # walk through each directory in tutorials/ to find all .ipynb files
    for tutorial_filename,nb in walk_through_tutorials(only_published=True,
                                selected_nb_re=selected_nb_re):
        path,filename = os.path.split(tutorial_filename)

        if filename.startswith("_run_"):
            continue

        logger.info("Running tutorial: {}".format(filename))

        # notebook file
        output_filename = os.path.join(path,"_run_{}"
                                       .format(filename))

        # prepend _run_ to the notebook names to create new files
        #   so the user isn't left with a bunch of modified files.
        os.chdir(path)
        r = NotebookRunner(nb, mpl_inline=True)
        r.run_notebook(skip_exceptions=True)
        write(r.nb, open(output_filename, 'w'), 'json')

    os.chdir(_orig_path)
Code example #14
    def __init__(self, cache_path, overwrite=False, **kwargs):

        # validate cache path
        self.cache_path = os.path.abspath(cache_path)
        if not os.path.exists(self.cache_path):
            os.mkdir(self.cache_path)

        # create empty config namespace
        ns = ConfigNamespace()

        for k,v in self.config_defaults.items():
            if k not in kwargs:
                setattr(ns, k, v)
            else:
                setattr(ns, k, kwargs[k])

        self.config = ns

        self.cache_file = os.path.join(self.cache_path, self.config.cache_filename)
        if os.path.exists(self.cache_file) and overwrite:
            os.remove(self.cache_file)

        # load initial conditions
        w0_path = os.path.join(self.cache_path, self.config.w0_filename)
        if not os.path.exists(w0_path):
            raise IOError("Initial conditions file '{0}' doesn't exist! You need "
                          "to generate this file first using make_grid.py".format(w0_path))
        self.w0 = np.load(w0_path)
        self.norbits = len(self.w0)
        logger.info("Number of orbits: {0}".format(self.norbits))
Code example #15
File: convert.py Project: Cadair/astropy-tutorials
    def __init__(self, nb_path, output_path=None, template_file=None,
                 overwrite=False, kernel_name=None):
        self.nb_path = path.abspath(nb_path)
        fn = path.basename(self.nb_path)
        self.path_only = path.dirname(self.nb_path)
        self.nb_name, _ = path.splitext(fn)

        if output_path is not None:
            self.output_path = output_path
            makedirs(self.output_path, exist_ok=True)
        else:
            self.output_path = self.path_only

        if template_file is not None:
            self.template_file = path.abspath(template_file)
        else:
            self.template_file = None

        self.overwrite = overwrite

        # the executed notebook
        self._executed_nb_path = path.join(self.output_path,
                                           'exec_{0}'.format(fn))

        logger.info('Processing notebook {0} (in {1})'.format(fn,
                                                              self.path_only))

        # the RST file
        self._rst_path = path.join(self.output_path,
                                   '{0}.rst'.format(self.nb_name))

        self._execute_kwargs = dict(timeout=900)
        if kernel_name:
            self._execute_kwargs['kernel_name'] = kernel_name
Code example #16
File: concatenating.py Project: YUnruh/iphas-dr2
    def __init__(self, strip, part='a', mode='full'):
        assert(part in ['a', 'b'])
        assert(mode in ['light', 'full'])

        self.strip = strip
        self.part = part
        self.mode = mode
        
        # Where are the input catalogues?
        self.datapath = os.path.join(constants.DESTINATION, 'seamed')

        # Where to write the output?
        self.destination = os.path.join(constants.DESTINATION,
                                        'concatenated')
        
        # Setup the destination directory
        if mode == 'light':
            self.destination = os.path.join(self.destination, 'light')
        else:
            self.destination = os.path.join(self.destination, 'full')
        util.setup_dir(self.destination)
        util.setup_dir(self.destination+'-compressed')

        log.info('Reading data from {0}'.format(self.datapath))

        # Limits
        self.lon1 = strip
        self.lon2 = strip + constants.STRIPWIDTH
        self.fieldlist = self.get_fieldlist()
Code example #17
    def _run_wrapper(self, index):
        logger.info("Orbit {0}".format(index))

        # unpack input argument dictionary
        import gary.potential as gp
        potential = gp.load(os.path.join(self.cache_path, self.config.potential_filename))

        # read out just this initial condition
        norbits = len(self.w0)
        allfreqs = np.memmap(self.cache_file, mode='r',
                             shape=(norbits,), dtype=self.cache_dtype)

        # short-circuit if this orbit is already done
        if allfreqs['success'][index]:
            logger.debug("Orbit {0} already successfully completed.".format(index))
            return None

        # Only pass in things specified in _run_kwargs (w0 and potential required)
        kwargs = dict([(k,self.config[k]) for k in self.config.keys() if k in self._run_kwargs])
        res = self.run(w0=self.w0[index], potential=potential, **kwargs)
        res['index'] = index

        # cache res into a tempfile, return name of tempfile
        tmpfile = os.path.join(self._tmpdir, "{0}-{1}.pickle".format(self.__class__.__name__, index))
        with open(tmpfile, 'w') as f:
            pickle.dump(res, f)
        return tmpfile
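The memory-mapped 'success' bookkeeping used above can be reproduced with a small standalone numpy sketch; the file name and dtype are illustrative, not the class's actual cache_dtype:

import numpy as np

cache_dtype = [("success", np.bool_), ("freq", np.float64)]  # illustrative dtype
norbits = 8

# Create the cache once...
cache = np.memmap("cache.dat", mode="w+", shape=(norbits,), dtype=cache_dtype)
cache["success"][:] = False
cache.flush()

# ...then any worker can reopen it read-only and skip finished orbits.
allfreqs = np.memmap("cache.dat", mode="r", shape=(norbits,), dtype=cache_dtype)
print(allfreqs["success"][3])  # False -> orbit 3 still needs to run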
Code example #18
File: dendro_mask.py Project: bsipocz/APEX_CMZ_H2CO
def make_dend(cube, noise, view=True, write=True,
              min_npix=100,
              min_nsig_value=3,
              min_nsig_delta=2,
              outfn="DendroMask_H2CO303202.hdf5"):
    """
    Given a cube and a 2D noise map, extract dendrograms.
    """

    # Use a little sigma-rejection to get a decently robust noise estimate
    noise_std = noise[noise==noise].std()
    noise_mean = noise[noise==noise].mean()
    err_estimate = noise[(noise > (noise_mean-noise_std)) &
                         (noise < (noise_mean+noise_std))].mean()
    bad_noise = np.isnan(noise)
    log.info("{1} Estimated error: {0}".format(err_estimate, outfn))

    dend = Dendrogram.compute(cube.filled_data[:].value,
                              min_value=min_nsig_value*err_estimate,
                              min_delta=min_nsig_delta*err_estimate,
                              min_npix=min_npix,
                              verbose=True, wcs=cube.wcs)

    if view:
        dend.viewer()

    if write:
        dend.save_to(hpath(outfn))

    return dend
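The "little sigma-rejection" above is a one-pass clip around the mean; a standalone numpy sketch of the same estimate on toy values:

import numpy as np

noise = np.array([0.9, 1.0, 1.1, 1.05, 0.95, np.nan, 50.0])  # toy noise map values

valid = noise[noise == noise]            # drop NaNs (NaN != NaN)
mean, std = valid.mean(), valid.std()
# Keep only values within one standard deviation of the mean, then average.
err_estimate = valid[(valid > mean - std) & (valid < mean + std)].mean()
print(err_estimate)  # ~1.0: the 50.0 outlier is rejected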
Code example #19
File: concatenating.py Project: YUnruh/iphas-dr2
def merge_light_catalogue():
    """Merge the light tiled catalogues into one big file."""
    output_filename = os.path.join(constants.DESTINATION,
                                   'concatenated',
                                   'iphas-dr2-light.fits')

    instring = ''
    for lon in np.arange(25, 215+1, constants.STRIPWIDTH):
        for part in ['a', 'b']:
            path = os.path.join(constants.DESTINATION,
                                'concatenated',
                                'light',
                                'iphas-dr2-{0:03d}{1}-light.fits'.format(
                                                                    lon, part))
            instring += 'in={0} '.format(path)

    # Warning: a bug in stilts causes long fieldIDs to be truncated if -utype S15 is not set
    param = {'stilts': constants.STILTS,
             'in': instring,
             'out': output_filename}

    cmd = '{stilts} tcat {in} countrows=true lazy=true ofmt=colfits-basic out={out}'
    mycmd = cmd.format(**param)
    log.debug(mycmd)
    status = os.system(mycmd)
    log.info('concat: '+str(status))

    return status
Code example #20
    def compute_TDBs(self):
        """Compute and add TDB and TDB long double columns to the TOA table.

        This routine creates new columns 'tdb' and 'tdbld' in a TOA table
        for TDB times, using the Observatory locations and IERS A Earth
        rotation corrections for UT1.
        """
        if 'tdb' in self.table.colnames:
            log.info('tdb column already exists. Deleting...')
            self.table.remove_column('tdb')
        if 'tdbld' in self.table.colnames:
            log.info('tdbld column already exists. Deleting...')
            self.table.remove_column('tdbld')

        # Compute in observatory groups
        tdbs = numpy.zeros_like(self.table['mjd'])
        for ii, key in enumerate(self.table.groups.keys):
            grp = self.table.groups[ii]
            obs = self.table.groups.keys[ii]['obs']
            loind, hiind = self.table.groups.indices[ii:ii+2]
            grpmjds = time.Time(grp['mjd'], location=grp['mjd'][0].location)
            grptdbs = grpmjds.tdb
            tdbs[loind:hiind] = numpy.asarray([t for t in grptdbs])

        # Now add the new columns to the table
        col_tdb = table.Column(name='tdb', data=tdbs)
        col_tdbld = table.Column(name='tdbld',
                data=[utils.time_to_longdouble(t) for t in tdbs])
        self.table.add_columns([col_tdb, col_tdbld])
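The per-group conversion relies on astropy's Time scale handling; a minimal standalone sketch of a UTC-to-TDB conversion at a fixed location (the coordinates are placeholders, not a real observatory):

from astropy import units as u
from astropy.coordinates import EarthLocation
from astropy.time import Time

loc = EarthLocation(lat=38.4 * u.deg, lon=-79.8 * u.deg, height=800 * u.m)  # placeholder site
t = Time(56000.0, format="mjd", scale="utc", location=loc)
# .tdb applies the UTC -> TT -> TDB chain, including the topocentric term
# when a location is attached.
print(t.tdb.mjd)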
Code example #21
def analyze_muon_source(source, params=None, geom_dict=None, args=None):
    """
    Generator for analyzing all the muon events

    Parameters
    ----------
    source : generator
        A 'ctapipe' event generator such as
        'ctapipe.io.hessio_event_source'

    Returns
    -------
    analyzed_muon : container
        A ctapipe event container (MuonParameter) with muon information

    """
    log.info("[FUNCTION] {}".format(__name__))

    if geom_dict is None:
        geom_dict={}
        
    numev = 0
    for event in source:#Put a limit on number of events
        numev += 1
        analyzed_muon = analyze_muon_event(event, params, geom_dict)
        print("Analysed event number",numev)
        #   if analyzed_muon[1] is not None:
        #           plot_muon_event(event, analyzed_muon, geom_dict, args)
            
        #  if numev > 50: #for testing purposes only
        #          break

        yield analyzed_muon
Code example #22
File: toa.py Project: yanwang2012/PINT
    def compute_posvels(self, ephem="DE405", planets=False):
        """Compute positions and velocities of observatory and Earth.

        Compute the positions and velocities of the observatory (wrt
        the Geocenter) and the center of the Earth (referenced to the
        SSB) for each TOA.  The JPL solar system ephemeris can be set
        using the 'ephem' parameter.  The positions and velocities are
        set with PosVel class instances which have astropy units.
        """
        # Load the appropriate JPL ephemeris
        load_kernels()
        pth = os.path.join(pintdir, "datafiles")
        ephem_file = os.path.join(pth, "%s.bsp"%ephem.lower())
        spice.furnsh(ephem_file)
        log.info("Loaded ephemeris from %s" % ephem_file)
        j2000 = time.Time('2000-01-01 12:00:00', scale='utc')
        j2000_mjd = utils.time_to_mjd_mpf(j2000)
        for toa in self.toas:
            xyz = observatories[toa.obs].xyz
            toa.obs_pvs = erfautils.topo_posvels(xyz, toa)
            # SPICE expects ephemeris time to be in sec past J2000 TDB
            # We need to figure out how to get the correct time...
            et = (toa.mjd.TDB - j2000_mjd) * SECS_PER_DAY

            # SSB to observatory position/velocity:
            toa.earth_pvs = objPosVel("EARTH", "SSB", et)
            toa.pvs = toa.obs_pvs + toa.earth_pvs

            # Obs to Sun PV:
            toa.obs_sun_pvs = objPosVel("SUN", "EARTH", et) - toa.obs_pvs
            if planets:
                for p in ('jupiter', 'saturn', 'venus', 'uranus'):
                    pv = objPosVel(p.upper()+" BARYCENTER",
                            "EARTH", et) - toa.obs_pvs
                    setattr(toa, 'obs_'+p+'_pvs', pv)
Code example #23
File: core.py Project: astropy/astroquery
    def download_files(self, files, savedir=None, cache=True, continuation=True):
        """
        Given a list of file URLs, download them

        Note: Given a list with repeated URLs, each will only be downloaded
        once, so the return may have a different length than the input list
        """
        downloaded_files = []
        if savedir is None:
            savedir = self.cache_location
        for fileLink in unique(files):
            try:
                filename = self._request("GET", fileLink, save=True,
                                         savedir=savedir,
                                         timeout=self.TIMEOUT, cache=cache,
                                         continuation=continuation)
                downloaded_files.append(filename)
            except requests.HTTPError as ex:
                if ex.response.status_code == 401:
                    log.info("Access denied to {url}.  Skipping to"
                             " next file".format(url=fileLink))
                    continue
                else:
                    raise ex
        return downloaded_files
Code example #24
def analyze_muon_source(source):
    """
    Generator for analyzing all the muon events

    Parameters
    ----------
    source : ctapipe.io.EventSource
        input event source

    Returns
    -------
    analyzed_muon : container
        A ctapipe event container (MuonParameter) with muon information

    """
    log.info("[FUNCTION] {}".format(__name__))

    numev = 0
    for event in source:  # Put a limit on number of events
        numev += 1
        analyzed_muon = analyze_muon_event(event)

        yield analyzed_muon
Code example #25
File: metrec.py Project: barentsen/meteor-flux
def ingest_zip(path, mydb, remove_old=True):
    """Adds a single metrec flux zip file to the database.

    Parameters
    ----------
    path : str
        Location of the data.

    mydb : FluxDB object
        Database in which to ingest.

    remove_old : bool
        If true, search and delete any previous version of a file with
        the same filename (i.e. dataset_id). This slows things down!

    Returns
    -------
    MetRecData object that was ingested.
    """
    log.info("Ingesting %s" % path)
    myzip = MetRecData(path)
    # Make sure any previous version of this dataset is removed
    if remove_old:
        mydb.remove_dataset(myzip.dataset_id)
    mydb.ingest_json(myzip.get_json())
    return myzip
Code example #26
    def imsave(self, out_fn=None):
        if out_fn is None:
            out_fn = 'vphas-{0}-{1}.jpg'.format(self.offset, self.ccd)
        log.info('Writing {0}'.format(out_fn))
        # mimg.imsave(out_fn, np.rot90(self.as_array()), origin='lower')
        img = np.rot90(self.as_array())
        imageio.imsave(out_fn, img, quality=90, optimize=True)
Code example #27
File: voronoi_grid.py Project: hyperion-rt/hyperion
    def _recompute_voronoi(self, force=False):

        if (self._voronoi_table is None
            or self._voronoi_table.meta['geometry'].decode('utf-8') != self.get_geometry_id()
                or self._samples_params != (self._n_samples, self._min_cell_samples)):

            from .voronoi_helpers import voronoi_grid

            logger.info("Updating Voronoi Tesselation")

            # Compute the Voronoi tesselation
            points = np.array([self._x, self._y, self._z]).transpose()
            mesh = voronoi_grid(points,
                                np.array([[self.xmin, self.xmax],
                                          [self.ymin, self.ymax],
                                          [self.zmin, self.zmax]],
                                         ),
                                n_samples=self._n_samples or 0,
                                min_cell_samples=self._min_cell_samples or 0,
                                verbose=self._verbose,
                                seed=self._seed)

            # Store the neighbours information in sparse format.
            self._sparse_neighbors = mesh.st
            self._voronoi_table = mesh.neighbours_table
            self._voronoi_table.meta['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))

            if self._n_samples is not None:
                self._samples = mesh.samples
                self._samples_idx = mesh.samples_idx
                self._samples_params = (self._n_samples, self._min_cell_samples)
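A generic illustration of building a Voronoi tessellation from a set of sites, using scipy rather than hyperion's voronoi_grid helper:

import numpy as np
from scipy.spatial import Voronoi

rng = np.random.default_rng(42)
points = rng.uniform(-1.0, 1.0, size=(50, 3))  # 50 random sites in a cube

vor = Voronoi(points)
# ridge_points lists pairs of input sites that share a cell face,
# i.e. the neighbour relation stored in sparse form above.
print(vor.ridge_points.shape)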
Code example #28
File: counts_spectrum.py Project: drex44/gammapy
    def plot(self, ax=None, filename=None, weight=1, **kwargs):
        """
        Plot counts vector

        kwargs are forwarded to matplotlib.pyplot.hist

        Parameters
        ----------
        ax : `~matplotlib.axis` (optional)
            Axis instance to be used for the plot
        filename : str (optional)
            File to save the plot to
        weight : float
            Weighting factor for the counts

        Returns
        -------
        ax: `~matplotlib.axis`
            Axis instance used for the plot
        """
        import matplotlib.pyplot as plt

        ax = plt.gca() if ax is None else ax
        w = self.counts * weight
        plt.hist(self.energy_bounds.log_centers, bins=self.energy_bounds, weights=w, **kwargs)
        plt.xlabel("Energy [{0}]".format(self.energy_bounds.unit))
        plt.ylabel("Counts")
        if filename is not None:
            plt.savefig(filename)
            log.info("Wrote {0}".format(filename))

        return ax
Code example #29
File: graphtab.py Project: jhunkeler/reftools
    def select_edge(self, kwdset, partial=False, count=0):
        # based on algorithm by Alex Martelli
        # kwdset = set of the comma-separated keywords
        # in an obsmode. eg
        # "acs,hrc,f555w" -> set(["acs","hrc","f555w"])
        match = self.edgeset & kwdset
        if len(match) > 1:
            log.info("Found match of {0}".format(match))
            raise ValueError("Ambiguous...Too many edges match. Check for problems with graph table.")
        elif len(match) == 1:
            ans = self.edges[match.pop()]
        else:
            # pick up the default if there is one
            if 'default' in self.edges:
                if not partial or (partial and count < len(kwdset)):
                    # consider 'default' -> None
                    ans = self.edges['default']
                else:
                    # define Edge object to serve as sentinel to mark the end of this path
                    ans = Edge('default', [None, None, None, None], None)
            else:
                # An example of this case would be kwdset=['acs'] yet
                # the only edges for continuing are ['wfc2','sbc','wfc1','hrc']
                # raise KeyError("No match, bla bla")
                log.info("No match... Multiple edges but no default.")
                # define Edge object to serve as sentinel to mark the end of this path
                ans = Edge('default', [None, None, None, None], None)
        return ans
Code example #30
    def solar_system_shapiro_delay(self, toas):
        """
        Returns the total Shapiro delay due to solar system objects.
        If the PLANET_SHAPIRO model param is set to True then
        planets are included, otherwise only the value for the
        Sun is calculated.

        Requires Astrometry or similar model that provides the
        ssb_to_psb_xyz method for direction to pulsar.

        If planets are to be included, TOAs.compute_posvels() must
        have been called with the planets=True argument.
        """
        # Start out with 0 delay with units of seconds
        delay = numpy.zeros(len(toas))
        for ii, key in enumerate(toas.groups.keys):
            grp = toas.groups[ii]
            obs = toas.groups.keys[ii]['obs']
            loind, hiind = toas.groups.indices[ii:ii+2]
            if key['obs'].lower() == 'barycenter':
                log.info("Skipping Shapiro delay for Barycentric TOAs")
                continue
            psr_dir = self.ssb_to_psb_xyz(epoch=grp['tdbld'].astype(numpy.float64))
            delay[loind:hiind] += self.ss_obj_shapiro_delay(grp['obs_sun_pos'],
                                    psr_dir, self._ss_mass_sec['sun'])
            if self.PLANET_SHAPIRO.value:
                for pl in ('jupiter', 'saturn', 'venus', 'uranus'):
                    delay[loind:hiind] += self.ss_obj_shapiro_delay(grp['obs_'+pl+'_pos'],
                                                   psr_dir, self._ss_mass_sec[pl])
        return delay
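For scale, the solar prefactor 2*GM_sun/c^3 in the Shapiro delay formula is roughly 9.85 microseconds; a quick check with astropy constants (assuming _ss_mass_sec above stores the per-body GM/c^3 values, about 4.93 us for the Sun):

from astropy import constants as const
from astropy import units as u

# 2 * G * M_sun / c**3 expressed in microseconds
print((2 * const.GM_sun / const.c**3).to(u.us))  # ~9.85 us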
Code example #31
File: utils.py Project: zefrawg/astroquery
def make_finder_chart_from_image_and_catalog(
    image,
    catalog,
    save_prefix,
    alma_kwargs={
        'public': False,
        'science': False
    },
    bands=(3, 4, 5, 6, 7, 8, 9),
    private_band_colors=('maroon', 'red', 'orange', 'coral', 'brown', 'yellow',
                         'mediumorchid'),
    public_band_colors=('blue', 'cyan', 'green', 'turquoise', 'teal',
                        'darkslategrey', 'chartreuse'),
    integration_time_contour_levels=np.logspace(0, 5, base=2, num=6),
    save_masks=False,
    use_saved_masks=False,
):
    """
    Create a "finder chart" showing where ALMA has pointed in various bands,
    including different color coding for public/private data and each band.

    Contours are set at various integration times.

    Parameters
    ----------
    image : fits.PrimaryHDU or fits.ImageHDU object
        The image to overlay onto
    catalog : astropy.Table object
        The catalog of ALMA observations
    save_prefix : str
        The prefix for the output files.  Both .reg and .png files will be
        written.  The .reg files will have the band numbers and
        public/private appended, while the .png file will be named
        prefix_almafinderchart.png
    alma_kwargs : dict
        Keywords to pass to the ALMA archive when querying.
    private_band_colors / public_band_colors : tuple
        A tuple or list of colors to be associated with private/public
        observations in the various bands
    integration_time_contour_levels : list or np.array
        The levels at which to draw contours in units of seconds.  Default is
        log-spaced (2^n) seconds: [1., 2., 4., 8., 16., 32.]
    """
    import aplpy

    import pyregion
    from pyregion.parser_helper import Shape

    primary_beam_radii = [
        approximate_primary_beam_sizes(row['Frequency support'])
        for row in catalog
    ]

    all_bands = bands
    bands = used_bands = np.unique(catalog['Band'])
    log.info("The bands used include: {0}".format(used_bands))
    band_colors_priv = dict(zip(all_bands, private_band_colors))
    band_colors_pub = dict(zip(all_bands, public_band_colors))
    log.info("Color map private: {0}".format(band_colors_priv))
    log.info("Color map public: {0}".format(band_colors_pub))

    if use_saved_masks:
        hit_mask_public = {}
        hit_mask_private = {}

        for band in bands:
            pubfile = '{0}_band{1}_public.fits'.format(save_prefix, band)
            if os.path.exists(pubfile):
                hit_mask_public[band] = fits.getdata(pubfile)
            privfile = '{0}_band{1}_private.fits'.format(save_prefix, band)
            if os.path.exists(privfile):
                hit_mask_private[band] = fits.getdata(privfile)

    else:
        today = np.datetime64('today')

        private_circle_parameters = {
            band: [(row['RA'], row['Dec'], np.mean(rad).to(u.deg).value)
                   for row, rad in zip(catalog, primary_beam_radii)
                   if not row['Release date'] or (np.datetime64(
                       row['Release date']) > today and row['Band'] == band)]
            for band in bands
        }

        public_circle_parameters = {
            band: [(row['RA'], row['Dec'], np.mean(rad).to(u.deg).value)
                   for row, rad in zip(catalog, primary_beam_radii)
                   if row['Release date'] and (np.datetime64(
                       row['Release date']) <= today and row['Band'] == band)]
            for band in bands
        }

        unique_private_circle_parameters = {
            band: np.array(list(set(private_circle_parameters[band])))
            for band in bands
        }
        unique_public_circle_parameters = {
            band: np.array(list(set(public_circle_parameters[band])))
            for band in bands
        }

        release_dates = np.array(catalog['Release date'], dtype=np.datetime64)

        for band in bands:
            log.info("BAND {0}".format(band))
            privrows = sum((catalog['Band'] == band) & (release_dates > today))
            pubrows = sum((catalog['Band'] == band) & (release_dates <= today))
            log.info("PUBLIC:  Number of rows: {0}.  Unique pointings: "
                     "{1}".format(pubrows,
                                  len(unique_public_circle_parameters[band])))
            log.info("PRIVATE: Number of rows: {0}.  Unique pointings: "
                     "{1}".format(privrows,
                                  len(unique_private_circle_parameters[band])))

        prv_regions = {
            band: pyregion.ShapeList([
                Shape('circle', [x, y, r])
                for x, y, r in private_circle_parameters[band]
            ])
            for band in bands
        }
        pub_regions = {
            band: pyregion.ShapeList([
                Shape('circle', [x, y, r])
                for x, y, r in public_circle_parameters[band]
            ])
            for band in bands
        }
        for band in bands:
            circle_pars = np.vstack([
                x for x in (private_circle_parameters[band],
                            public_circle_parameters[band]) if any(x)
            ])
            for r, (x, y, c) in zip(prv_regions[band] + pub_regions[band],
                                    circle_pars):
                r.coord_format = 'fk5'
                r.coord_list = [x, y, c]
                r.attr = ([], {
                    'color': 'green',
                    'dash': '0 ',
                    'dashlist': '8 3',
                    'delete': '1 ',
                    'edit': '1 ',
                    'fixed': '0 ',
                    'font': '"helvetica 10 normal roman"',
                    'highlite': '1 ',
                    'include': '1 ',
                    'move': '1 ',
                    'select': '1',
                    'source': '1',
                    'text': '',
                    'width': '1 '
                })

            if prv_regions[band]:
                prv_regions[band].write('{0}_band{1}_private.reg'.format(
                    save_prefix, band))
            if pub_regions[band]:
                pub_regions[band].write('{0}_band{1}_public.reg'.format(
                    save_prefix, band))

        prv_mask = {
            band:
            fits.PrimaryHDU(prv_regions[band].get_mask(image).astype('int'),
                            header=image.header)
            for band in bands if prv_regions[band]
        }
        pub_mask = {
            band:
            fits.PrimaryHDU(pub_regions[band].get_mask(image).astype('int'),
                            header=image.header)
            for band in bands if pub_regions[band]
        }

        hit_mask_public = {
            band: np.zeros_like(image.data)
            for band in pub_mask
        }
        hit_mask_private = {
            band: np.zeros_like(image.data)
            for band in prv_mask
        }
        mywcs = wcs.WCS(image.header)

        for band in bands:
            log.debug('Band: {0}'.format(band))
            for row, rad in ProgressBar(list(zip(catalog,
                                                 primary_beam_radii))):
                shape = Shape(
                    'circle',
                    (row['RA'], row['Dec'], np.mean(rad).to(u.deg).value))
                shape.coord_format = 'fk5'
                shape.coord_list = (row['RA'], row['Dec'],
                                    np.mean(rad).to(u.deg).value)
                shape.attr = ([], {
                    'color': 'green',
                    'dash': '0 ',
                    'dashlist': '8 3 ',
                    'delete': '1 ',
                    'edit': '1 ',
                    'fixed': '0 ',
                    'font': '"helvetica 10 normal roman"',
                    'highlite': '1 ',
                    'include': '1 ',
                    'move': '1 ',
                    'select': '1 ',
                    'source': '1',
                    'text': '',
                    'width': '1 '
                })
                log.debug('{1} {2}: {0}'.format(shape, row['Release date'],
                                                row['Band']))

                if not row['Release date']:
                    reldate = False
                else:
                    reldate = np.datetime64(row['Release date'])

                if (((not reldate) or (reldate > today))
                        and (row['Band'] == band) and (band in prv_mask)):
                    # private: release_date = 'sometime' says when it will be released
                    (xlo, xhi, ylo,
                     yhi), mask = pyregion_subset(shape,
                                                  hit_mask_private[band],
                                                  mywcs)
                    log.debug("{0},{1},{2},{3}: {4}".format(
                        xlo, xhi, ylo, yhi, mask.sum()))
                    hit_mask_private[band][
                        ylo:yhi, xlo:xhi] += row['Integration'] * mask
                elif (reldate and (reldate <= today) and (row['Band'] == band)
                      and (band in pub_mask)):
                    # public: release_date = '' should mean already released
                    (xlo, xhi, ylo,
                     yhi), mask = pyregion_subset(shape, hit_mask_public[band],
                                                  mywcs)
                    log.debug("{0},{1},{2},{3}: {4}".format(
                        xlo, xhi, ylo, yhi, mask.sum()))
                    hit_mask_public[band][ylo:yhi,
                                          xlo:xhi] += row['Integration'] * mask

        if save_masks:
            for band in bands:
                if band in hit_mask_public:
                    hdu = fits.PrimaryHDU(data=hit_mask_public[band],
                                          header=image.header)
                    hdu.writeto('{0}_band{1}_public.fits'.format(
                        save_prefix, band),
                                clobber=True)
                if band in hit_mask_private:
                    hdu = fits.PrimaryHDU(data=hit_mask_private[band],
                                          header=image.header)
                    hdu.writeto('{0}_band{1}_private.fits'.format(
                        save_prefix, band),
                                clobber=True)

    fig = aplpy.FITSFigure(fits.HDUList(image), convention='calabretta')
    fig.show_grayscale(stretch='arcsinh')
    for band in bands:
        if band in hit_mask_public:
            fig.show_contour(fits.PrimaryHDU(data=hit_mask_public[band],
                                             header=image.header),
                             levels=integration_time_contour_levels,
                             colors=[band_colors_pub[band]] *
                             len(integration_time_contour_levels),
                             convention='calabretta')
        if band in hit_mask_private:
            fig.show_contour(fits.PrimaryHDU(data=hit_mask_private[band],
                                             header=image.header),
                             levels=integration_time_contour_levels,
                             colors=[band_colors_priv[band]] *
                             len(integration_time_contour_levels),
                             convention='calabretta')

    fig.save('{0}_almafinderchart.png'.format(save_prefix))

    return image, catalog, hit_mask_public, hit_mask_private
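A hedged call sketch; the target, background FITS file, and query are placeholders, and the catalog column names must match the astroquery version this utility was written against:

from astropy import coordinates, units as u
from astropy.io import fits
from astroquery.alma import Alma

center = coordinates.SkyCoord.from_name("Orion KL")        # placeholder target
catalog = Alma.query_region(center, radius=1 * u.arcmin)   # table of ALMA observations
image = fits.open("orion_background.fits")[0]               # placeholder background image
make_finder_chart_from_image_and_catalog(image, catalog, save_prefix="orion_kl")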
Code example #32
    def __init__(self, args):
        log.info('Initializing the data object')
        self.args = args

        #Getting the names of the event files from obsdir
        self.evfiles = glob(
            path.join(self.args.obsdir, 'xti/event_cl/ni*mpu7_cl.evt*'))
        self.evfiles.sort()
        if len(self.evfiles) == 0:
            log.error("No event files found!")
            raise Exception('No event files found!')
        log.info(
            'Found clean event files: {0}'.format("\n" +
                                                  "    \n".join(self.evfiles)))

        self.ufafiles = glob(
            path.join(self.args.obsdir, 'xti/event_cl/ni*mpu7_ufa.evt*'))
        self.ufafiles.sort()
        log.info('Found merged unfiltered event files: {0}'.format(
            "\n" + "    \n".join(self.ufafiles)))

        self.uffiles = glob(
            path.join(self.args.obsdir, 'xti/event_uf/ni*mpu*_uf.evt*'))
        self.uffiles.sort()
        log.info('Found raw unfiltered event files: {0}'.format(
            "\n" + "    \n".join(self.uffiles)))

        # Get name of orbit file from obsdir
        try:
            self.args.orb = glob(path.join(self.args.obsdir,
                                           'auxil/ni*.orb*'))[0]
        except:
            log.error("Orbit file not found!")
        log.info('Found the orbit file: {0}'.format(self.args.orb))

        # Get name of SPS HK file (apid0260)
        if self.args.sps is None:
            try:
                self.args.sps = glob(
                    path.join(self.args.obsdir, 'auxil/ni*_apid0260.hk*'))[0]
            except:
                self.args.sps = None

        # Get name of MPU housekeeping files
        self.hkfiles = glob(path.join(self.args.obsdir, 'xti/hk/ni*.hk*'))
        self.hkfiles.sort()
        log.info('Found the MPU housekeeping files: {0}'.format(
            "\n" + "\t\n".join(self.hkfiles)))

        # Get name of filter (.mkf) file
        self.mkfile = glob(path.join(args.obsdir, 'auxil/ni*.mkf*'))[0]
        self.mktable = Table.read(self.mkfile, hdu=1)
        if 'TIMEZERO' in self.mktable.meta:
            log.info(
                'Applying TIMEZERO of {0} to mktable in NicerFileSet'.format(
                    self.mktable.meta['TIMEZERO']))
            self.mktable['TIME'] += self.mktable.meta['TIMEZERO']
            self.mktable.meta['TIMEZERO'] = 0.0

        # Make lat, lon interpolater from mktable
        self.llinterp = LatLonInterp(self.mktable['TIME'],
                                     self.mktable['SAT_LAT'],
                                     self.mktable['SAT_LON'])

        #Compiling Event Data
        self.getgti()
        if len(self.gtitable) == 0:
            log.error('No Good Time remaining! Quitting...')
            sys.exit(0)
        if self.args.useftools:
            self.etable = filtallandmerge_ftools(self.ufafiles, workdir=None)
        else:
            self.etable = self.createetable()
        if len(self.etable) == 0:
            log.error("No events in etable! Aborting")
            raise Exception('No events in etable!')
        self.sortmet()
        self.makebasename()

        if args.applygti is not None:
            g = Table.read(args.applygti)
            if 'TIMEZERO' in g.meta:
                log.info(
                    'Applying TIMEZERO of {0} to gti in NicerFileSet'.format(
                        g.meta['TIMEZERO']))
                g['START'] += g.meta['TIMEZERO']
                g['STOP'] += g.meta['TIMEZERO']
                g.meta['TIMEZERO'] = 0.0
            log.info('Applying external GTI from {0}'.format(args.applygti))
            g['DURATION'] = g['STOP'] - g['START']
            # Only keep GTIs longer than 16 seconds
            g = g[np.where(g['DURATION'] > 16.0)]
            log.info('Applying external GTI')
            print(g)
            self.etable = apply_gti(self.etable, g)
            # Replacing this GTI does not work. It needs to be ANDed with the existing GTI
            self.etable.meta['EXPOSURE'] = g['DURATION'].sum()
            self.gtitable = g

        if args.gtirows is not None:
            log.info('Apply gti rows {}'.format(args.gtirows))
            g = self.gtitable[args.gtirows]
            print(g)
            self.etable = apply_gti(self.etable, g)
            self.gtitable = g

        #Compiling HK Data
        if args.extraphkshootrate:
            self.quickhkshootrate()
        else:
            self.hkshootrate()
        self.geteventshoots()
Code example #33
File: core.py Project: ruizca/astromatch
    def __xmatch(self, **kwargs):
        log.info('Using XMatch method:')

        self._match = XMatch(*self.catalogues)

        return self._match.run(**kwargs)
Code example #34
# Length of a chunk
chunklen = 30.0

if args.plot:
    import matplotlib.pyplot as plt

for pipedir in args.pipedirs:
    mkfname = glob(path.join(pipedir, "ni*.mkf"))[0]
    evtname = glob(path.join(pipedir, "cleanfilt.evt"))[0]
    outname = evtname.replace('.evt', '.bkgtab')
    #    outfile = open(evtname.replace('.evt','.bkgtab'),'w')
    #    print("{0:15s} {1:6s} {2:6s} {3:6s} {4:8s} {5:5s} {6:8s} {7:9s} {8:9s} {9:9s} {10:9s} {11:9s} {12:9s}".format(
    #    "# MET","Band1", "Band2", "Band3", "CORSAX", "SUN", "SUNANG", "OVERONLY","NOISE25","RATIOREJ","R1517", "IBG", "HREJ"),
    #    file=outfile)
    log.info("Processing {0} and {1}".format(evtname, mkfname))

    # Collect GTIs
    gtitable = Table.read(evtname, hdu=2)
    if 'TIMEZERO' in gtitable.meta:
        tz = gtitable.meta['TIMEZERO']
        # If there are multiple TIMEZERO entries in the header, just take the last
        if not np.isscalar(tz):
            tz = tz[-1]
        log.info(
            'Applying TIMEZERO of {0} to gtitable in NicerFileSet'.format(tz))
        gtitable['START'] += tz
        gtitable['STOP'] += tz
        gtitable.meta['TIMEZERO'] = 0.0
    log.info('Got the good times from GTI')
    gtitable['DURATION'] = gtitable['STOP'] - gtitable['START']
Code example #35
File: zima.py Project: paulray/PINT
def main(argv=None):
    import argparse

    parser = argparse.ArgumentParser(
        description="PINT tool for simulating TOAs")
    parser.add_argument("parfile", help="par file to read model from")
    parser.add_argument("timfile", help="Output TOA file name")
    parser.add_argument(
        "--inputtim",
        help="Input tim file for fake TOA sampling",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--startMJD",
        help="MJD of first fake TOA (default=56000.0)",
        type=float,
        default=56000.0,
    )
    parser.add_argument("--ntoa",
                        help="Number of fake TOAs to generate",
                        type=int,
                        default=100)
    parser.add_argument("--duration",
                        help="Span of TOAs to generate (days)",
                        type=float,
                        default=400.0)
    parser.add_argument("--obs",
                        help="Observatory code (default: GBT)",
                        default="GBT")
    parser.add_argument(
        "--freq",
        help="Frequency for TOAs (MHz) (default: 1400)",
        nargs="+",
        type=float,
        default=1400.0,
    )
    parser.add_argument(
        "--error",
        help="Random error to apply to each TOA (us, default=1.0)",
        type=float,
        default=1.0,
    )
    parser.add_argument(
        "--fuzzdays",
        help="Standard deviation of 'fuzz' distribution (jd) (default: 0.0)",
        type=float,
        default=0.0,
    )
    parser.add_argument("--plot",
                        help="Plot residuals",
                        action="store_true",
                        default=False)
    parser.add_argument("--ephem", help="Ephemeris to use", default="DE421")
    parser.add_argument(
        "--planets",
        help="Use planetary Shapiro delay",
        action="store_true",
        default=False,
    )
    parser.add_argument("--format",
                        help="The format of out put .tim file.",
                        default="TEMPO2")
    args = parser.parse_args(argv)

    log.info("Reading model from {0}".format(args.parfile))
    m = pint.models.get_model(args.parfile)

    out_format = args.format
    error = args.error * u.microsecond

    if args.inputtim is None:
        log.info("Generating uniformly spaced TOAs")
        duration = args.duration * u.day
        # start = Time(args.startMJD,scale='utc',format='pulsar_mjd',precision=9)
        start = np.longdouble(args.startMJD) * u.day
        freq = np.atleast_1d(args.freq) * u.MHz
        site = get_observatory(args.obs)
        scale = site.timescale

        times = np.linspace(0,
                            duration.to(u.day).value,
                            args.ntoa) * u.day + start

        # 'Fuzz' out times
        if args.fuzzdays > 0.0:
            fuzz = np.random.normal(scale=args.fuzzdays,
                                    size=len(times)) * u.day
            times += fuzz

        # Add multiple frequencies
        freq_array = get_freq_array(freq, len(times))
        tl = [
            toa.TOA(t.value, error=error, obs=args.obs, freq=f, scale=scale)
            for t, f in zip(times, freq_array)
        ]
        ts = toa.TOAs(toalist=tl)
    else:
        log.info("Reading initial TOAs from {0}".format(args.inputtim))
        ts = toa.TOAs(toafile=args.inputtim)
        ts.table["error"][:] = error

    # WARNING! I'm not sure how clock corrections should be handled here!
    # Do we apply them, or not?
    if not any(["clkcorr" in f for f in ts.table["flags"]]):
        log.info("Applying clock corrections.")
        ts.apply_clock_corrections()
    if "tdb" not in ts.table.colnames:
        log.info("Getting IERS params and computing TDBs.")
        ts.compute_TDBs(ephem=args.ephem)
    if "ssb_obs_pos" not in ts.table.colnames:
        log.info("Computing observatory positions and velocities.")
        ts.compute_posvels(args.ephem, args.planets)

    log.info("Creating TOAs")
    F_local = m.d_phase_d_toa(ts)
    rs = m.phase(ts).frac / F_local

    # Adjust the TOA times to put them where their residuals will be 0.0
    ts.adjust_TOAs(TimeDelta(-1.0 * rs))
    rspost = m.phase(ts).frac / F_local

    log.info("Second iteration")
    # Do a second iteration
    ts.adjust_TOAs(TimeDelta(-1.0 * rspost))

    err = np.random.randn(len(ts.table)) * error
    # Add the actual error fuzzing
    ts.adjust_TOAs(TimeDelta(err))

    # Write TOAs to a file
    ts.write_TOA_file(args.timfile, name="fake", format=out_format)

    if args.plot:
        # This should be a very boring plot with all residuals flat at 0.0!
        import matplotlib.pyplot as plt
        from astropy.visualization import quantity_support

        quantity_support()

        rspost2 = m.phase(ts).frac / F_local
        plt.errorbar(ts.get_mjds(),
                     rspost2.to(u.us),
                     yerr=ts.get_errors().to(u.us),
                     fmt=".")
        newts = pint.toa.get_TOAs(args.timfile,
                                  ephem=args.ephem,
                                  planets=args.planets)
        rsnew = m.phase(newts).frac / F_local
        plt.errorbar(newts.get_mjds(),
                     rsnew.to(u.us),
                     yerr=newts.get_errors().to(u.us),
                     fmt=".")
        # plt.plot(ts.get_mjds(),rspost.to(u.us),'x')
        plt.xlabel("MJD")
        plt.ylabel("Residual (us)")
        plt.grid(True)
        plt.show()
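Every flag below comes from the parser definitions above; the par and tim file names are placeholders. A sketch of driving the same parser through main():

# Equivalent to: python zima.py J0613-0200.par fake.tim --ntoa 200 --duration 800 --error 0.5 --plot
main([
    "J0613-0200.par",   # placeholder par file with the timing model
    "fake.tim",         # output TOA file to create
    "--ntoa", "200",
    "--duration", "800",
    "--error", "0.5",
    "--plot",
])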
Code example #36
File: test_model.py Project: mattpitkin/PINT
from astropy import log

from pinttestdata import testdir, datadir

log.setLevel('ERROR')
# for nice output info, set the following instead
#log.setLevel('INFO')
os.chdir(datadir)

parfile = 'J1744-1134.basic.par'
t1_parfile = 'J1744-1134.t1.par'
timfile = 'J1744-1134.Rcvr1_2.GASP.8y.x.tim'

m = tm.get_model(parfile)
log.info("model.as_parfile():\n%s" % m.as_parfile())
try:
    planets = m.PLANET_SHAPIRO.value
except AttributeError:
    planets = False

t0 = time.time()
t = toa.get_TOAs(timfile, planets=planets, include_bipm=False, usepickle=False)
time_toa = time.time() - t0
if log.level < 25:
    t.print_summary()
log.info("Read/corrected TOAs in %.3f sec" % time_toa)

mjds = t.get_mjds()
errs = t.get_errors()
Code example #37
    def _login(self,
               username=None,
               store_password=False,
               reenter_password=False):
        """
        Login to the NRAO archive

        Parameters
        ----------
        username : str, optional
            Username to the NRAO archive. If not given, it should be specified
            in the config file.
        store_password : bool, optional
            Stores the password securely in your keyring. Default is False.
        reenter_password : bool, optional
            Asks for the password even if it is already stored in the
            keyring. This is the way to overwrite an already stored passwork
            on the keyring. Default is False.
        """

        # Developer notes:
        # Login via https://my.nrao.edu/cas/login
        # # this can be added to auto-redirect back to the query tool: ?service=https://archive.nrao.edu/archive/advquery.jsp

        if username is None:
            if not self.USERNAME:
                raise LoginError("If you do not pass a username to login(), "
                                 "you should configure a default one!")
            else:
                username = self.USERNAME

        # Check if already logged in
        loginpage = self._request("GET",
                                  "https://my.nrao.edu/cas/login",
                                  cache=False)
        root = BeautifulSoup(loginpage.content, 'html5lib')
        if root.find('div', class_='success'):
            log.info("Already logged in.")
            return True

        # Get password from keyring or prompt
        if reenter_password is False:
            password_from_keyring = keyring.get_password(
                "astroquery:my.nrao.edu", username)
        else:
            password_from_keyring = None

        if password_from_keyring is None:
            if system_tools.in_ipynb():
                log.warning("You may be using an ipython notebook:"
                            " the password form will appear in your terminal.")
            password = getpass.getpass("{0}, enter your NRAO archive password:"******"\n".format(username))
        else:
            password = password_from_keyring
        # Authenticate
        log.info("Authenticating {0} on my.nrao.edu ...".format(username))
        # Do not cache pieces of the login process
        data = {
            kw: root.find('input', {'name': kw})['value']
            for kw in ('lt', '_eventId', 'execution')
        }
        data['username'] = username
        data['password'] = password
        data['execution'] = 'e1s1'  # not sure if needed
        data['_eventId'] = 'submit'
        data['submit'] = 'LOGIN'

        login_response = self._request("POST",
                                       "https://my.nrao.edu/cas/login",
                                       data=data,
                                       cache=False)

        authenticated = ('You have successfully logged in'
                         in login_response.text)

        if authenticated:
            log.info("Authentication successful!")
            self.USERNAME = username
        else:
            log.exception("Authentication failed!")
        # When authenticated, save password in keyring if needed
        if authenticated and password_from_keyring is None and store_password:
            keyring.set_password("astroquery:my.nrao.edu", username, password)

        return authenticated
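# Usage sketch for the login flow above. The class path below assumes the
# astroquery.nrao.Nrao singleton that exposes this method as login(); the
# username is a placeholder.
#
#     from astroquery.nrao import Nrao
#     Nrao.login("my_nrao_user", store_password=True)
#
# With store_password=True the credential is saved under the
# "astroquery:my.nrao.edu" keyring service used above, so later sessions can
# authenticate without prompting.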
Code example #38
def main(argv=None):
    parser = argparse.ArgumentParser(
        description="Command line interfact to PINT")
    parser.add_argument("parfile", help="par file to read model from")
    parser.add_argument("timfile", help="TOA file name")
    parser.add_argument(
        "--usepickle",
        help="Enable pickling of TOAs",
        action="store_true",
        default=False,
    )
    parser.add_argument("--outfile",
                        help="Output par file name (default=None)",
                        default=None)
    parser.add_argument("--plot",
                        help="Plot residuals",
                        action="store_true",
                        default=False)
    parser.add_argument("--plotfile", help="Plot file name", default=None)
    args = parser.parse_args(argv)

    log.info("Reading model from {0}".format(args.parfile))
    m = pint.models.get_model(args.parfile)

    log.warning(m.params)

    log.info("Reading TOAs")
    use_planets = False
    if m.PLANET_SHAPIRO.value:
        use_planets = True
    model_ephem = "DE421"
    if m.EPHEM is not None:
        model_ephem = m.EPHEM.value
    t = pint.toa.get_TOAs(args.timfile,
                          planets=use_planets,
                          ephem=model_ephem,
                          usepickle=args.usepickle)

    # turns pre-existing jump flags in t.table['flags'] into parameters in parfile
    m.jump_flags_to_params(t)

    if m.TRACK.value == "-2":
        if "pn" in t.table.colnames:
            log.info("Already have pulse numbers from TOA flags.")
        else:
            log.info("Adding pulse numbers")
            t.compute_pulse_numbers(m)

    prefit_resids = pint.residuals.Residuals(t, m).time_resids

    log.info("Fitting...")
    f = pint.fitter.WLSFitter(t, m)
    f.fit_toas()

    # Print fit summary
    print(
        "============================================================================"
    )
    f.print_summary()

    if args.plot:
        import matplotlib.pyplot as plt

        # Turn on support for plotting quantities
        from astropy.visualization import quantity_support

        quantity_support()

        fig, ax = plt.subplots(figsize=(8, 4.5))
        xt = t.get_mjds()
        ax.errorbar(xt,
                    prefit_resids.to(u.us),
                    t.get_errors().to(u.us),
                    fmt="o")
        ax.errorbar(xt,
                    f.resids.time_resids.to(u.us),
                    t.get_errors().to(u.us),
                    fmt="x")
        ax.set_title("%s Timing Residuals" % m.PSR.value)
        ax.set_xlabel("MJD")
        ax.set_ylabel("Residual (us)")
        ax.grid()
        if args.plotfile is not None:
            fig.savefig(args.plotfile)
        else:
            plt.show()

    if args.outfile is not None:
        fout = open(args.outfile, "w")
    else:
        fout = sys.stdout
        print("\nBest fit model is:")

    fout.write(f.model.as_parfile() + "\n")
    return 0
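# Invocation sketch for the command-line entry point above. The script name is a
# placeholder; the positional arguments and flags are the ones declared in the
# argparse setup, and the par/tim names reuse the files from the earlier example.
#
#     python fit_toas.py J1744-1134.basic.par J1744-1134.Rcvr1_2.GASP.8y.x.tim \
#         --plot --plotfile resids.pdf --outfile postfit.par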
Code example #39
File: topo_obs.py  Project: produit/PINT
 def clock_corrections(self, t):
     # Read clock file if necessary
     # TODO provide some method for re-reading the clock file?
     if self._clock is None:
         log.info('Observatory {0}, loading clock file {1}'.format(
             self.name, self.clock_fullpath))
         self._clock = ClockFile.read(self.clock_fullpath,
                                      format=self.clock_fmt,
                                      obscode=self.tempo_code)
     log.info('Evaluating observatory clock corrections.')
     corr = self._clock.evaluate(t)
     if self.include_gps:
         log.info('Applying GPS to UTC clock correction (~few nanoseconds)')
         if self._gps_clock is None:
             log.info('Observatory {0}, loading GPS clock file {1}'.format(
                 self.name, self.gps_fullpath))
             self._gps_clock = ClockFile.read(self.gps_fullpath,
                                              format='tempo2')
         corr += self._gps_clock.evaluate(t)
     if self.include_bipm:
         log.info('Applying TT(TAI) to TT(BIPM) clock correction (~27 us)')
         tt2tai = 32.184 * 1e6 * u.us
         if self._bipm_clock is None:
             try:
                 log.info(
                     'Observatory {0}, loading BIPM clock file {1}'.format(
                         self.name, self.bipm_fullpath))
                 self._bipm_clock = ClockFile.read(self.bipm_fullpath,
                                                   format='tempo2')
             except:
                 raise ValueError("Can not find TT BIPM file '%s'. " %
                                  self.bipm_version)
         corr += self._bipm_clock.evaluate(t) - tt2tai
     return corr
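# Usage sketch: for a topocentric PINT observatory the whole chain above
# (station clock, GPS->UTC, TT(TAI)->TT(BIPM)) is evaluated with a single call.
# The observatory code and MJDs are placeholders, and PINT's get_observatory
# API is assumed.
#
#     from astropy.time import Time
#     from pint.observatory import get_observatory
#
#     site = get_observatory('gbt')
#     corr = site.clock_corrections(Time([55555.0, 55556.0], format='mjd', scale='utc'))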
Code example #40
File: QA_plot.py  Project: astrochun/GNIRSLongSlit
def QA_combine(path0, targets0, out_pdf='', silent=False, verbose=True):
    '''
    Display sky-subtracted and shifted combined images for telluric and
    science data

    Parameters
    ----------
    path0 : str
      Full path to where output PDF and FITS file are located. Must end
      with a '/'

    targets0: list or numpy array
      A list or array of source names available through path0

    out_pdf : str
      Filename for output PDF. Do NOT include full path.
      Default: 'QA_combine.pdf'

    silent : boolean
      Turns off stdout messages. Default: False

    verbose : boolean
      Turns on additional stdout messages. Default: True

    Returns
    -------
    multi-page PDF plot, 'QA_combine.pdf'

    Notes
    -----
    Created by Chun Ly, 31 May 2017
    Modified by Chun Ly, 1 June 2017
     - Switch over to pyplot.imshow() since aplpy does not allow for as much customization
    '''

    if silent == False: log.info('### Begin QA_combine : ' + systime())

    out_pdf = path0 + 'QA_combine.pdf' if out_pdf == '' else path0 + out_pdf
    pp = PdfPages(out_pdf)

    for target in targets0:
        t_path = path0 + target + '/'

        dir_list, list_path = dir_check.main(t_path,
                                             silent=silent,
                                             verbose=verbose)

        for dpath in list_path:
            tel_file = glob.glob(dpath + 'tell_comb.fits')
            obj_file = glob.glob(dpath + 'obj_comb.fits')

            if len(tel_file) == 0 and len(obj_file) == 0:
                log.warning('## No tell_comb.fits and obj_comb.fits found in: ')
                log.warning('## ' + dpath)

            if len(tel_file) == 1 and len(obj_file) == 1:
                fig, (ax1, ax2) = plt.subplots(1, 2)  # Mod on 01/06/2017

            # Mod on 01/06/2017
            if len(tel_file) != 0:
                t_im, t_hdr = fits.getdata(tel_file[0], header=True)

                lam_max = t_hdr['CRVAL2'] + t_hdr['CD2_2'] * t_hdr['NAXIS2']
                extent = [0, t_hdr['NAXIS1'], t_hdr['CRVAL2'], lam_max]

                z1, z2 = zscale.get_limits(t_im)
                norm = ImageNormalize(vmin=z2, vmax=z1)
                ax1.imshow(t_im,
                           cmap='Greys',
                           origin='lower',
                           norm=norm,
                           extent=extent)
                yticks = np.array(ax1.get_yticks())
                ax1.set_yticklabels([val / 1e4 for val in yticks])

                ax1.get_yaxis().set_tick_params(which='major',
                                                direction='in',
                                                right=True,
                                                length=5,
                                                width=1)
                ax1.get_yaxis().set_tick_params(which='minor',
                                                direction='in',
                                                right=True,
                                                length=2.5)
                ax1.get_xaxis().set_tick_params(which='major',
                                                direction='in',
                                                top=True,
                                                length=5,
                                                width=1)
                ax1.get_xaxis().set_tick_params(which='minor',
                                                direction='in',
                                                top=True,
                                                length=2.5)
                ax1.minorticks_on()

                ax1.set_xlabel('X [pixels]', fontsize=14)
                ax1.set_ylabel(r'Wavelength ($\mu$m)', fontsize=14)

                ax1.annotate(tel_file[0], [0.025, 0.975],
                             xycoords='axes fraction',
                             ha='left',
                             va='top',
                             bbox=bbox_props)

            # Mod on 01/06/2017
            if len(obj_file) != 0:
                o_im, o_hdr = fits.getdata(obj_file[0], header=True)

                lam_max = o_hdr['CRVAL2'] + o_hdr['CD2_2'] * o_hdr['NAXIS2']
                extent = [0, o_hdr['NAXIS1'], o_hdr['CRVAL2'], lam_max]

                z1, z2 = zscale.get_limits(o_im)
                norm = ImageNormalize(vmin=z2, vmax=z1)
                ax2.imshow(o_im,
                           cmap='Greys',
                           origin='lower',
                           norm=norm,
                           extent=extent)
                yticks = np.array(ax2.get_yticks())
                ax2.set_yticklabels([val / 1e4 for val in yticks])

                ax2.get_yaxis().set_tick_params(which='major',
                                                direction='in',
                                                right=True,
                                                length=5,
                                                width=1)
                ax2.get_yaxis().set_tick_params(which='minor',
                                                direction='in',
                                                right=True,
                                                length=2.5)
                ax2.get_xaxis().set_tick_params(which='major',
                                                direction='in',
                                                top=True,
                                                length=5,
                                                width=1)
                ax2.get_xaxis().set_tick_params(which='minor',
                                                direction='in',
                                                top=True,
                                                length=2.5)
                ax2.minorticks_on()

                ax2.set_xlabel('X [pixels]', fontsize=14)
                ax2.set_ylabel('')
                ax2.set_yticklabels([])

                ax2.annotate(obj_file[0], [0.025, 0.975],
                             xycoords='axes fraction',
                             ha='left',
                             va='top',
                             bbox=bbox_props)

            if len(tel_file) == 1 and len(obj_file) == 1:  # Mod on 01/06/2017
                subplots_adjust(left=0.06,
                                bottom=0.06,
                                top=0.995,
                                right=0.99,
                                hspace=0.00,
                                wspace=0.00)
                fig.set_size_inches(11, 7.3)
                # fig.tight_layout()
                fig.savefig(pp, format='pdf')  #, bbox_inches='tight')

    if silent == False: log.info('## Writing : ' + out_pdf)
    pp.close()
    if silent == False: log.info('### End QA_combine : ' + systime())
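# Usage sketch; the path and target names are placeholders. Note that path0 must
# end with '/' as documented above, and a PDF page is added for each dataset
# found under each target.
#
#     QA_combine('/data/gnirs/2017a/', ['Target1', 'Target2'], out_pdf='QA_combine.pdf')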
Code example #41
File: ni_Htest_sortgti.py  Project: kerrm/NICERsoft
    if minexp is not None:
        exposure = np.cumsum(gti_len_s)
        # suppress S/N for values that do not satisfy exposure cuts
        mask = exposure / exposure[-1] < minexp
        sn[mask] = 0
        sn0[mask] = 0
        hs[mask] = 0

    return sn, sn0, hs, ph_gti, list(
        pi_gti), gti_rts_s, gti_len_s, gti_t0_s, gti_t1_s


if len(args.infile) == 1:
    if args.infile[0].startswith('@'):
        inputfile = args.infile[0].split('@')[1]
        log.info('Reading input ObsID list: {}'.format(inputfile))
        all_files = np.loadtxt(inputfile, dtype=str)
    else:
        all_files = args.infile
else:
    all_files = args.infile

data = load_files(all_files)
data_diced = data.dice_gtis(tmax=100)
data_diced = data_diced.apply_min_gti(args.mingti)
assert (data_diced.check_gti())

if args.writeevents:
    args.writegti = True

if args.writegti:
Code example #42
def fits2bitmap(filename,
                ext=0,
                out_fn=None,
                stretch='linear',
                power=1.0,
                asinh_a=0.1,
                min_cut=None,
                max_cut=None,
                min_percent=None,
                max_percent=None,
                percent=None,
                cmap='Greys_r'):
    """
    Create a bitmap file from a FITS image, applying a stretching
    transform between minimum and maximum cut levels and a matplotlib
    colormap.

    Parameters
    ----------
    filename : str
        The filename of the FITS file.
    ext : int
        FITS extension name or number of the image to convert.  The
        default is 0.
    out_fn : str
        The filename of the output bitmap image.  The type of bitmap
        is determined by the filename extension (e.g. '.jpg', '.png').
        The default is a PNG file with the same name as the FITS file.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}
        The stretching function to apply to the image.  The default is
        'linear'.
    power : float, optional
        The power index for ``stretch='power'``.  The default is 1.0.
    asinh_a : float, optional
        For ``stretch='asinh'``, the value where the asinh curve
        transitions from linear to logarithmic behavior, expressed as a
        fraction of the normalized image.  Must be in the range between
        0 and 1.  The default is 0.1.
    min_cut : float, optional
        The pixel value of the minimum cut level.  Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the image.
        The default is the image minimum.  ``min_cut`` overrides
        ``min_percent``.
    max_cut : float, optional
        The pixel value of the maximum cut level.  Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching the
        image.  The default is the image maximum.  ``max_cut`` overrides
        ``max_percent``.
    min_percent : float, optional
        The percentile value used to determine the pixel value of
        minimum cut level.  The default is 0.0.  ``min_percent``
        overrides ``percent``.
    max_percent : float, optional
        The percentile value used to determine the pixel value of
        maximum cut level.  The default is 100.0.  ``max_percent``
        overrides ``percent``.
    percent : float, optional
        The percentage of the image values used to determine the pixel
        values of the minimum and maximum cut levels.  The lower cut
        level will be set at the ``(100 - percent) / 2`` percentile, while
        the upper cut level will be set at the ``(100 + percent) / 2``
        percentile.  The default is 100.0.  ``percent`` is ignored if
        either ``min_percent`` or ``max_percent`` is input.
    cmap : str
        The matplotlib color map name.  The default is 'Greys_r'.
    """

    import matplotlib
    import matplotlib.cm as cm
    import matplotlib.image as mimg

    # __main__ gives ext as a string
    try:
        ext = int(ext)
    except ValueError:
        pass

    try:
        image = getdata(filename, ext)
    except Exception as e:
        log.critical(e)
        return 1

    if image.ndim != 2:
        log.critical(
            'data in FITS extension {0} is not a 2D array'.format(ext))
        return 1

    if out_fn is None:
        out_fn = os.path.splitext(filename)[0]
        if out_fn.endswith('.fits'):
            out_fn = os.path.splitext(out_fn)[0]
        out_fn += '.png'

    # need to explicitly define the output format due to a bug in
    # matplotlib (<= 2.1), otherwise the format will always be PNG
    out_format = os.path.splitext(out_fn)[1][1:]

    # workaround for matplotlib 2.0.0 bug where png images are inverted
    # (mpl-#7656)
    if (out_format.lower() == 'png'
            and LooseVersion(matplotlib.__version__) == LooseVersion('2.0.0')):
        image = image[::-1]

    try:
        cm.get_cmap(cmap)
    except ValueError:
        log.critical(
            '{0} is not a valid matplotlib colormap name.'.format(cmap))
        return 1

    norm = simple_norm(image,
                       stretch=stretch,
                       power=power,
                       asinh_a=asinh_a,
                       min_cut=min_cut,
                       max_cut=max_cut,
                       min_percent=min_percent,
                       max_percent=max_percent,
                       percent=percent)

    mimg.imsave(out_fn,
                norm(image),
                cmap=cmap,
                origin='lower',
                format=out_format)
    log.info('Saved file to {0}.'.format(out_fn))
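# Usage sketch; the FITS filename is a placeholder. With out_fn left at its
# default this writes 'm51.png' next to the input, using an asinh stretch and a
# 99.5 per cent interval cut with the 'viridis' colormap.
#
#     fits2bitmap('m51.fits', stretch='asinh', percent=99.5, cmap='viridis')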
Code example #43
    password = args.password

table_list = [
    'filter',
    'magabdualobj',
    'photozlephare_updated',
    'magabsingleobj',
    'tileimage',
    'stargalclass',
    'xmatch_jplus_dr1',
    'xmatch_sdss_dr12',
    'xmatch_deep2_spec',
    #'xmatch_alhambra', # xmatch_alhambra has invalid values which mess up the VOTable parser.
]

log.info('Connecting to %s.' % args.serviceUrl)
tm = TAPQueueManager(args.serviceUrl, args.tablesDir)
tm.connect(login, password)

if path.exists(args.jobList):
    tm.loadJobList(args.jobList)

for tab in table_list:
    log.info('Requesting table %s.' % tab)
    tm.requestTable(tab, force=args.forceResub, maxrec=args.maxrec)
    if args.overwrite:
        tm.removeDownload(tab)

tm.saveJobList(args.jobList)

log.info('Downloading tables to %s.' % args.tablesDir)
Code example #44
File: ni_Htest_sortgti.py  Project: kerrm/NICERsoft
def runcmd(cmd):
    # CMD should be a list of strings; it is joined into a single command string
    # and passed to os.system (which runs it through a shell)
    log.info('CMD: ' + " ".join(cmd))
    os.system(" ".join(cmd))
Code example #45
File: ccddata.py  Project: weaverba137/astropy
def _generate_wcs_and_update_header(hdr):
    """
    Generate a WCS object from a header and remove the WCS-specific
    keywords from the header.

    Parameters
    ----------

    hdr : astropy.io.fits.header or other dict-like

    Returns
    -------

    new_header, wcs
    """

    # Try constructing a WCS object.
    try:
        wcs = WCS(hdr)
    except Exception as exc:
        # Normally WCS only raises Warnings and doesn't fail but in rare
        # cases (malformed header) it could fail...
        log.info('An exception happened while extracting WCS information from '
                 'the Header.\n{}: {}'.format(type(exc).__name__, str(exc)))
        return hdr, None
    # Test for success by checking to see if the wcs ctype has a non-empty
    # value, return None for wcs if ctype is empty.
    if not wcs.wcs.ctype[0]:
        return (hdr, None)

    new_hdr = hdr.copy()
    # If the keywords below are in the header they are also added to WCS.
    # It seems like they should *not* be removed from the header, though.

    wcs_header = wcs.to_header(relax=True)
    for k in wcs_header:
        if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
            new_hdr.remove(k, ignore_missing=True)

    # Check that this does not result in an inconsistent header WCS if the WCS
    # is converted back to a header.

    if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
        # The PCi_j representation is used by the astropy.wcs object,
        # so CDi_j keywords were not removed from new_hdr. Remove them now.
        for cd in _CDs:
            new_hdr.remove(cd, ignore_missing=True)

    # The other case -- CD in the header produced by astropy.wcs -- should
    # never happen based on [1], which computes the matrix in PC form.
    # [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
    #
    # The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
    # check for the possibility that both PC and CD are present in the result
    # so if the implementation of to_header changes in wcslib in the future
    # then the tests should catch it, and then this code will need to be
    # updated.

    # We need to check for any SIP coefficients that got left behind if the
    # header has SIP.
    if wcs.sip is not None:
        keyword = '{}_{}_{}'
        polynomials = ['A', 'B', 'AP', 'BP']
        for poly in polynomials:
            order = wcs.sip.__getattribute__(f'{poly.lower()}_order')
            for i, j in itertools.product(range(order), repeat=2):
                new_hdr.remove(keyword.format(poly, i, j), ignore_missing=True)

    return (new_hdr, wcs)
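# Usage sketch, assuming a header read with astropy.io.fits; the filename is a
# placeholder.
#
#     from astropy.io import fits
#     hdr = fits.getheader('frame.fits')
#     clean_hdr, wcs = _generate_wcs_and_update_header(hdr)
#     # clean_hdr keeps the non-WCS metadata; wcs is None when no usable WCS is found.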
Code example #46
def main():
    script = os.path.splitext(os.path.basename(__file__))[0]
    log.info("[SCRIPT] {}".format(script))

    parser = argparse.ArgumentParser(
        description='Display each event in the file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-f',
                        '--file',
                        dest='input_path',
                        action='store',
                        default=get_path('gamma_test.simtel.gz'),
                        help='path to the input file')
    parser.add_argument('-O',
                        '--origin',
                        dest='origin',
                        action='store',
                        choices=InputFile.origin_list(),
                        default='hessio',
                        help='origin of the file')
    parser.add_argument('-D',
                        dest='display',
                        action='store_true',
                        default=False,
                        help='display the camera events')
    parser.add_argument('--pdf',
                        dest='output_path',
                        action='store',
                        default=None,
                        help='path to store a pdf output of the plots')
    parser.add_argument('-t',
                        '--telescope',
                        dest='tel',
                        action='store',
                        type=int,
                        default=None,
                        help='telecope to view. Default = All')
    parser.add_argument('--calib-help',
                        dest='calib_help',
                        action='store_true',
                        default=False,
                        help='display the arguments used for the camera '
                        'calibration')

    logger_detail = parser.add_mutually_exclusive_group()
    logger_detail.add_argument('-q',
                               '--quiet',
                               dest='quiet',
                               action='store_true',
                               default=False,
                               help='Quiet mode')
    logger_detail.add_argument('-v',
                               '--verbose',
                               dest='verbose',
                               action='store_true',
                               default=False,
                               help='Verbose mode')
    logger_detail.add_argument('-d',
                               '--debug',
                               dest='debug',
                               action='store_true',
                               default=False,
                               help='Debug mode')

    args, excess_args = parser.parse_known_args()

    params, unknown_args = calibration_parameters(excess_args, args.origin,
                                                  args.calib_help)

    if unknown_args:
        parser.print_help()
        calibration_parameters(unknown_args, args.origin, True)
        msg = 'unrecognized arguments: %s'
        parser.error(msg % ' '.join(unknown_args))

    if args.quiet:
        log.setLevel(40)
    if args.verbose:
        log.setLevel(20)
    if args.debug:
        log.setLevel(10)

    log.debug("[file] Reading file")
    input_file = InputFile(args.input_path, args.origin)
    source = input_file.read()

    # geom_dict is a dictionary of CameraGeometry, with keys of
    # tel_id. By using these keys, the geometry is
    # calculated only once per telescope, reducing computation
    # time.
    # Creating a geom_dict at this point is optional, but is recommended, as
    # the same geom_dict can then be shared between the calibration and
    # CameraPlotter, again reducing computation time.
    # The dictionary becomes filled as a result of a dictionary's mutable
    # nature.
    geom_dict = {}

    # Calibrate events and fill geom_dict

    calibrated_source = calibrate_source(source, params, geom_dict)

    fig = plt.figure(figsize=(16, 7))
    if args.display:
        plt.show(block=False)
    pp = PdfPages(args.output_path) if args.output_path is not None else None
    for event in calibrated_source:
        tels = list(event.dl0.tels_with_data)
        if args.tel is None:
            tel_loop = tels
        else:
            if args.tel not in tels:
                continue
            tel_loop = [args.tel]
        log.debug(tels)
        for tel_id in tel_loop:
            display_telescope(event, tel_id, args.display, geom_dict, pp, fig)
    if pp is not None:
        pp.close()

    log.info("[COMPLETE]")
Code example #47
def load_Fermi_TOAs(
    ft1name,
    weightcolumn=None,
    targetcoord=None,
    logeref=4.1,
    logesig=0.5,
    minweight=0.0,
    minmjd=-np.inf,
    maxmjd=np.inf,
    fermiobs="Fermi",
):
    """
    toalist = load_Fermi_TOAs(ft1name)
      Read photon event times out of a Fermi FT1 file and return
      a list of PINT TOA objects.
      Correctly handles raw FT1 files, or ones processed with gtbary
      to have barycentered or geocentered TOAs.


    Parameters
    ----------
    weightcolumn : str
        Specifies the FITS column name to read the photon weights from.
        The special value 'CALC' causes the weights to be computed 
        empirically as in Philippe Bruel's SearchPulsation code.
    targetcoord : astropy.SkyCoord
        Source coordinate for weight computation if weightcolumn=='CALC'
    logeref : float
        Parameter for the weight computation if weightcolumn=='CALC'
    logesig : float
        Parameter for the weight computation if weightcolumn=='CALC'
    minweight : float
        If weights are loaded or computed, exclude events with smaller weights.
    minmjd : float
        Events with earlier MJDs are excluded.
    maxmjd : float
        Events with later MJDs are excluded.
    fermiobs: str
      The default observatory name is Fermi, and must have already been
      registered.  The user can specify another name 

    Returns
    -------
    toalist : list
        A list of TOA objects corresponding to the Fermi events.
    """

    # Load photon times from FT1 file
    hdulist = fits.open(ft1name)
    ft1hdr = hdulist[1].header
    ft1dat = hdulist[1].data

    # TIMESYS will be 'TT' for unmodified Fermi LAT events (or geocentered), and
    #                 'TDB' for events barycentered with gtbary
    # TIMEREF will be 'GEOCENTER' for geocentered events,
    #                 'SOLARSYSTEM' for barycentered,
    #             and 'LOCAL' for unmodified events

    timesys = ft1hdr["TIMESYS"]
    log.info("TIMESYS {0}".format(timesys))
    timeref = ft1hdr["TIMEREF"]
    log.info("TIMEREF {0}".format(timeref))

    # Read time column from FITS file
    mjds = read_fits_event_mjds_tuples(hdulist[1])
    if len(mjds) == 0:
        log.error("No MJDs read from file!")
        raise

    energies = ft1dat.field("ENERGY") * u.MeV
    if weightcolumn is not None:
        if weightcolumn == "CALC":
            photoncoords = SkyCoord(
                ft1dat.field("RA") * u.degree,
                ft1dat.field("DEC") * u.degree,
                frame="icrs",
            )
            weights = calc_lat_weights(
                ft1dat.field("ENERGY"),
                photoncoords.separation(targetcoord),
                logeref=logeref,
                logesig=logesig,
            )
        else:
            weights = ft1dat.field(weightcolumn)
        if minweight > 0.0:
            idx = np.where(weights > minweight)[0]
            mjds = mjds[idx]
            energies = energies[idx]
            weights = weights[idx]

    # limit the TOAs to ones in selected MJD range
    mjds_float = np.asarray([r[0] + r[1] for r in mjds])
    idx = (minmjd < mjds_float) & (mjds_float < maxmjd)
    mjds = mjds[idx]
    energies = energies[idx]
    if weightcolumn is not None:
        weights = weights[idx]

    if timesys == "TDB":
        log.info("Building barycentered TOAs")
        obs = "Barycenter"
        scale = "tdb"
        msg = "barycentric"
    elif (timesys == "TT") and (timeref == "LOCAL"):
        assert timesys == "TT"
        try:
            get_observatory(fermiobs)
        except KeyError:
            log.error(
                "%s observatory not defined. Make sure you have specified an FT2 file!"
                % fermiobs)
            raise
        obs = fermiobs
        scale = "tt"
        msg = "spacecraft local"
    elif (timesys == "TT") and (timeref == "GEOCENTRIC"):
        obs = "Geocenter"
        scale = "tt"
        msg = "geocentric"
    else:
        raise ValueError("Unrecognized TIMEREF/TIMESYS.")

    log.info("Building {0} TOAs, with MJDs in range {1} to {2}".format(
        msg, mjds[0, 0] + mjds[0, 1], mjds[-1, 0] + mjds[-1, 1]))
    if weightcolumn is None:
        toalist = [
            toa.TOA(m, obs=obs, scale=scale, energy=e, error=1.0 * u.us)
            for m, e in zip(mjds, energies)
        ]
    else:
        toalist = [
            toa.TOA(m,
                    obs=obs,
                    scale=scale,
                    energy=e,
                    weight=w,
                    error=1.0 * u.us)
            for m, e, w in zip(mjds, energies, weights)
        ]

    return toalist
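# Usage sketch; the FT1 filename and target position are placeholders, and
# pint.toa.get_TOAs_list is assumed for turning the returned list into a TOAs object.
#
#     from astropy.coordinates import SkyCoord
#     import pint.toa as toa
#
#     target = SkyCoord(83.633, 22.014, unit='deg', frame='icrs')
#     tl = load_Fermi_TOAs('ft1_events.fits', weightcolumn='CALC',
#                          targetcoord=target, minweight=0.05)
#     ts = toa.get_TOAs_list(tl, ephem='DE421', planets=False)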
Code example #48
File: ccddata.py  Project: weaverba137/astropy
def fits_ccddata_reader(filename,
                        hdu=0,
                        unit=None,
                        hdu_uncertainty='UNCERT',
                        hdu_mask='MASK',
                        hdu_flags=None,
                        key_uncertainty_type='UTYPE',
                        **kwd):
    """
    Generate a CCDData object from a FITS file.

    Parameters
    ----------
    filename : str
        Name of fits file.

    hdu : int, str, tuple of (str, int), optional
        Index or other identifier of the Header Data Unit of the FITS
        file from which CCDData should be initialized. If zero and
        no data in the primary HDU, it will search for the first
        extension HDU with data. The header will be added to the primary HDU.
        Default is ``0``.

    unit : `~astropy.units.Unit`, optional
        Units of the image data. If this argument is provided and there is a
        unit for the image in the FITS header (the keyword ``BUNIT`` is used
        as the unit, if present), this argument is used for the unit.
        Default is ``None``.

    hdu_uncertainty : str or None, optional
        FITS extension from which the uncertainty should be initialized. If the
        extension does not exist the uncertainty of the CCDData is ``None``.
        Default is ``'UNCERT'``.

    hdu_mask : str or None, optional
        FITS extension from which the mask should be initialized. If the
        extension does not exist the mask of the CCDData is ``None``.
        Default is ``'MASK'``.

    hdu_flags : str or None, optional
        Currently not implemented.
        Default is ``None``.

    key_uncertainty_type : str, optional
        The header key name where the class name of the uncertainty  is stored
        in the hdu of the uncertainty (if any).
        Default is ``UTYPE``.

        .. versionadded:: 3.1

    kwd :
        Any additional keyword parameters are passed through to the FITS reader
        in :mod:`astropy.io.fits`; see Notes for additional discussion.

    Notes
    -----
    FITS files that contained scaled data (e.g. unsigned integer images) will
    be scaled and the keywords used to manage scaled data in
    :mod:`astropy.io.fits` are disabled.
    """
    unsupport_open_keywords = {
        'do_not_scale_image_data': 'Image data must be scaled.',
        'scale_back': 'Scale information is not preserved.'
    }
    for key, msg in unsupport_open_keywords.items():
        if key in kwd:
            prefix = f'unsupported keyword: {key}.'
            raise TypeError(' '.join([prefix, msg]))
    with fits.open(filename, **kwd) as hdus:
        hdr = hdus[hdu].header

        if hdu_uncertainty is not None and hdu_uncertainty in hdus:
            unc_hdu = hdus[hdu_uncertainty]
            stored_unc_name = unc_hdu.header.get(key_uncertainty_type, 'None')
            # For compatibility reasons the default is standard deviation
            # uncertainty because files could have been created before the
            # uncertainty type was stored in the header.
            unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
            uncertainty = unc_type(unc_hdu.data)
        else:
            uncertainty = None

        if hdu_mask is not None and hdu_mask in hdus:
            # Mask is saved as uint but we want it to be boolean.
            mask = hdus[hdu_mask].data.astype(np.bool_)
        else:
            mask = None

        if hdu_flags is not None and hdu_flags in hdus:
            raise NotImplementedError('loading flags is currently not '
                                      'supported.')

        # search for the first instance with data if
        # the primary header is empty.
        if hdu == 0 and hdus[hdu].data is None:
            for i in range(len(hdus)):
                if (hdus.info(hdu)[i][3] == 'ImageHDU'
                        and hdus.fileinfo(i)['datSpan'] > 0):
                    hdu = i
                    comb_hdr = hdus[hdu].header.copy()
                    # Add header values from the primary header that aren't
                    # present in the extension header.
                    comb_hdr.extend(hdr, unique=True)
                    hdr = comb_hdr
                    log.info(f"first HDU with data is extension {hdu}.")
                    break

        if 'bunit' in hdr:
            fits_unit_string = hdr['bunit']
            # patch to handle FITS files using ADU for the unit instead of the
            # standard version of 'adu'
            if fits_unit_string.strip().lower() == 'adu':
                fits_unit_string = fits_unit_string.lower()
        else:
            fits_unit_string = None

        if fits_unit_string:
            if unit is None:
                # Convert the BUNIT header keyword to a unit and if that's not
                # possible raise a meaningful error message.
                try:
                    kifus = CCDData.known_invalid_fits_unit_strings
                    if fits_unit_string in kifus:
                        fits_unit_string = kifus[fits_unit_string]
                    fits_unit_string = u.Unit(fits_unit_string)
                except ValueError:
                    raise ValueError(
                        'The Header value for the key BUNIT ({}) cannot be '
                        'interpreted as valid unit. To successfully read the '
                        'file as CCDData you can pass in a valid `unit` '
                        'argument explicitly or change the header of the FITS '
                        'file before reading it.'.format(fits_unit_string))
            else:
                log.info("using the unit {} passed to the FITS reader instead "
                         "of the unit {} in the FITS file.".format(
                             unit, fits_unit_string))

        use_unit = unit or fits_unit_string
        hdr, wcs = _generate_wcs_and_update_header(hdr)
        ccd_data = CCDData(hdus[hdu].data,
                           meta=hdr,
                           unit=use_unit,
                           mask=mask,
                           uncertainty=uncertainty,
                           wcs=wcs)

    return ccd_data
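# In practice this reader is usually reached through CCDData.read, which astropy
# registers for the FITS format; the filename and unit below are placeholders.
#
#     from astropy.nddata import CCDData
#     ccd = CCDData.read('frame.fits', unit='adu')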
Code example #49
    return spectra


if __name__ == "__main__":

    pl.ioff()
    pl.close(1)
    pl.figure(1).clf()

    radexfit = False  # not super useful...

    regs = (pyregion.open(regpath + 'spectral_apertures.reg') +
            pyregion.open(regpath + 'target_fields_8x8_gal.reg'))

    #regs = regs[:8]
    log.info(str({r.attr[1]['text']: r for r in regs}))

    name_column = table.Column(data=[
        reg.attr[1]['text'] for reg in regs
        for ii in range(pars[reg.attr[1]['text']]['ncomp'])
    ],
                               name='Source_Name')
    comp_id_column = table.Column(data=[0] * name_column.size,
                                  name='ComponentID')
    lon_column = table.Column(data=[
        reg.coord_list[0] for reg in regs
        for ii in range(pars[reg.attr[1]['text']]['ncomp'])
    ],
                              name='GLON')
    lat_column = table.Column(data=[
        reg.coord_list[1] for reg in regs
Code example #50
    def retrieve_data(self, datasets, cache=True):
        """
        Retrieve a list of datasets from the ESO archive.

        Parameters
        ----------
        datasets : list of strings or string
            List of datasets strings to retrieve from the archive.
        cache : bool
            Cache the retrieval forms (not the data - they are downloaded
            independently of this keyword)

        Returns
        -------
        files : list of strings or string
            List of files that have been locally downloaded from the archive.

        Examples
        --------
        >>> dptbl = Eso.query_instrument('apex', pi_coi='ginsburg')
        >>> dpids = [row['DP.ID'] for row in dptbl if 'Map' in row['Object']]
        >>> files = Eso.retrieve_data(dpids)

        """
        datasets_to_download = []
        files = []

        if isinstance(datasets, six.string_types):
            return_list = False
            datasets = [datasets]
        else:
            return_list = True
        if not isinstance(datasets, (list, tuple, np.ndarray)):
            raise TypeError("Datasets must be given as a list of strings.")

        # First: Detect datasets already downloaded
        for dataset in datasets:
            if os.path.splitext(dataset)[1].lower() in ('.fits', '.tar'):
                local_filename = dataset
            else:
                local_filename = dataset + ".fits"

            if self.cache_location is not None:
                local_filename = os.path.join(self.cache_location,
                                              local_filename)
            if os.path.exists(local_filename):
                log.info("Found {0}.fits...".format(dataset))
                files.append(local_filename)
            elif os.path.exists(local_filename + ".Z"):
                log.info("Found {0}.fits.Z...".format(dataset))
                files.append(local_filename + ".Z")
            elif os.path.exists(local_filename + ".fz"):  # RICE-compressed
                log.info("Found {0}.fits.fz...".format(dataset))
                files.append(local_filename + ".fz")
            else:
                datasets_to_download.append(dataset)

        valid_datasets = [self.verify_data_exists(ds)
                          for ds in datasets_to_download]
        if not all(valid_datasets):
            invalid_datasets = [ds for ds, v in zip(datasets_to_download,
                                                    valid_datasets) if not v]
            raise ValueError("The following data sets were not found on the "
                             "ESO servers: {0}".format(invalid_datasets))

        # Second: Download the other datasets
        if datasets_to_download:
            if not self.authenticated():
                self.login()
            url = "http://archive.eso.org/cms/eso-data/eso-data-direct-retrieval.html"
            data_retrieval_form = self._request("GET", url, cache=cache)
            log.info("Staging request...")
            with suspend_cache(self):  # Never cache staging operations
                inputs = {"list_of_datasets": "\n".join(datasets_to_download)}
                data_confirmation_form = self._activate_form(
                    data_retrieval_form, form_index=-1, inputs=inputs)

                root = BeautifulSoup(data_confirmation_form.content,
                                     'html5lib')
                login_button = root.select('input[value=LOGIN]')
                if login_button:
                    raise LoginError("Not logged in. "
                                     "You must be logged in to download data.")

                # TODO: There may be another screen for Not Authorized; that
                # should be included too
                data_download_form = self._activate_form(
                    data_confirmation_form, form_index=-1)
                log.info("Staging form is at {0}."
                         .format(data_download_form.url))
                root = BeautifulSoup(data_download_form.content, 'html5lib')
                state = root.select('span[id=requestState]')[0].text
                t0 = time.time()
                while state not in ('COMPLETE', 'ERROR'):
                    time.sleep(2.0)
                    data_download_form = self._request("GET",
                                                       data_download_form.url,
                                                       cache=False)
                    root = BeautifulSoup(data_download_form.content,
                                         'html5lib')
                    state = root.select('span[id=requestState]')[0].text
                    print("{0:20.0f}s elapsed"
                          .format(time.time() - t0), end='\r')
                    sys.stdout.flush()
                if state == 'ERROR':
                    raise RemoteServiceError("There was a remote service "
                                             "error; perhaps the requested "
                                             "file could not be found?")
            log.info("Downloading files...")
            for fileId in root.select('input[name=fileId]'):
                fileLink = ("http://dataportal.eso.org/dataPortal" +
                            fileId.attrs['value'].split()[1])
                filename = self._request("GET", fileLink, save=True)
                files.append(system_tools.gunzip(filename))
        # Empty the redirect cache of this request session
        self._session.redirect_cache.clear()
        log.info("Done!")
        if (not return_list) and (len(files) == 1):
            files = files[0]
        return files
Code example #51
def get_sampler(
    data_table=None,
    p0=None,
    model=None,
    prior=None,
    nwalkers=500,
    nburn=100,
    guess=True,
    interactive=False,
    prefit=False,
    labels=None,
    threads=4,
    data_sed=None,
):
    """Generate a new MCMC sampler.

    Parameters
    ----------
    data_table : `~astropy.table.Table` or list of `~astropy.table.Table`
        Table containing the observed spectrum. If multiple tables are passed
        as a list, they will be concatenated in the order given. Each table
        needs at least these columns, with the appropriate associated units
        (with the physical type indicated in brackets below) as either a
        `~astropy.units.Unit` instance or parseable string:

        - ``energy``: Observed photon energy [``energy``]
        - ``flux``: Observed fluxes [``flux`` or ``differential flux``]
        - ``flux_error``: 68% CL gaussian uncertainty of the flux [``flux`` or
          ``differential flux``]. It can also be provided as ``flux_error_lo``
          and ``flux_error_hi`` (see below).

        Optional columns:

        - ``energy_width``: Width of the energy bin [``energy``], or
        - ``energy_error``: Half-width of the energy bin [``energy``], or
        - ``energy_error_lo`` and ``energy_error_hi``: Distance from bin center
          to lower and upper bin edges [``energy``], or
        - ``energy_lo`` and ``energy_hi``: Energy edges of the corresponding
          energy bin [``energy``]
        - ``flux_error_lo`` and ``flux_error_hi``: 68% CL gaussian lower and
          upper uncertainties of the flux.
        - ``ul``: Flag to indicate that a flux measurement is an upper limit.
        - ``flux_ul``: Upper limit to the flux. If not present, the ``flux``
          column will be taken as an upper limit for those measurements with
          the ``ul`` flag set to True or 1.

        The ``keywords`` metadata field of the table can be used to provide the
        confidence level of the upper limits with the keyword ``cl``, which
        defaults to 90%. The `astropy.io.ascii` reader can recover all
        the needed information from ASCII tables in the
        :class:`~astropy.io.ascii.Ipac` and :class:`~astropy.io.ascii.Daophot`
        formats, and everything except the ``cl`` keyword from tables in the
        :class:`~astropy.io.ascii.Sextractor` format.  For the latter format, the cl
        keyword can be added after reading the table with::

            data.meta['keywords']['cl']=0.99

    p0 : array
        Initial position vector. The distribution for the ``nwalkers`` walkers
        will be computed as a multidimensional gaussian of width 5% around the
        initial position vector ``p0``.
    model : function
        A function that takes a vector in the parameter space and the data
        dictionary, and returns the expected fluxes at the energies in the
        spectrum. Additional return objects will be saved as blobs in the
        sampler chain, see `the emcee documentation for the
        format
        <http://dan.iel.fm/emcee/current/user/advanced/#arbitrary-metadata-blobs>`_.
    prior : function, optional
        A function that takes a vector in the parameter space and returns the
        log-likelihood of the Bayesian prior. Parameter limits can be specified
        through a uniform prior, returning 0. if the vector is within the
        parameter bounds and ``-np.inf`` otherwise.
    nwalkers : int, optional
        The number of Goodman & Weare “walkers”. Default is 500.
    nburn : int, optional
        Number of burn-in steps. After ``nburn`` steps, the sampler is reset
        and chain history discarded. It is necessary to settle the sampler into
        the maximum of the parameter space density. Default is 100.
    labels : iterable of strings, optional
        Labels for the parameters included in the position vector ``p0``. If
        not provided ``['par1','par2', ... ,'parN']`` will be used.
    threads : int, optional
        Number of threads to use for sampling. Default is 4.
    guess : bool, optional
        Whether to attempt to guess the normalization (first) parameter of the
        model. Default is True.
    interactive : bool, optional
        Whether to launch the interactive fitting window to set the initial
        values for the prefitting or the MCMC run. Requires matplotlib. Default
        is False.
    prefit : bool, optional
        Whether to attempt to find the maximum likelihood parameters with a
        Nelder-Mead algorithm and use them as starting point of the MCMC run.
        The parameter values in `p0` will be used as starting points for the
        minimization. Note that the initial optimization is done without taking
        the prior function into account to avoid the possibility of infinite
        values in the objective function. If the best-fit parameter vector
        without prior is forbidden by the prior given, it will be discarded.
    data_sed : bool, optional
        When providing more than one data table, whether to convert them to SED
        format. If unset or None, all tables will be converted to the format of
        the first table.

    Returns
    -------
    sampler : :class:`~emcee.EnsembleSampler` instance
        Ensemble sampler with walker positions after ``nburn`` burn-in steps.
    pos : :class:`~numpy.ndarray`
        Final position vector array.

    See also
    --------
    emcee.EnsembleSampler
    """
    import emcee

    if data_table is None:
        raise TypeError("Data table is missing!")
    else:
        data = validate_data_table(data_table, sed=data_sed)

    if model is None:
        raise TypeError("Model function is missing!")

    # Add parameter labels if not provided or too short
    if labels is None:
        # First is normalization
        labels = ["norm"] + ["par{0}".format(i) for i in range(1, len(p0))]
    elif len(labels) < len(p0):
        labels += ["par{0}".format(i) for i in range(len(labels), len(p0))]

    # Check that the model returns fluxes in same physical type as data
    modelout = model(p0, data)
    if (type(modelout) == tuple or type(modelout) == list) and (
        type(modelout) != np.ndarray
    ):
        spec = modelout[0]
    else:
        spec = modelout

    # check whether both can be converted to same physical type through
    # sed_conversion
    try:
        # If both can be converted to differential flux, they can be compared
        # Otherwise, sed_conversion will raise a u.UnitsError
        sed_conversion(data["energy"], spec.unit, False)
        sed_conversion(data["energy"], data["flux"].unit, False)
    except u.UnitsError:
        raise u.UnitsError(
            "The physical type of the model and data units are not compatible,"
            " please modify your model or data so they match:\n"
            " Model units: {0} [{1}]\n Data units: {2} [{3}]\n".format(
                spec.unit,
                spec.unit.physical_type,
                data["flux"].unit,
                data["flux"].unit.physical_type,
            )
        )

    if guess:
        normNames = ["norm", "Norm", "ampl", "Ampl", "We", "Wp"]
        normNameslog = ["log({0}".format(name) for name in normNames]
        normNameslog10 = ["log10({0}".format(name) for name in normNames]
        normNames += normNameslog + normNameslog10
        idxs = []
        for l in normNames:
            for l2 in labels:
                if l2.startswith(l):
                    # check with startswith to include normalization,
                    # amplitude, etc.
                    idxs.append(labels.index(l2))

        if len(idxs) == 1:

            nunit, sedf = sed_conversion(data["energy"], spec.unit, False)
            currFlux = np.trapz(
                data["energy"] * (spec * sedf).to(nunit), data["energy"]
            )
            nunit, sedf = sed_conversion(
                data["energy"], data["flux"].unit, False
            )
            dataFlux = np.trapz(
                data["energy"] * (data["flux"] * sedf).to(nunit),
                data["energy"],
            )
            ratio = dataFlux / currFlux
            if labels[idxs[0]].startswith("log("):
                p0[idxs[0]] += np.log(ratio)
            elif labels[idxs[0]].startswith("log10("):
                p0[idxs[0]] += np.log10(ratio)
            else:
                p0[idxs[0]] *= ratio

        elif len(idxs) == 0:
            log.warning(
                "No label starting with [{0}] found: not applying"
                " normalization guess.".format(",".join(normNames))
            )
        elif len(idxs) > 1:
            log.warning(
                "More than one label starting with [{0}] found:"
                " not applying normalization guess.".format(
                    ",".join(normNames)
                )
            )

    P0_IS_ML = False
    if interactive:
        try:
            log.info("Launching interactive model fitter, close when finished")
            from .model_fitter import InteractiveModelFitter
            import matplotlib.pyplot as plt

            iprev = plt.rcParams["interactive"]
            plt.rcParams["interactive"] = False
            imf = InteractiveModelFitter(
                model, p0, data, labels=labels, sed=True
            )
            p0 = imf.pars
            P0_IS_ML = imf.P0_IS_ML
            plt.rcParams["interactive"] = iprev
        except ImportError as e:
            log.warning(
                "Interactive fitting is not available because"
                " matplotlib is not installed: {0}".format(e)
            )

    # If we already did the prefit call in ModelWidget (and didn't modify the
    # parameters afterwards), avoid doing it here
    if prefit and not P0_IS_ML:
        p0, P0_IS_ML = _prefit(p0, data, model, prior)

    sampler = emcee.EnsembleSampler(
        nwalkers, len(p0), lnprob, args=[data, model, prior], threads=threads
    )

    # Add data and parameters properties to sampler
    sampler.data_table = data_table
    sampler.data = data
    sampler.labels = labels
    # Add model function to sampler
    sampler.modelfn = model
    # Add run_info dict
    sampler.run_info = {
        "n_walkers": nwalkers,
        "n_burn": nburn,
        # convert from np.float to regular float
        "p0": [float(p) for p in p0],
        "guess": guess,
    }

    # Initialize walkers in a ball of relative size 0.5% in all dimensions if
    # the parameters have been fit to their ML values, or to 10% otherwise
    spread = 0.005 if P0_IS_ML else 0.1
    p0var = np.array([spread * pp for pp in p0])
    p0 = emcee.utils.sample_ball(p0, p0var, nwalkers)

    if nburn > 0:
        print(
            "Burning in the {0} walkers with {1} steps...".format(
                nwalkers, nburn
            )
        )
        sampler, pos = _run_mcmc(sampler, p0, nburn)
    else:
        pos = p0

    sampler.run_info["p0_burn_median"] = [
        float(p) for p in np.median(pos, axis=0)
    ]

    return sampler, pos
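
For context, the snippet above seeds the walkers in a small Gaussian ball around p0 and burns them in before the production run. Below is a minimal standalone sketch of that pattern, assuming the emcee 2.x API (emcee.utils.sample_ball and the tuple return of run_mcmc changed in emcee 3); lnprob_demo, p0 and the numbers are illustrative only and are not part of the code above.

import numpy as np
import emcee

def lnprob_demo(pars):
    # Toy log-probability: independent unit Gaussians in each dimension.
    return -0.5 * np.sum(pars ** 2)

p0 = np.array([1.0, 2.0])      # starting parameter vector
nwalkers, nburn = 32, 100
spread = 0.1                   # 10% relative spread, as when p0 is not an ML fit
walkers = emcee.utils.sample_ball(p0, np.abs(spread * p0), nwalkers)

sampler = emcee.EnsembleSampler(nwalkers, len(p0), lnprob_demo)
pos, lnp, state = sampler.run_mcmc(walkers, nburn)   # burn-in phase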
Code example #52
0
def fit_a_spectrum(sp,
                   radexfit=False,
                   write=True,
                   vlimits=(-105, 125),
                   pars=pars):
    sp.plotter.autorefresh = False
    sp.plotter(figure=1)
    ncomp = pars[sp.specname]['ncomp']
    if ncomp == 0:
        log.info(
            "Skipping {0} - no velocity components detected.".format(ncomp))
        return
    returns = [ncomp]
    velos = pars[sp.specname]['velo']
    spname = sp.specname.replace(" ", "_")

    width_min = 1

    if 'width_max' in pars[sp.specname]:
        width_max = pars[sp.specname]['width_max']
    elif 'Map' in sp.specname or 'box' in sp.specname:
        width_max = 40
    else:
        width_max = 15

    sp.specfit.Registry.add_fitter('h2co_simple',
                                   simple_fitter2,
                                   6,
                                   multisingle='multi')
    guesses_simple = [
        x for ii in range(ncomp)
        for x in (sp.data.max(), velos[ii], 5, 0.5, 1.0, sp.data.max())
    ]

    if not (min(velos) > vlimits[0] and max(velos) < vlimits[1]):
        log.warn("A velocity guess {0} is outside limits {1}.".format(
            velos, vlimits))
        vlimits = (min(velos) - 25, max(velos) + 25)
        log.warn("Changing limits to {0}".format(vlimits))

    sp.specfit(
        fittype='h2co_simple',
        multifit=True,
        guesses=guesses_simple,
        limited=[(True, True)] * 6,
        limits=[(0, 20), vlimits, (width_min, width_max), (0, 1), (0.3, 1.1),
                (0, 1e5)],
    )
    sp.baseline(excludefit=True,
                subtract=True,
                highlight_fitregion=True,
                order=1)

    sp.plotter(clear=True)
    sp.specfit(
        fittype='h2co_simple',
        multifit=True,
        guesses=guesses_simple,
        limited=[(True, True)] * 6,
        limits=[(0, 20), vlimits, (width_min, width_max), (0, 1), (0.3, 1.1),
                (0, 1e5)],
    )

    returns.append(copy.copy(sp.specfit.parinfo))

    err = sp.error.mean()

    sp.plotter()
    sp.specfit.plot_fit(show_components=True)
    sp.specfit.annotate(fontsize=font_sizes[ncomp])
    sp.specfit.plotresiduals(axis=sp.plotter.axis,
                             yoffset=-err * 5,
                             clear=False,
                             color='#444444',
                             label=False)
    sp.plotter.axis.set_ylim(sp.plotter.ymin - err * 5, sp.plotter.ymax)
    sp.plotter.savefig(
        os.path.join(figurepath,
                     "simple/{0}_fit_4_lines_simple.pdf".format(spname)))
    if write:
        sp.write(mpath("spectra/{0}_spectrum.fits".format(spname)))

    # This will mess things up for the radexfit (maybe in a good way) but *cannot*
    # be done after the radexfit
    # Set the spectrum to be the fit residuals.  The linear baseline has
    # already been subtracted from both the data and the residuals
    linear_baseline = sp.baseline.basespec
    sp.baseline.unsubtract()
    fitted_residuals = sp.baseline.spectofit = sp.specfit.residuals
    sp.baseline.includemask[:] = True  # Select ALL residuals
    sp.baseline.fit(spline=True, order=3, spline_sampling=50)
    spline_baseline = sp.baseline.basespec
    sp.data -= spline_baseline + linear_baseline
    sp.baseline.subtracted = True
    sp.error[:] = sp.stats((218.5e9, 218.65e9))['std']
    sp.specfit(
        fittype='h2co_simple',
        multifit=True,
        guesses=guesses_simple,
        limited=[(True, True)] * 6,
        limits=[(0, 1e5), vlimits, (width_min, width_max), (0, 1), (0.3, 1.1),
                (0, 1e5)],
    )
    sp.plotter()
    sp.plotter.axis.plot(sp.xarr,
                         spline_baseline - err * 5,
                         color='orange',
                         alpha=0.75,
                         zorder=-1,
                         linewidth=2)
    sp.specfit.plot_fit(show_components=True)
    sp.specfit.annotate(fontsize=font_sizes[ncomp])
    sp.plotter.axis.plot(sp.xarr,
                         fitted_residuals - err * 5,
                         color="#444444",
                         linewidth=0.5,
                         drawstyle='steps-mid')
    #sp.specfit.plotresiduals(axis=sp.plotter.axis, yoffset=-err*5, clear=False,
    #                         color='#444444', label=False)
    sp.plotter.axis.set_ylim(sp.plotter.ymin - err * 5, sp.plotter.ymax)
    sp.plotter.savefig(
        os.path.join(
            figurepath,
            "simple/{0}_fit_4_lines_simple_splinebaselined.pdf".format(
                spname)))

    returns.append(copy.copy(sp.specfit.parinfo))

    if write:
        sp.write(mpath("spectra/{0}_spectrum_basesplined.fits".format(spname)))

    if radexfit:
        guesses = [
            x for ii in range(ncomp) for x in
            (100, 14, 4.5, sp.specfit.parinfo['VELOCITY{0}'.format(ii)].value,
             (sp.specfit.parinfo['WIDTH{0}'.format(ii)].value if
              (sp.specfit.parinfo['WIDTH{0}'.format(ii)].value < width_max
               and sp.specfit.parinfo['WIDTH{0}'.format(ii)].value > width_min
               ) else 5))
        ]

        sp.specfit.Registry.add_fitter('h2co_mm_radex',
                                       h2co_radex_fitter,
                                       5,
                                       multisingle='multi')
        sp.specfit(
            fittype='h2co_mm_radex',
            multifit=True,
            guesses=guesses,
            limits=[(10, 300), (11, 15), (3, 5.5), (-105, 125),
                    (width_min, width_max)] * ncomp,
            limited=[(True, True)] * 5 * ncomp,
            fixed=[False, False, False, True, True] * ncomp,
            quiet=False,
        )
        sp.plotter.savefig(
            os.path.join(figurepath,
                         "radex/{0}_fit_h2co_mm_radex.pdf".format(spname)))

        returns.append(copy.copy(sp.specfit.parinfo))

    return returns
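
A hedged usage sketch for fit_a_spectrum: it assumes a pyspeckit.Spectrum whose specname matches a key of the pars dictionary (carrying 'ncomp' and 'velo' entries), and it assumes the module-level names the function relies on (figurepath, mpath, font_sizes, simple_fitter2) are already defined. The filename and velocity below are illustrative.

import pyspeckit

sp = pyspeckit.Spectrum('W43-MM3_box_spectrum.fits')    # hypothetical file
sp.specname = 'W43-MM3 box'

# One velocity component near 98 km/s; width_max falls back to 40 ('box' in name).
fit_pars = {'W43-MM3 box': {'ncomp': 1, 'velo': [98.0]}}

results = fit_a_spectrum(sp, radexfit=False, write=False, pars=fit_pars)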
Code example #53
0
    def write_all_tpfs(self):
        """Produce TPF files for all targets in the cadence data."""
        target_ids = list(self.pixel_mapping.targets.keys())
        log.info("Writing {} Target Pixel Files.".format(len(target_ids)))
        ProgressBar.map(self.write_tpf, target_ids, multiprocess=True)
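
The method leans on astropy's ProgressBar.map to distribute write_tpf over a process pool while showing progress. A minimal standalone sketch of that call (the worker function and inputs are illustrative):

from astropy.utils.console import ProgressBar

def square(x):
    # Stand-in for write_tpf: any picklable function of one argument works.
    return x * x

results = ProgressBar.map(square, range(16), multiprocess=True)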
Code example #54
0
def vst_pawplot(filename,
                out_fn=None,
                dpi=100,
                scale='log',
                min_cut=None,
                max_cut=None,
                min_percent=1.0,
                max_percent=99.5,
                cmap='gist_heat',
                show_hdu=False,
                normalize=False):
    """Plots the 32-CCD OmegaCam mosaic as a pretty bitmap.

    Parameters
    ----------
    filename : str
        The filename of the FITS file.

    out_fn : str
        The filename of the output bitmap image.  The type of bitmap
        is determined by the filename extension (e.g. '.jpg', '.png').
        The default is a JPG file with the same name as the FITS file.

    dpi : float, optional [dots per inch]
        Resolution of the output 10-by-9 inch output bitmap.
        The default is 100.

    scale : {'linear', 'log'}
        The scaling/stretch function to apply to the image.  The default
        is 'log'.

    min_cut : float, optional
        The pixel value of the minimum cut level.  Data values less than
        ``min_cut`` will be set to ``min_cut`` before scaling the image.
        The default is the image minimum.  ``min_cut`` overrides
        ``min_percent``.

    max_cut : float, optional
        The pixel value of the maximum cut level.  Data values greater
        than ``max_cut`` will be set to ``max_cut`` before scaling the
        image.  The default is the image maximum.  ``max_cut`` overrides
        ``max_percent``.

    min_percent : float, optional
        The percentile value used to determine the pixel value of
        minimum cut level.  The default is 1.0.

    max_percent : float, optional
        The percentile value used to determine the pixel value of
        maximum cut level.  The default is 99.5.

    cmap : str, optional
        The matplotlib color map name.  The default is 'gist_heat',
        can also be e.g. 'Greys_r'.

    show_hdu : boolean, optional
        Plot the HDU extension number if True (default: False).

    normalize : boolean, optional
        Divide each HDU by its median value before plotting (default: False).
    """
    # Check input
    if out_fn is None:
        out_fn = filename + '.jpg'
    log.info('Writing {0}'.format(out_fn))
    if cmap not in pl.cm.datad.keys():
        raise ValueError('{0} is not a valid matplotlib colormap '
                         'name'.format(cmap))
    # Determine the interval
    f = fits.open(filename)
    if min_cut is not None or max_cut is not None:
        vmin, vmax = min_cut or 0, max_cut or 65536
    else:
        # Determine vmin/vmax based on a sample of pixels across the mosaic
        sample = np.concatenate([
            f[hdu].data[::200, ::100]
            for hdu in [1, 6, 8, 12, 13, 20, 21, 23, 25, 27, 32]
        ])
        vmin, vmax = np.percentile(sample, [min_percent, max_percent])
        del sample  # save memory
    log.debug('vst_pawplot: vmin={0}, vmax={1}'.format(vmin, vmax))
    # Determine the stretch
    if scale == 'linear':
        normobj = Normalize(vmin=vmin, vmax=vmax)
    else:
        normobj = LogNorm(vmin=vmin, vmax=vmax)
    # Setup the figure and plot the extensions
    pl.interactive(False)
    fig = pl.figure(figsize=(10, 9))
    idx = 0
    for hduno in ProgressBar(OMEGACAM_CCD_ARRANGEMENT):
        idx += 1
        log.debug('vst_pawplot: adding HDU #{0}'.format(hduno))
        ax = fig.add_subplot(4, 8, idx)
        sampling = int(500 / dpi)
        data_to_show = f[hduno].data[::sampling, ::-sampling]
        if normalize:
            data_to_show = data_to_show / np.median(data_to_show)
        im = ax.matshow(data_to_show, norm=normobj, cmap=cmap, origin='lower')
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
        if show_hdu:
            # Show the HDU extension number on top of the image
            txt = ax.text(0.05,
                          0.97,
                          hduno,
                          fontsize=14,
                          color='white',
                          ha='left',
                          va='top',
                          transform=ax.transAxes)
            # Add a black border around the text
            txt.set_path_effects([
                path_effects.Stroke(linewidth=2, foreground='black'),
                path_effects.Normal()
            ])
    # Aesthetics and colorbar
    fig.subplots_adjust(left=0.04,
                        right=0.85,
                        top=0.93,
                        bottom=0.05,
                        wspace=0.02,
                        hspace=0.02)
    cbar_ax = fig.add_axes([0.9, 0.06, 0.02, 0.86])
    t = np.logspace(np.log10(vmin), np.log10(vmax), num=10)
    if (vmax - vmin) > 10:
        fmt = '%.0f'
    elif (vmax - vmin) > 1:
        fmt = '%.1f'
    else:
        fmt = '%.2f'
    cb = fig.colorbar(im, cax=cbar_ax, ticks=t, format=fmt)
    cb.set_label('Pixel count [ADU]')
    # Title and footer text
    fig.text(0.05, 0.95, filename, fontsize=16, ha='left')
    try:
        filterfooter = '{0} ({1}/{2})'.format(
            f[0].header['ESO INS FILT1 NAME'], f[0].header['ESO TPL EXPNO'],
            f[0].header['ESO TPL NEXP'])
        fig.text(0.04, 0.02, f[0].header['OBJECT'], fontsize=12, ha='left')
        fig.text(0.50, 0.02, filterfooter, fontsize=12, ha='right')
        fig.text(0.85,
                 0.02,
                 f[0].header['DATE-OBS'][0:19],
                 fontsize=12,
                 ha='right')
    except KeyError as e:
        log.warning('Could not write footer text: {0}'.format(e))
        pass
    # Save output and clean up
    fig.savefig(out_fn, dpi=dpi)
    pl.close()
    del f
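
A hedged usage sketch; the filename is illustrative and must point to a multi-extension OmegaCam exposure so the HDU sampling above succeeds.

vst_pawplot('OMEGACAM_exposure.fits',
            out_fn='preview.jpg',
            scale='log',
            max_percent=99.8,
            cmap='gist_heat',
            show_hdu=True)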
Code example #55
0
    def download_data(self, observation_id, *, filename=None, verbose=False,
                      **kwargs):
        """
        Download data from XMM-Newton

        Parameters
        ----------
        observation_id : string
            id of the observation to be downloaded, mandatory
            The identifier of the observation we want to retrieve, 10 digits
            example: 0144090201
        filename : string
            file name to be used to store the file
        verbose : bool
            optional, default 'False'
            flag to display information about the process
        level : string
            level to download, optional, by default everything is downloaded
            values: ODF, PPS
        instname : string
            instrument name, optional, two characters, by default everything
            values: OM, R1, R2, M1, M2, PN
        instmode : string
            instrument mode, optional
            examples: Fast, FlatFieldLow, Image, PrimeFullWindow
        filter : string
            filter, optional
            examples: Closed, Open, Thick, UVM2, UVW1, UVW2, V
        expflag : string
            exposure flag, optional, by default everything
            values: S, U, X(not applicable)
        expno : integer
            exposure number with 3 digits, by default all exposures
            examples: 001, 003
        name : string
            product type, optional, 6 characters, by default all product types
            examples: 3COLIM, ATTTSR, EVENLI, SBSPEC, EXPMAP, SRCARF
        datasubsetno : character
            data subset number, optional, by default all
            examples: 0, 1
        sourceno : hex value
            source number, optional, by default all sources
            example: 00A, 021, 001
        extension : string
            file format, optional, by default all formats
            values: ASC, ASZ, FTZ, HTM, IND, PDF, PNG


        Returns
        -------
        None. The indicated observation is downloaded to ``filename``;
        if verbose, the request link and destination file are also logged.
        """

        link = self.data_aio_url + "obsno=" + observation_id

        link = link + "".join("&{0}={1}".format(key, val)
                              for key, val in kwargs.items())

        if verbose:
            log.info(link)

        response = self._request('GET', link, save=False, cache=True)

        # Get original extension
        _, params = cgi.parse_header(response.headers['Content-Disposition'])
        r_filename = params["filename"]
        suffixes = Path(r_filename).suffixes

        if filename is None:
            filename = observation_id

        filename += "".join(suffixes)

        log.info("Copying file to {0}...".format(filename))
        with open(filename, 'wb') as f:
            f.write(response.content)

        if verbose:
            log.info("Wrote {0} to {1}".format(link, filename))
Code example #56
0
def _prefit(p0, data, model, prior):
    P0_IS_ML = False
    from .extern.minimize import minimize

    def flat_prior(*args):
        return 0.0

    if prior is None:
        prior = flat_prior

    def nll(*args):
        return -lnprob(*args)[0]

    log.info(
        "Finding Maximum Likelihood parameters through Nelder-Mead fitting..."
    )
    log.info("   Initial parameters: {0}".format(p0))
    log.info(
        "   Initial lnprob(p0): {0:.3f}".format(-nll(p0, data, model, prior))
    )
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        result = minimize(
            nll,
            p0,
            args=(data, model, flat_prior),
            method="Nelder-Mead",
            options={"maxfev": 500, "xtol": 1e-1, "ftol": 1e-3},
        )
        ll_prior = lnprob(result["x"], data, model, prior)[0]

    if (result["success"] or result["status"] == 1) and not np.isinf(ll_prior):
        # also keep result if we have reached maxiter, it is likely
        # better than p0
        if result["status"] == 1:
            log.info("   Maximum number of function evaluations reached!")
        if result["status"] == 1:
            log.info("      New parameters : {0}".format(result["x"]))
        else:
            log.info("   New ML parameters : {0}".format(result["x"]))
            P0_IS_ML = True
        if -result["fun"] == ll_prior:
            log.info("   Maximum lnprob(p0): {0:.3f}".format(-result["fun"]))
        else:
            log.info("flat prior lnprob(p0): {0:.3f}".format(-result["fun"]))
            log.info("full prior lnprob(p0): {0:.3f}".format(ll_prior))
        p0 = result["x"]
    elif np.isinf(ll_prior):
        log.warning(
            "Maximum Likelihood procedure converged on a parameter"
            " vector forbidden by prior,"
            " using original parameters for MCMC"
        )
    else:
        log.warning(
            "Maximum Likelihood procedure failed to converge,"
            " using original parameters for MCMC"
        )
    return p0, P0_IS_ML
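
The prefit above is a plain Nelder-Mead minimisation of the negative log-probability, used only to seed the MCMC walkers. A minimal sketch of the same idea with SciPy (the snippet above ships its own copy of minimize; the toy log-probability and tolerances here are illustrative):

import numpy as np
from scipy.optimize import minimize

def lnprob_demo(p):
    # Toy log-probability: a Gaussian centred on (3, -1).
    return -0.5 * np.sum((p - np.array([3.0, -1.0])) ** 2)

def nll(p):
    return -lnprob_demo(p)

p0 = np.array([0.0, 0.0])
result = minimize(nll, p0, method="Nelder-Mead",
                  options={"maxfev": 500, "xatol": 1e-1, "fatol": 1e-3})

# Keep the result if it converged, or if it merely hit maxfev (status == 1):
# either way it is likely a better MCMC starting point than p0.
if result.success or result.status == 1:
    p0 = result.x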
Code example #57
0
File: core.py Project: stargaser/astroquery
    def query_criteria(self,
                       calibration_level=None,
                       data_product_type=None,
                       intent=None,
                       obs_collection=None,
                       instrument_name=None,
                       filters=None,
                       async_job=True,
                       output_file=None,
                       output_format="votable",
                       verbose=False,
                       get_query=False):
        """
        Launches a synchronous or asynchronous job to query the HST tap
        using calibration level, data product type, intent, collection,
        instrument name, and filters as criteria to create and execute the
        associated query.

        Parameters
        ----------
        calibration_level : str or int, optional
            The identifier of the data reduction/processing applied to the
            data. RAW (1), CALIBRATED (2), PRODUCT (3) or AUXILIARY (0)
        data_product_type : str, optional
            High level description of the product.
            image, spectrum or timeseries.
        intent : str, optional
            The intent of the original observer in acquiring this observation.
            SCIENCE or CALIBRATION
        obs_collection : list of str, optional
            List of collections that are available in eHST catalogue.
            HLA, HST
        instrument_name : list of str, optional
            Name(s) of the instrument(s) used to generate the dataset
        filters : list of str, optional
            Name(s) of the filter(s) used to generate the dataset
        async_job : bool, optional, default 'True'
            executes the query (job) in asynchronous/synchronous mode (default
            synchronous)
        output_file : str, optional, default None
            file name where the results are saved if dumpToFile is True.
            If this parameter is not provided, the jobid is used instead
        output_format : str, optional, default 'votable'
            results format
        verbose : bool, optional, default 'False'
            flag to display information about the process
        get_query : bool, optional, default 'False'
            flag to return the query associated to the criteria as the result
            of this function.

        Returns
        -------
        A table object
        """

        parameters = []
        if calibration_level is not None:
            parameters.append("p.calibration_level LIKE '%{}%'".format(
                self.__get_calibration_level(calibration_level)))
        if data_product_type is not None:
            if isinstance(data_product_type, str):
                parameters.append("p.data_product_type LIKE '%{}%'".format(
                    data_product_type))
            else:
                raise ValueError("data_product_type must be a string")
        if intent is not None:
            if isinstance(intent, str):
                parameters.append("o.intent LIKE '%{}%'".format(intent))
            else:
                raise ValueError("intent must be a string")
        if self.__check_list_strings(obs_collection):
            parameters.append("(o.collection LIKE '%{}%')".format(
                "%' OR o.collection LIKE '%".join(obs_collection)))
        if self.__check_list_strings(instrument_name):
            parameters.append("(o.instrument_name LIKE '%{}%')".format(
                "%' OR o.instrument_name LIKE '%".join(instrument_name)))
        if self.__check_list_strings(filters):
            parameters.append(
                "(o.instrument_configuration LIKE '%{}%')".format(
                    "%' OR o.instrument_configuration "
                    "LIKE '%".join(filters)))
        query = "select o.*, p.calibration_level, p.data_product_type "\
                "from ehst.observation AS o LEFT JOIN ehst.plane as p "\
                "on o.observation_uuid=p.observation_uuid"
        if parameters:
            query += " where({})".format(" AND ".join(parameters))
        table = self.query_hst_tap(query=query,
                                   async_job=async_job,
                                   output_file=output_file,
                                   output_format=output_format,
                                   verbose=verbose)
        if verbose:
            log.info(query)
        if get_query:
            return query
        return table
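
A hedged usage sketch, assuming this is the query_criteria method of astroquery's ESAHubble class (consistent with the astroquery project header); the criteria values are illustrative.

from astroquery.esa.hubble import ESAHubble

esahubble = ESAHubble()
result = esahubble.query_criteria(calibration_level='PRODUCT',
                                  data_product_type='image',
                                  intent='SCIENCE',
                                  obs_collection=['HST'],
                                  instrument_name=['WFC3'],
                                  filters=['F555W'],
                                  async_job=False)
print(result)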
Code example #58
0
def _report_at_exit():
    if _track:
        log.info('Thank you for using sbpy.  ' +
                 'Your session results were based on:\n' +
                 to_text())
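
A reporter like this is typically wired up with atexit so it runs when the interpreter shuts down. A minimal sketch of that pattern (the tracking flag and to_text stand-in are illustrative, not sbpy's actual internals):

import atexit

_track = True          # stand-in for an opt-in tracking flag

def to_text():
    return "bibliography placeholder"

def _report_at_exit():
    if _track:
        print("Session summary:\n" + to_text())

atexit.register(_report_at_exit)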
Code example #59
0
                            drawstyle='steps-mid',
                            linewidth=2,
                            alpha=0.75)

                    pl.axis(axlims)
                    pl.xlabel("Frequency")
                    pl.ylabel("Flux [Jy/beam]")
                    pl.title(os.path.split(specname)[-1].replace(".fits", ""))
                    pl.savefig(
                        basepath /
                        f'imaging_results/spectra/pngs/{field}_B{band}_spw{spw}_{"12M" if "12M" in config else "7M"}_robust{robust}_lines.image_mean.coverage.png',
                        bbox_inches='tight')

                if muid == 'member.uid___A001_X1296_X127':
                    assert not np.isnan(included_bw[band][spw][field][config])
                    log.info(f"Success for muid {muid}")

            if 1 in included_bw[3]:
                if 'G012.80' in included_bw[3][1]:
                    assert included_bw[3][1]['G012.80']['12Mlong'] is not None

        # insanity checks
        if 6 in included_bw:
            assert not np.isnan(included_bw[6][0]['W43-MM3']['12Mshort'])

        #assert frqmask.sum() > 0

        #if band == 6:
        #    # W41-MM1 B6 doesn't exist
        #    assert not np.any(frqmask[10,:])
Code example #60
0
File: combiner.py Project: aaryapatil/ccdproc
def combine(img_list,
            output_file=None,
            method='average',
            weights=None,
            scale=None,
            mem_limit=16e9,
            clip_extrema=False,
            nlow=1,
            nhigh=1,
            minmax_clip=False,
            minmax_clip_min=None,
            minmax_clip_max=None,
            sigma_clip=False,
            sigma_clip_low_thresh=3,
            sigma_clip_high_thresh=3,
            sigma_clip_func=ma.mean,
            sigma_clip_dev_func=ma.std,
            dtype=None,
            combine_uncertainty_function=None,
            **ccdkwargs):
    """
    Convenience function for combining multiple images.

    Parameters
    ----------
    img_list : list or str
        A list of fits filenames or `~ccdproc.CCDData` objects that will be
        combined together. Or a string of fits filenames separated by comma
        ",".

    output_file : str or None, optional
        Optional output fits file-name to which the final output can be
        directly written.
        Default is ``None``.

    method : str, optional
        Method to combine images:

        - ``'average'`` : To combine by calculating the average.
        - ``'median'`` : To combine by calculating the median.

        Default is ``'average'``.

    weights : `numpy.ndarray` or None, optional
        Weights to be used when combining images.
        An array with the weight values. The dimensions should match the
        dimensions of the data arrays being combined.
        Default is ``None``.

    scale : function or `numpy.ndarray`-like or None, optional
        Scaling factor to be used when combining images.
        Images are multiplied by scaling prior to combining them. Scaling
        may be either a function, which will be applied to each image
        to determine the scaling factor, or a list or array whose length
        is the number of images in the `Combiner`. Default is ``None``.

    mem_limit : float, optional
        Maximum memory which should be used while combining (in bytes).
        Default is ``16e9``.

    clip_extrema : bool, optional
        Set to True if you want to mask pixels using an IRAF-like minmax
        clipping algorithm.  The algorithm will mask the lowest nlow values and
        the highest nhigh values before combining the values to make up a
        single pixel in the resulting image.  For example, the image will be a
        combination of Nimages-nlow-nhigh pixel values instead of the
        combination of Nimages.

        Parameters below are valid only when clip_extrema is set to True,
        see :meth:`Combiner.clip_extrema` for the parameter description:

        - ``nlow`` : int or None, optional
        - ``nhigh`` : int or None, optional


    minmax_clip : bool, optional
        Set to True if you want to mask all pixels that are below
        minmax_clip_min or above minmax_clip_max before combining.
        Default is ``False``.

        Parameters below are valid only when minmax_clip is set to True, see
        :meth:`Combiner.minmax_clipping` for the parameter description:

        - ``minmax_clip_min`` : float or None, optional
        - ``minmax_clip_max`` : float or None, optional

    sigma_clip : bool, optional
        Set to True if you want to reject pixels which have deviations greater
        than those
        set by the threshold values. The algorithm will first calculate
        a baseline value using the function specified in ``sigma_clip_func`` and
        a deviation based on ``sigma_clip_dev_func`` and the input data array.
        Any pixel with
        a deviation from the baseline value greater than that set by
        sigma_clip_high_thresh or lower than that set by sigma_clip_low_thresh
        will be rejected.
        Default is ``False``.

        Parameters below are valid only when sigma_clip is set to True. See
        :meth:`Combiner.sigma_clipping` for the parameter description.

        - ``sigma_clip_low_thresh`` : positive float or None, optional
        - ``sigma_clip_high_thresh`` : positive float or None, optional
        - ``sigma_clip_func`` : function, optional
        - ``sigma_clip_dev_func`` : function, optional

    dtype : str or `numpy.dtype` or None, optional
        The intermediate and resulting ``dtype`` for the combined CCDs. See
        `ccdproc.Combiner`. If ``None`` this is set to ``float64``.
        Default is ``None``.

    combine_uncertainty_function : callable, None, optional
        If ``None`` use the default uncertainty func when using average or
        median combine, otherwise use the function provided.
        Default is ``None``.

    ccdkwargs : Other keyword arguments for `ccdproc.fits_ccddata_reader`.

    Returns
    -------
    combined_image : `~ccdproc.CCDData`
        CCDData object based on the combined input of CCDData objects.
    """
    if not isinstance(img_list, list):
        # If not a list, check whether it is a string of filenames separated
        # by comma
        if isinstance(img_list, str) and (',' in img_list):
            img_list = img_list.split(',')
        else:
            raise ValueError(
                "unrecognised input for list of images to combine.")

    # Select Combine function to call in Combiner
    if method == 'average':
        combine_function = 'average_combine'
    elif method == 'median':
        combine_function = 'median_combine'
    else:
        raise ValueError("unrecognised combine method : {0}.".format(method))

    # First we create a CCDObject from first image for storing output
    if isinstance(img_list[0], CCDData):
        ccd = img_list[0].copy()
    else:
        # User has provided fits filenames to read from
        ccd = CCDData.read(img_list[0], **ccdkwargs)

    # If uncertainty_func is given for combine this will create an uncertainty
    # even if the originals did not have one. In that case we need to create
    # an empty placeholder.
    if ccd.uncertainty is None and combine_uncertainty_function is not None:
        ccd.uncertainty = StdDevUncertainty(np.zeros(ccd.data.shape))

    if dtype is None:
        dtype = np.float64

    # Convert the master image to the appropriate dtype so when overwriting it
    # later the data is not downcast and the memory consumption calculation
    # uses the internally used dtype instead of the original dtype. #391
    if ccd.data.dtype != dtype:
        ccd.data = ccd.data.astype(dtype)

    size_of_an_img = ccd.data.nbytes
    try:
        size_of_an_img += ccd.uncertainty.array.nbytes
    # In case uncertainty is None it has no "array" and in case the "array" is
    # not a numpy array:
    except AttributeError:
        pass
    # Mask is enforced to be a numpy.array across astropy versions
    if ccd.mask is not None:
        size_of_an_img += ccd.mask.nbytes
    # flags is not necessarily a numpy array so do not fail with an
    # AttributeError in case something was set!
    # TODO: Flags are not taken into account in Combiner. This number is added
    #       nevertheless for future compatibility.
    try:
        size_of_an_img += ccd.flags.nbytes
    except AttributeError:
        pass

    no_of_img = len(img_list)

    # determine the number of chunks to split the images into
    no_chunks = int((size_of_an_img * no_of_img) / mem_limit) + 1
    if no_chunks > 1:
        log.info('splitting each image into {0} chunks to limit memory usage '
                 'to {1} bytes.'.format(no_chunks, mem_limit))
    xs, ys = ccd.data.shape
    # First we try to split only along fast x axis
    xstep = max(1, int(xs / no_chunks))
    # If more chunks need to be created we split in Y axis for remaining number
    # of chunks
    ystep = max(1, int(ys / (1 + no_chunks - int(xs / xstep))))

    # Dictionary of Combiner properties to set and methods to call before
    # combining
    to_set_in_combiner = {}
    to_call_in_combiner = {}

    # Define all the Combiner properties one wants to apply before combining
    # images
    if weights is not None:
        to_set_in_combiner['weights'] = weights

    if scale is not None:
        # If the scale is a function, then scaling function need to be applied
        # on full image to obtain scaling factor and create an array instead.
        if callable(scale):
            scalevalues = []
            for image in img_list:
                if isinstance(image, CCDData):
                    imgccd = image
                else:
                    imgccd = CCDData.read(image, **ccdkwargs)

                scalevalues.append(scale(imgccd.data))

            to_set_in_combiner['scaling'] = np.array(scalevalues)
        else:
            to_set_in_combiner['scaling'] = scale

    if clip_extrema:
        to_call_in_combiner['clip_extrema'] = {'nlow': nlow, 'nhigh': nhigh}

    if minmax_clip:
        to_call_in_combiner['minmax_clipping'] = {
            'min_clip': minmax_clip_min,
            'max_clip': minmax_clip_max
        }

    if sigma_clip:
        to_call_in_combiner['sigma_clipping'] = {
            'low_thresh': sigma_clip_low_thresh,
            'high_thresh': sigma_clip_high_thresh,
            'func': sigma_clip_func,
            'dev_func': sigma_clip_dev_func
        }

    # Finally Run the input method on all the subsections of the image
    # and write final stitched image to ccd
    for x in range(0, xs, xstep):
        for y in range(0, ys, ystep):
            xend, yend = min(xs, x + xstep), min(ys, y + ystep)
            ccd_list = []
            for image in img_list:
                if isinstance(image, CCDData):
                    imgccd = image
                else:
                    imgccd = CCDData.read(image, **ccdkwargs)

                # Trim image
                ccd_list.append(imgccd[x:xend, y:yend])

            # Create Combiner for tile
            tile_combiner = Combiner(ccd_list, dtype=dtype)
            # Set all properties and call all methods
            for to_set in to_set_in_combiner:
                setattr(tile_combiner, to_set, to_set_in_combiner[to_set])
            for to_call in to_call_in_combiner:
                getattr(tile_combiner, to_call)(**to_call_in_combiner[to_call])

            # Finally call the combine algorithm
            combine_kwds = {}
            if combine_uncertainty_function is not None:
                combine_kwds['uncertainty_func'] = combine_uncertainty_function

            comb_tile = getattr(tile_combiner,
                                combine_function)(**combine_kwds)

            # add it back into the master image
            ccd.data[x:xend, y:yend] = comb_tile.data
            if ccd.mask is not None:
                ccd.mask[x:xend, y:yend] = comb_tile.mask
            if ccd.uncertainty is not None:
                ccd.uncertainty.array[x:xend,
                                      y:yend] = comb_tile.uncertainty.array

    # Write fits file if filename was provided
    if output_file is not None:
        ccd.write(output_file)

    return ccd
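
A hedged usage sketch of combine, built only from the parameters documented above; the filenames are illustrative.

master_bias = combine(['bias1.fits', 'bias2.fits', 'bias3.fits'],
                      output_file='master_bias.fits',
                      method='median',
                      sigma_clip=True,
                      sigma_clip_low_thresh=5,
                      sigma_clip_high_thresh=5,
                      mem_limit=4e9)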