Example #1
    def frequency(self, f):
        """
        Find the most significant frequency of a (complex) time series, :math:`f(t)`,
        by Fourier transforming the function convolved with a Hanning filter and
        picking the most significant peak. This assumes the time series, `f`,
        is aligned with / given at the times specified when constructing this
        object. An internal function.

        Parameters
        ----------
        f : array_like
            Complex time-series, :math:`q(t) + i p(t)`.

        Returns
        -------
        freq : numeric
            The strongest frequency in the specified complex time series, ``f``.

        """

        if len(f) != self.n:
            logger.warning(
                "Truncating time series to match shape of time array ({0}) ({1})"
                .format(len(f), self.n))
            f = f[:self.n]

        # take Fourier transform of input (complex) function f
        # if HAS_PYFFTW:
        #     _f = pyfftw.n_byte_align_empty(f.size, 16, 'complex128')
        #     _f[:] = f

        #     fft_obj = pyfftw.builders.fft(f, overwrite_input=True,
        #                                   planner_effort='FFTW_ESTIMATE')
        #     fff = fft_obj() / np.sqrt(self.n)
        # else:
        t1 = time.time()
        fff = fft(f) / np.sqrt(self.n)
        logger.log(0, "Took {} seconds to FFT.".format(time.time() - t1))

        # frequencies
        omegas = 2 * np.pi * fftfreq(f.size, self.dt)

        # wmax is just an initial guess for optimization
        xyf = np.abs(fff)
        wmax = xyf.argmax()
        if np.allclose(xyf[wmax], 0):
            # return early -- "this may be an axial or planar orbit"
            logger.log(0, "Returning early - may be an axial or planar orbit?")
            return 0.

        # real and complex part of input time series
        Re_f = f.real.copy()
        Im_f = f.imag.copy()

        # frequency associated with the peak index
        omega0 = omegas[wmax]

        freq = naff_frequency(omega0, self.tz, self.chi, Re_f, Im_f, self.T)
        return freq
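The FFT peak here only seeds the NAFF refinement that naff_frequency performs. As a minimal, self-contained sketch of just that seeding step (plain NumPy; the true_omega signal and all names below are invented for the example, and the Hanning-filter and refinement machinery of the class are omitted):

    import numpy as np
    from numpy.fft import fft, fftfreq

    # Synthetic complex time series q(t) + i p(t) with one known frequency.
    dt = 0.05
    t = np.arange(0, 100, dt)
    true_omega = 1.3
    f = np.exp(1j * true_omega * t)

    # FFT, normalized the same way as in the method above.
    fff = fft(f) / np.sqrt(len(f))
    omegas = 2 * np.pi * fftfreq(f.size, dt)

    # The strongest peak gives the initial guess omega0 for the optimizer.
    wmax = np.abs(fff).argmax()
    omega0 = omegas[wmax]
    print(omega0)  # close to true_omega, up to the FFT bin width 2*pi/(n*dt)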
Example #2
File: naff.py Project: abonaca/gary
    def frequency(self, f):
        """
        Find the most significant frequency of a (complex) time series, :math:`f(t)`,
        by Fourier transforming the function convolved with a Hanning filter and
        picking the most significant peak. This assumes the time series, `f`,
        is aligned with / given at the times specified when constructing this
        object. An internal function.

        Parameters
        ----------
        f : array_like
            Complex time-series, :math:`q(t) + i p(t)`.

        Returns
        -------
        freq : numeric
            The strongest frequency in the specified complex time series, ``f``.

        """

        if len(f) != self.n:
            logger.warning("Truncating time series to match shape of time array ({0}) ({1})"
                           .format(len(f), self.n))
            f = f[:self.n]

        # take Fourier transform of input (complex) function f
        # if HAS_PYFFTW:
        #     _f = pyfftw.n_byte_align_empty(f.size, 16, 'complex128')
        #     _f[:] = f

        #     fft_obj = pyfftw.builders.fft(f, overwrite_input=True,
        #                                   planner_effort='FFTW_ESTIMATE')
        #     fff = fft_obj() / np.sqrt(self.n)
        # else:
        t1 = time.time()
        fff = fft(f) / np.sqrt(self.n)
        logger.log(0, "Took {} seconds to FFT.".format(time.time()-t1))

        # frequencies
        omegas = 2*np.pi*fftfreq(f.size, self.dt)

        # wmax is just an initial guess for optimization
        xyf = np.abs(fff)
        wmax = xyf.argmax()
        if np.allclose(xyf[wmax], 0):
            # return early -- "this may be an axial or planar orbit"
            logger.log(0, "Returning early - may be an axial or planar orbit?")
            return 0.

        # real and complex part of input time series
        Re_f = f.real.copy()
        Im_f = f.imag.copy()

        # frequency associated with the peak index
        omega0 = omegas[wmax]

        freq = naff_frequency(omega0, self.tz, self.chi, Re_f, Im_f, self.T)
        return freq
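For context on the "Hanning filter" in the docstring: NAFF weights the series with a Hann window before transforming (multiplying by a window in time is equivalent to convolving with its transform in frequency), which suppresses spectral leakage from the finite time span. A rough, standalone illustration of the effect; the window and normalization here are chosen for the sketch, not read out of this class:

    import numpy as np
    from numpy.fft import fft

    dt = 0.05
    t = np.arange(0, 100, dt)
    f = np.exp(1j * 1.3 * t)

    chi = np.hanning(len(t))            # Hann window
    spec_plain = np.abs(fft(f))
    spec_windowed = np.abs(fft(chi * f))

    # The windowed spectrum has a broader main peak but far lower sidelobes,
    # which makes the strongest peak easier to isolate reliably.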
Example #3
    def n_modelfunc(self, pars=None, debug=False, **kwargs):
        """
        Simple wrapper to deal with N independent peaks for a given spectral model
        """
        if pars is None:
            pars = self.parinfo
        elif not isinstance(pars, ParinfoList):
            try:
                partemp = copy.copy(self.parinfo)
                partemp._from_Parameters(pars)
                pars = partemp
            except AttributeError:
                log.log(5, "Reading pars {0} as LMPar failed.".format(pars))
                if debug > 1:
                    import pdb
                    pdb.set_trace()
        if hasattr(pars, 'values'):
            # important to treat as Dictionary, since lmfit params & parinfo both have .items
            parnames, parvals = list(zip(*list(pars.items())))
            parnames = [p.lower() for p in parnames]
            parvals = [p.value for p in parvals]
        else:
            parvals = list(pars)

        if np.any(np.isnan(parvals)):
            raise ValueError("A parameter is NaN.  Unless you gave a NaN "
                             "value directly, this is a bug and should be "
                             "reported.  If you specified a NaN parameter, "
                             "don't do that.")

        log.debug("pars to n_modelfunc: {0}, parvals:{1}".format(
            pars, parvals))

        def L(x):
            v = np.zeros(len(x))
            if self.vheight:
                v += parvals[0]
            # use len(pars) instead of self.npeaks because we want this to work
            # independent of the current best fit
            for jj in range(int((len(parvals) - self.vheight) / self.npars)):
                lower_parind = jj * self.npars + self.vheight
                upper_parind = (jj + 1) * self.npars + self.vheight
                v += self.modelfunc(x, *parvals[lower_parind:upper_parind],
                                    **kwargs)
            return v

        return L
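The closure L slices one flat parameter list into npars-sized groups, one per peak, after an optional leading baseline term when vheight is set. A standalone sketch of that slicing logic, using a Gaussian as a stand-in for modelfunc (gaussian, npars, vheight, and parvals below are illustrative locals, not attributes of this class):

    import numpy as np

    def gaussian(x, amp, center, width):
        return amp * np.exp(-(x - center) ** 2 / (2 * width ** 2))

    npars = 3       # parameters per peak
    vheight = 1     # 1 if the first parameter is a constant baseline
    parvals = [0.5, 1.0, -2.0, 0.3, 2.0, 4.0, 0.6]   # baseline + 2 peaks

    x = np.linspace(-10, 10, 201)
    v = np.zeros(len(x))
    if vheight:
        v += parvals[0]
    for jj in range((len(parvals) - vheight) // npars):
        lower = jj * npars + vheight
        upper = (jj + 1) * npars + vheight
        v += gaussian(x, *parvals[lower:upper])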
Example #4
    def n_modelfunc(self, pars=None, debug=False, **kwargs):
        """
        Simple wrapper to deal with N independent peaks for a given spectral model
        """
        if pars is None:
            pars = self.parinfo
        elif not isinstance(pars, ParinfoList):
            try:
                partemp = copy.copy(self.parinfo)
                partemp._from_Parameters(pars)
                pars = partemp
            except AttributeError:
                log.log(5, "Reading pars {0} as LMPar failed.".format(pars))
                if debug > 1:
                    import pdb
                    pdb.set_trace()
        if hasattr(pars, 'values'):
            # important to treat as Dictionary, since lmfit params & parinfo both have .items
            parnames, parvals = list(zip(*list(pars.items())))
            parnames = [p.lower() for p in parnames]
            parvals = [p.value for p in parvals]
        else:
            parvals = list(pars)

        if np.any(np.isnan(parvals)):
            raise ValueError("A parameter is NaN.  Unless you gave a NaN "
                             "value directly, this is a bug and should be "
                             "reported.  If you specified a NaN parameter, "
                             "don't do that.")

        log.debug("pars to n_modelfunc: {0}, parvals:{1}".format(pars, parvals))

        def L(x):
            v = np.zeros(len(x))
            if self.vheight:
                v += parvals[0]
            # use len(pars) instead of self.npeaks because we want this to work
            # independent of the current best fit
            for jj in range(int((len(parvals) - self.vheight) / self.npars)):
                lower_parind = jj * self.npars + self.vheight
                upper_parind = (jj + 1) * self.npars + self.vheight
                v += self.modelfunc(x, *parvals[lower_parind:upper_parind],
                                    **kwargs)
            return v
        return L
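Note that n_modelfunc returns the closure rather than evaluating it, so a caller composes and evaluates in one step. A hedged usage sketch, where spec stands in for a model instance with this method and xarr for its spectral axis (both names are hypothetical):

    model_values = spec.n_modelfunc(pars)(xarr)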
Example #5
    def _parse_staging_request_page(self, data_list_page):
        """
        Parse pages like this one:
        https://almascience.eso.org/rh/requests/anonymous/786572566

        that include links to data sets that have been requested and staged

        Parameters
        ----------
        data_list_page : requests.Response object

        """

        root = BeautifulSoup(data_list_page.content, 'html5lib')

        # for link in root.findAll('a'):
        #    if 'script.sh' in link.text:
        #        download_script_url = urljoin(self.dataarchive_url,
        #                                      link['href'])
        # if 'download_script_url' not in locals():
        #    raise RemoteServiceError("No download links were found.")

        # download_script = self._request('GET', download_script_url,
        #                                cache=False)
        # download_script_target_urls = []
        # for line in download_script.text.split('\n'):
        #    if line and line.split() and line.split()[0] == 'wget':
        #        download_script_target_urls.append(line.split()[1].strip('"'))

        # if len(download_script_target_urls) == 0:
        #    raise RemoteServiceError("There was an error parsing the download"
        #                             " script; it is empty.  "
        #                             "You can access the download script "
        #                             "directly from this URL: "
        #                             "{0}".format(download_script_url))

        data_table = root.findAll('table', class_='list', id='report')[0]
        columns = {'uid': [], 'URL': [], 'size': []}
        for tr in data_table.findAll('tr'):
            tds = tr.findAll('td')

            # Cannot check class if it is not defined
            cl = 'class' in tr.attrs

            if (len(tds) > 1 and 'uid' in tds[0].text and
                    (cl and 'Level' in tr['class'][0])):
                # New Style
                text = tds[0].text.strip().split()
                if text[0] in ('Asdm', 'Member'):
                    uid = text[-1]
            elif len(tds) > 1 and 'uid' in tds[1].text:
                # Old Style
                uid = tds[1].text.strip()
            elif cl and tr['class'] == 'Level_1':
                raise ValueError("Heading was found when parsing the download "
                                 "page but it was not parsed correctly")

            if len(tds) > 3 and (cl and tr['class'][0] == 'fileRow'):
                # New Style
                size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)',
                                       tds[2].text).groups()
                href = tds[1].find('a')
                if size == '':
                    # this is a header row
                    continue
                authorized = ('access_authorized.png' in
                              tds[3].findChild('img')['src'])
                if authorized:
                    columns['uid'].append(uid)
                    if href and 'href' in href.attrs:
                        columns['URL'].append(href.attrs['href'])
                    else:
                        columns['URL'].append('None_Found')
                    unit = (u.Unit(unit) if unit in ('GB', 'MB')
                            else u.Unit('kB') if 'kb' in unit.lower()
                            else 1)
                    try:
                        columns['size'].append(float(size) * u.Unit(unit))
                    except ValueError:
                        # size is probably a string?
                        columns['size'].append(-1 * u.byte)
                    log.log(level=5, msg="Found a new-style entry.  "
                            "size={0} uid={1} url={2}"
                            .format(size, uid, columns['URL'][-1]))
                else:
                    log.warning("Access to {0} is not authorized.".format(uid))
            elif len(tds) > 3 and tds[2].find('a'):
                # Old Style
                href = tds[2].find('a')
                size, unit = re.search(r'([0-9\.]*)([A-Za-z]*)',
                                       tds[3].text).groups()
                columns['uid'].append(uid)
                columns['URL'].append(href.attrs['href'])
                unit = (u.Unit(unit) if unit in ('GB', 'MB')
                        else u.Unit('kB') if 'kb' in unit.lower()
                        else 1)
                columns['size'].append(float(size) * u.Unit(unit))
                log.log(level=5, msg="Found an old-style entry.  "
                        "size={0} uid={1} url={2}".format(size, uid,
                                                          columns['URL'][-1]))

        columns['size'] = u.Quantity(columns['size'], u.Gbyte)

        if len(columns['uid']) == 0:
            raise RemoteServiceError(
                "No valid UIDs were found in the staged data table. "
                "Please include {0} in a bug report."
                .format(self._staging_log['data_list_url']))

        # if len(download_script_target_urls) != len(columns['URL']):
        #    log.warn("There was an error parsing the data staging page.  "
        #             "The results from the page and the download script "
        #             "differ.  You can access the download script directly "
        #             "from this URL: {0}".format(download_script_url))
        # else:
        #    bad_urls = []
        #    for (rurl,url) in (zip(columns['URL'],
        #                           download_script_target_urls)):
        #        if rurl == 'None_Found':
        #            url_uid = os.path.split(url)[-1]
        #            ind = np.where(np.array(columns['uid']) == url_uid)[0][0]
        #            columns['URL'][ind] = url
        #        elif rurl != url:
        #            bad_urls.append((rurl, url))
        #    if bad_urls:
        #        log.warn("There were mismatches between the parsed URLs "
        #                 "from the staging page ({0}) and the download "
        #                 "script ({1})."
        #                 .format(self._staging_log['data_list_url'],
        #                         download_script_url))

        tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])

        return tbl
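A recurring detail in this parser is splitting a human-readable size cell such as "123.4MB" into a number and a unit, then normalizing with astropy units. A minimal sketch of that step in isolation (parse_size is an illustrative helper, and falling back to plain bytes is an assumption of the sketch, where the code above falls back to a dimensionless 1):

    import re
    import astropy.units as u

    def parse_size(text):
        size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)', text).groups()
        if size in ('', '-'):
            return None   # header row or missing size
        unit = (u.Unit(unit) if unit in ('GB', 'MB')
                else u.Unit('kB') if 'kb' in unit.lower()
                else u.byte)
        return (float(size) * unit).to(u.Gbyte)

    print(parse_size('123.4MB'))   # ~0.12 Gbyte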
Example #6
    def _parse_staging_request_page(self, data_list_page):
        """
        Parse pages like this one:
        https://almascience.eso.org/rh/requests/anonymous/786572566

        that include links to data sets that have been requested and staged

        Parameters
        ----------
        data_list_page : requests.Response object

        """

        root = BeautifulSoup(data_list_page.content, 'html5lib')

        data_table = root.findAll('table', class_='list', id='report')[0]
        columns = {'uid': [], 'URL': [], 'size': []}
        for tr in data_table.findAll('tr'):
            tds = tr.findAll('td')

            # Cannot check class if it is not defined
            cl = 'class' in tr.attrs

            if (len(tds) > 1 and 'uid' in tds[0].text
                    and (cl and 'Level' in tr['class'][0])):
                # New Style
                text = tds[0].text.strip().split()
                if text[0] in ('Asdm', 'Member'):
                    uid = text[-1]
            elif len(tds) > 1 and 'uid' in tds[1].text:
                # Old Style
                uid = tds[1].text.strip()
            elif cl and tr['class'] == 'Level_1':
                raise ValueError("Heading was found when parsing the download "
                                 "page but it was not parsed correctly")

            if len(tds) > 3 and (cl and tr['class'][0] == 'fileRow'):
                # New Style
                size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)',
                                       tds[2].text).groups()
                href = tds[1].find('a')
                if size == '':
                    # this is a header row
                    continue
                authorized = ('access_authorized.png'
                              in tds[3].findChild('img')['src'])
                if authorized:
                    columns['uid'].append(uid)
                    if href and 'href' in href.attrs:
                        columns['URL'].append(href.attrs['href'])
                    else:
                        columns['URL'].append('None_Found')
                    unit = (u.Unit(unit) if unit in ('GB', 'MB') else
                            u.Unit('kB') if 'kb' in unit.lower() else 1)
                    try:
                        columns['size'].append(float(size) * u.Unit(unit))
                    except ValueError:
                        # size is probably a string?
                        columns['size'].append(-1 * u.byte)
                    log.log(level=5,
                            msg="Found a new-style entry.  "
                            "size={0} uid={1} url={2}".format(
                                size, uid, columns['URL'][-1]))
                else:
                    log.warning("Access to {0} is not authorized.".format(uid))
            elif len(tds) > 3 and tds[2].find('a'):
                # Old Style
                href = tds[2].find('a')
                size, unit = re.search(r'([0-9\.]*)([A-Za-z]*)',
                                       tds[3].text).groups()
                columns['uid'].append(uid)
                columns['URL'].append(href.attrs['href'])
                unit = (u.Unit(unit) if unit in ('GB', 'MB') else
                        u.Unit('kB') if 'kb' in unit.lower() else 1)
                columns['size'].append(float(size) * u.Unit(unit))
                log.log(level=5,
                        msg="Found an old-style entry.  "
                        "size={0} uid={1} url={2}".format(
                            size, uid, columns['URL'][-1]))

        columns['size'] = u.Quantity(columns['size'], u.Gbyte)

        if len(columns['uid']) == 0:
            raise RemoteServiceError(
                "No valid UIDs were found in the staged data table. "
                "Please include {0} in a bug report.".format(
                    self._staging_log['data_list_url']))

        tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])

        return tbl
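The u.Quantity(columns['size'], u.Gbyte) call near the end converts a list of Quantity values carrying mixed units into one homogeneous Gbyte column. A small standalone demonstration (the sizes list is made up):

    import astropy.units as u

    sizes = [500 * u.Mbyte, 1.2 * u.Gbyte, 800 * u.kbyte]
    print(u.Quantity(sizes, u.Gbyte))
    # [0.5, 1.2, 0.0008] Gbyte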
Example #7
    def _parse_staging_request_page(self, data_list_page):
        """
        Parse pages like this one:
        https://almascience.eso.org/rh/requests/anonymous/786572566

        that include links to data sets that have been requested and staged

        Parameters
        ----------
        data_list_page : requests.Response object

        """

        root = BeautifulSoup(data_list_page.content, 'html5lib')

        data_table = root.findAll('table', class_='list', id='report')[0]
        columns = {'uid': [], 'URL': [], 'size': []}
        for tr in data_table.findAll('tr'):
            tds = tr.findAll('td')

            # Cannot check class if it is not defined
            cl = 'class' in tr.attrs

            if (len(tds) > 1 and 'uid' in tds[0].text and
                    (cl and 'Level' in tr['class'][0])):
                # New Style
                text = tds[0].text.strip().split()
                if text[0] in ('Asdm', 'Member'):
                    uid = text[-1]
            elif len(tds) > 1 and 'uid' in tds[1].text:
                # Old Style
                uid = tds[1].text.strip()
            elif cl and tr['class'] == 'Level_1':
                raise ValueError("Heading was found when parsing the download "
                                 "page but it was not parsed correctly")

            if len(tds) > 3 and (cl and tr['class'][0] == 'fileRow'):
                # New Style
                size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)',
                                       tds[2].text).groups()
                href = tds[1].find('a')
                if size == '':
                    # this is a header row
                    continue
                authorized = ('access_authorized.png' in
                              tds[3].findChild('img')['src'])
                if authorized:
                    columns['uid'].append(uid)
                    if href and 'href' in href.attrs:
                        columns['URL'].append(href.attrs['href'])
                    else:
                        columns['URL'].append('None_Found')
                    unit = (u.Unit(unit) if unit in ('GB', 'MB')
                            else u.Unit('kB') if 'kb' in unit.lower()
                            else 1)
                    try:
                        columns['size'].append(float(size) * u.Unit(unit))
                    except ValueError:
                        # size is probably a string?
                        columns['size'].append(-1 * u.byte)
                    log.log(level=5, msg="Found a new-style entry.  "
                            "size={0} uid={1} url={2}"
                            .format(size, uid, columns['URL'][-1]))
                else:
                    log.warning("Access to {0} is not authorized.".format(uid))
            elif len(tds) > 3 and tds[2].find('a'):
                # Old Style
                href = tds[2].find('a')
                size, unit = re.search(r'([0-9\.]*)([A-Za-z]*)',
                                       tds[3].text).groups()
                columns['uid'].append(uid)
                columns['URL'].append(href.attrs['href'])
                unit = (u.Unit(unit) if unit in ('GB', 'MB')
                        else u.Unit('kB') if 'kb' in unit.lower()
                        else 1)
                columns['size'].append(float(size) * u.Unit(unit))
                log.log(level=5, msg="Found an old-style entry.  "
                        "size={0} uid={1} url={2}".format(size, uid,
                                                          columns['URL'][-1]))

        columns['size'] = u.Quantity(columns['size'], u.Gbyte)

        if len(columns['uid']) == 0:
            raise RemoteServiceError(
                "No valid UIDs were found in the staged data table. "
                "Please include {0} in a bug report."
                .format(self._staging_log['data_list_url']))

        tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])

        return tbl
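A hedged illustration of the "New Style" row classification above, run on an invented fragment of a staging page (the HTML is made up for the example and requires bs4 with the html5lib parser installed):

    from bs4 import BeautifulSoup

    html = """<table>
      <tr class="Level_2"><td>Member uid://A001/X12/X34</td><td>notes</td></tr>
    </table>"""

    root = BeautifulSoup(html, 'html5lib')
    for tr in root.findAll('tr'):
        tds = tr.findAll('td')
        cl = 'class' in tr.attrs
        if (len(tds) > 1 and 'uid' in tds[0].text and
                (cl and 'Level' in tr['class'][0])):
            text = tds[0].text.strip().split()
            if text[0] in ('Asdm', 'Member'):
                uid = text[-1]

    print(uid)   # uid://A001/X12/X34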
Example #8
    def _parse_staging_request_page(self, data_list_page):
        """
        Parse pages like this one:
        https://almascience.eso.org/rh/requests/anonymous/786572566

        that include links to data sets that have been requested and staged

        Parameters
        ----------
        data_list_page : requests.Response object

        """

        root = BeautifulSoup(data_list_page.content, 'html5lib')

        #for link in root.findAll('a'):
        #    if 'script.sh' in link.text:
        #        download_script_url = urljoin(self.dataarchive_url,
        #                                      link['href'])
        #if 'download_script_url' not in locals():
        #    raise RemoteServiceError("No download links were found.")

        #download_script = self._request('GET', download_script_url,
        #                                cache=False)
        #download_script_target_urls = []
        #for line in download_script.text.split('\n'):
        #    if line and line.split() and line.split()[0] == 'wget':
        #        download_script_target_urls.append(line.split()[1].strip('"'))

        #if len(download_script_target_urls) == 0:
        #    raise RemoteServiceError("There was an error parsing the download "
        #                             "script; it is empty.  "
        #                             "You can access the download script "
        #                             "directly from this URL: "
        #                             "{0}".format(download_script_url))

        data_table = root.findAll('table', class_='list', id='report')[0]
        columns = {'uid': [], 'URL': [], 'size': []}
        for tr in data_table.findAll('tr'):
            tds = tr.findAll('td')

            # Cannot check class if it is not defined
            cl = 'class' in tr.attrs

            if (len(tds) > 1 and 'uid' in tds[0].text and
                    (cl and 'Level' in tr['class'][0])):
                # New Style
                text = tds[0].text.strip().split()
                if text[0] in ('Asdm', 'Member'):
                    uid = text[-1]
            elif len(tds) > 1 and 'uid' in tds[1].text:
                # Old Style
                uid = tds[1].text.strip()
            elif cl and tr['class'] == 'Level_1':
                raise ValueError("A heading was found when parsing the download page but "
                                 "it was not parsed correctly")

            if len(tds) > 3 and (cl and tr['class'][0] == 'fileRow'):
                # New Style
                size, unit = re.search(r'(-|[0-9\.]*)([A-Za-z]*)',
                                       tds[2].text).groups()
                href = tds[1].find('a')
                if size == '':
                    # this is a header row
                    continue
                authorized = ('access_authorized.png' in tds[3].findChild('img')['src'])
                if authorized:
                    columns['uid'].append(uid)
                    if href and 'href' in href.attrs:
                        columns['URL'].append(href.attrs['href'])
                    else:
                        columns['URL'].append('None_Found')
                    unit = (u.Unit(unit) if unit in ('GB', 'MB')
                            else u.Unit('kB') if 'kb' in unit.lower()
                            else 1)
                    try:
                        columns['size'].append(float(size) * u.Unit(unit))
                    except ValueError:
                        # size is probably a string?
                        columns['size'].append(-1 * u.byte)
                    log.log(level=5, msg="Found a new-style entry.  "
                            "size={0} uid={1} url={2}".format(size, uid,
                                                              columns['URL'][-1]))
                else:
                    log.warning("Access to {0} is not authorized.".format(uid))
            elif len(tds) > 3 and tds[2].find('a'):
                # Old Style
                href = tds[2].find('a')
                size, unit = re.search(r'([0-9\.]*)([A-Za-z]*)',
                                       tds[3].text).groups()
                columns['uid'].append(uid)
                columns['URL'].append(href.attrs['href'])
                unit = (u.Unit(unit) if unit in ('GB', 'MB')
                        else u.Unit('kB') if 'kb' in unit.lower()
                        else 1)
                columns['size'].append(float(size) * u.Unit(unit))
                log.log(level=5, msg="Found an old-style entry.  "
                        "size={0} uid={1} url={2}".format(size, uid,
                                                          columns['URL'][-1]))

        columns['size'] = u.Quantity(columns['size'], u.Gbyte)

        if len(columns['uid']) == 0:
            raise RemoteServiceError("No valid UIDs were found in the staged "
                                     "data table.  Please include {0} "
                                     "in a bug report."
                                     .format(self._staging_log['data_list_url']))

        #if len(download_script_target_urls) != len(columns['URL']):
        #    log.warn("There was an error parsing the data staging page.  "
        #             "The results from the page and the download script "
        #             "differ.  You can access the download script directly "
        #             "from this URL: {0}".format(download_script_url))
        #else:
        #    bad_urls = []
        #    for (rurl,url) in (zip(columns['URL'],
        #                           download_script_target_urls)):
        #        if rurl == 'None_Found':
        #            url_uid = os.path.split(url)[-1]
        #            ind = np.where(np.array(columns['uid']) == url_uid)[0][0]
        #            columns['URL'][ind] = url
        #        elif rurl != url:
        #            bad_urls.append((rurl, url))
        #    if bad_urls:
        #        log.warn("There were mismatches between the parsed URLs "
        #                 "from the staging page ({0}) and the download "
        #                 "script ({1})."
        #                 .format(self._staging_log['data_list_url'],
        #                         download_script_url))

        tbl = Table([Column(name=k, data=v) for k, v in iteritems(columns)])

        return tbl
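The commented-out block above cross-checks the parsed URLs against the wget lines of the request's download script. Its core is a simple line-by-line parse; as a standalone sketch with an invented script body:

    script = '''#!/bin/bash
    wget "https://example.org/data/member.uid___A001_X12_X34.tar"
    wget "https://example.org/data/readme.txt"
    '''

    targets = []
    for line in script.split('\n'):
        if line and line.split() and line.split()[0] == 'wget':
            targets.append(line.split()[1].strip('"'))

    print(targets)   # the two quoted URLs, stripped of their quotes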
Example #9
def _hducut(img_hdu, center_coord, cutout_size, correct_wcs=False, verbose=False):
    """
    Takes an ImageHDU (an image and its associated metadata in the FITS format), plus a
    center coordinate and a size, and makes a cutout of that image, which is returned as
    another ImageHDU, including updated WCS information.

    Parameters
    ----------
    img_hdu : `~astropy.io.fits.hdu.image.ImageHDU`
        The image and associated metadata that is being cut out.
    center_coord : `~astropy.coordinates.sky_coordinate.SkyCoord`
        The coordinate to cut out around.
    cutout_size : array
        The size of the cutout as [nx, ny], where nx/ny can be integers (assumed to be
        pixels) or `~astropy.units.Quantity` values, either pixels or angular quantities.
    correct_wcs : bool
        Default False. If True, a new WCS will be created for the cutout that is
        tangent projected and does not include distortions.
    verbose : bool
        Default False. If True, intermediate information is printed.

    Returns
    -------
    response : `~astropy.io.fits.hdu.image.ImageHDU`
        The cutout image and associated metadata.
    """
    
    hdu_header = fits.Header(img_hdu.header, copy=True)

    # We are going to reroute the logging to a string stream temporarily so we can
    # intercept any message from astropy, chiefly the "Inconsistent SIP distortion information"
    # INFO message which will indicate that we need to remove existing SIP keywords
    # from a WCS whose CTYPE does not include SIP. Here we take the CTYPE to be
    # correct and adjust the header keywords to match.
    hdlrs = log.handlers
    log.handlers = []
    with log.log_to_list() as log_list:
        img_wcs = wcs.WCS(hdu_header, relax=True)

    for hd in hdlrs:
        log.addHandler(hd)

    no_sip = False
    if len(log_list) > 0:
        if "Inconsistent SIP distortion information" in log_list[0].msg:

            # Remove sip coefficients
            img_wcs.sip = None
            no_sip = True
            
        else:  # Message(s) we didn't prepare for we want to go ahead and display
            for log_rec in log_list:
                log.log(log_rec.levelno, log_rec.msg, extra={"origin": log_rec.name})

    img_data = img_hdu.data

    if verbose:
        print("Original image shape: {}".format(img_data.shape))

    # Get cutout limits
    cutout_lims = get_cutout_limits(img_wcs, center_coord, cutout_size)

    if verbose:
        print("xmin,xmax: {}".format(cutout_lims[0]))
        print("ymin,ymax: {}".format(cutout_lims[1]))

    # These limits are not guaranteed to be within the image footprint
    xmin, xmax = cutout_lims[0]
    ymin, ymax = cutout_lims[1]

    ymax_img, xmax_img = img_data.shape

    # Check the cutout is on the image
    if (xmax <= 0) or (xmin >= xmax_img) or (ymax <= 0) or (ymin >= ymax_img):
        raise InvalidQueryError("Cutout location is not in image footprint!")

    # Adjust the limits and figure out the padding
    padding = np.zeros((2, 2), dtype=int)
    if xmin < 0:
        padding[1, 0] = -xmin
        xmin = 0
    if ymin < 0:
        padding[0, 0] = -ymin
        ymin = 0
    if xmax > xmax_img:
        padding[1, 1] = xmax - xmax_img
        xmax = xmax_img
    if ymax > ymax_img:
        padding[0, 1] = ymax - ymax_img
        ymax = ymax_img

    img_cutout = img_hdu.data[ymin:ymax, xmin:xmax]

    # Adding padding to the cutout so that it's the expected size
    if padding.any():  # only do if we need to pad
        img_cutout = np.pad(img_cutout, padding, 'constant', constant_values=np.nan)

    if verbose:
        print("Image cutout shape: {}".format(img_cutout.shape))

    # Getting the cutout wcs
    cutout_wcs = get_cutout_wcs(img_wcs, cutout_lims)

    # Updating the header with the new wcs info
    if no_sip:
        hdu_header.update(cutout_wcs.to_header(relax=False))
    else:
        hdu_header.update(cutout_wcs.to_header(relax=True))  # relax arg is for sip distortions if they exist

    # Naming the extension and preserving the original name
    hdu_header["O_EXT_NM"] = (hdu_header.get("EXTNAME"), "Original extension name.")
    hdu_header["EXTNAME"] = "CUTOUT"

    # Moving the filename, if present, into the ORIG_FLE keyword
    hdu_header["ORIG_FLE"] = (hdu_header.get("FILENAME"), "Original image filename.")
    hdu_header.remove("FILENAME", ignore_missing=True)

    hdu = fits.ImageHDU(header=hdu_header, data=img_cutout)

    return hdu
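The clamp-and-pad arithmetic in the middle of this function is the part most worth seeing in isolation: limits that fall outside the image are clipped to the footprint, and the clipped amount is recorded as NaN padding so the cutout still comes out at the requested shape. A self-contained NumPy sketch of the same bookkeeping (the array and limits are made up for the example):

    import numpy as np

    img = np.arange(25, dtype=float).reshape(5, 5)
    xmin, xmax = -2, 3    # requested x limits, partly off the image
    ymin, ymax = 1, 4     # requested y limits, fully on the image

    ny, nx = img.shape
    padding = np.zeros((2, 2), dtype=int)  # [[y_before, y_after], [x_before, x_after]]
    if xmin < 0:
        padding[1, 0] = -xmin
        xmin = 0
    if ymin < 0:
        padding[0, 0] = -ymin
        ymin = 0
    if xmax > nx:
        padding[1, 1] = xmax - nx
        xmax = nx
    if ymax > ny:
        padding[0, 1] = ymax - ny
        ymax = ny

    cutout = img[ymin:ymax, xmin:xmax]
    if padding.any():
        cutout = np.pad(cutout, padding, 'constant', constant_values=np.nan)

    print(cutout.shape)   # (3, 5): the requested (ymax - ymin, xmax - xmin)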