Exemplo n.º 1
0
    def _sherpa_plot(self, func, *args, **kwargs):
        """Invoke the Sherpa plot function ``func`` once per shell.

        Each shell gets its own ChIPS window named ``Shell<n>``. If the
        first positional argument is a ``%``-format string, the shell
        number is substituted into it before the call.

        :param func: Sherpa plot function
        :param args: positional arguments forwarded to ``func``
        :param kwargs: keyword arguments forwarded to ``func``
        :rtype: None
        """
        for idx in range(self.nshell):
            win = 'Shell%d' % idx
            try:
                pychips.add_window(['id', win])
            except RuntimeError:
                # A window with this id already exists; reuse it.
                pass

            call_args = args
            if args:
                # Substitute the shell number into the first argument
                # when it is a format string; otherwise leave args as-is.
                try:
                    call_args = (args[0] % idx,) + args[1:]
                except TypeError:
                    pass

            pychips.set_current_window(win)
            func(*call_args, **kwargs)
Exemplo n.º 2
0
        def _sherpa_plot(self, *args, **kwargs):
            """Run the Sherpa plot ``func`` once for every filtered dataset.

            ``func`` is a free variable captured from the enclosing scope
            (this function is built inside a factory that supplies it).

            :param args: positional arguments forwarded to the plot function
            :param kwargs: keyword arguments forwarded to the plot function
            :rtype: None
            """
            # FIXME We need to make sure ChIPS is initialized.
            # As a hack, we just call begin() and end() on the chips backend
            # to make sure the backend is initialized before creating windows.
            if _plot_pkg == 'chips':
                try:
                    chips_backend.begin()
                finally:
                    chips_backend.end()

            for dataset in self.filter_datasets():
                did = dataset['id']
                if _plot_pkg == 'chips':
                    try:
                        pychips.add_window(['id', did])
                    except RuntimeError:
                        # Window already exists; just make it current below.
                        pass
                    pychips.current_window(str(did))
                elif _plot_pkg == 'pylab':
                    # Matplotlib figure numbers start at 1.
                    plt.figure(self.ids.index(did) + 1)
                else:
                    raise ValueError('Unknown plot package')

                func(did, *args, **kwargs)
Exemplo n.º 3
0
        def _sherpa_plot(self, *args, **kwargs):
            """Call Sherpa plot ``func`` for each dataset.

            ``func`` is not a parameter of this function: it is a free
            variable captured from the enclosing scope (this function is
            defined inside a factory that supplies ``func``).

            :param args: plot function list arguments
            :param kwargs: plot function named (keyword) arguments
            :rtype: None
            """
            # FIXME We need to make sure ChIPS is initialized.
            # As a hack, we just call begin() and end() on the chips backend
            # To make sure the backend is initialized before we start creating windows.
            if _plot_pkg == 'chips':
                try:
                    chips_backend.begin()
                finally:
                    chips_backend.end()
            for dataset in self.filter_datasets():
                if _plot_pkg == 'chips':
                    try:
                        pychips.add_window(['id', dataset['id']])
                    except RuntimeError:
                        pass  # already exists
                    # window_id = pychips.ChipsId()
                    # window_id.window = dataset['id']
                    pychips.current_window(str(dataset['id']))
                elif _plot_pkg == 'pylab':
                    # Matplotlib figure numbers start at 1, hence the +1.
                    plt.figure(self.ids.index(dataset['id']) + 1)
                else:
                    raise ValueError('Unknown plot package')

                func(dataset['id'], *args, **kwargs)
Exemplo n.º 4
0
def _clear_window():
    """Reset the active ChIPS drawing area.

    Erase the current frame if one exists (otherwise add one), then
    make sure at least one window is open.
    """
    current = chips.info_current()
    if current is not None:
        # The info string mentions 'Frame' when a frame already exists.
        if 'Frame' in current:
            chips.erase()
        else:
            chips.add_frame()

    if chips.info() is None:
        chips.add_window()
Exemplo n.º 5
0
def _clear_window():
    """Reset the active ChIPS drawing area.

    Erases the current frame when one exists, adds a frame otherwise,
    and opens a window if none is open yet.
    """
    # info_current() presumably returns a description string of the
    # current window contents, or None when nothing exists yet.
    if chips.info_current() is not None:
        # 'Frame' appearing in the info string means a frame exists.
        if chips.info_current().find('Frame') != -1:
            chips.erase()
        else:
            chips.add_frame()

    if chips.info() is None:
        chips.add_window()
Exemplo n.º 6
0
def begin():
    """Start a ChIPS plotting transaction for Sherpa.

    Acquires the ChIPS lock and opens an undo buffer. On the first call
    only, applies preference overrides from the 'chips' config section
    and opens a dedicated window for Sherpa.
    """
    global _initialized

    chips.lock()    
    chips.advanced.open_undo_buffer()
    if _initialized is False:
        try:
            # Apply any user preference overrides from the [chips]
            # config section before creating Sherpa's window.
            overrides = config.items('chips')
            for item in overrides:
                chips.set_preference(item[0], item[1])
            chips.add_window() # Have Sherpa talk to its own
                               # chips window
        except NoSectionError:
            # No [chips] config section: release the lock.
            # NOTE(review): unlike the success path this drops the lock
            # and skips window creation, yet _initialized is still set
            # below -- confirm this asymmetry is intended.
            chips.unlock()
        except:
            # Any other failure: release the lock and propagate.
            chips.unlock()
            raise
        _initialized = True
Exemplo n.º 7
0
def initialize_plot(dataset, ids):
    """Create (or select) the ChIPS window for the given dataset.

    Parameters
    ----------
    dataset : str or int
       The dataset.
    ids : array_like
       The identifier array from the DataStack object (unused here,
       kept for interface parity with the other backends).

    See Also
    --------
    select_plot

    """
    win_id = dataset['id']
    try:
        pychips.add_window(['id', win_id])
    except RuntimeError:
        # The window already exists; that is fine.
        pass
    pychips.current_window(str(win_id))
Exemplo n.º 8
0
        def _sherpa_plot(self, *args, **kwargs):
            """Call Sherpa plot ``func`` for each dataset.

            ``func`` is not a parameter of this function: it is a free
            variable captured from the enclosing scope (this function is
            defined inside a factory that supplies ``func``).

            :param args: plot function list arguments
            :param kwargs: plot function named (keyword) arguments
            :rtype: None
            """
            for dataset in self.filter_datasets():
                if _plot_pkg == 'chips':
                    try:
                        pychips.add_window(['id', dataset['id']])
                    except RuntimeError:
                        pass  # already exists
                    # window_id = pychips.ChipsId()
                    # window_id.window = dataset['id']
                    pychips.current_window(str(dataset['id']))
                elif _plot_pkg == 'pylab':
                    # Matplotlib figure numbers start at 1, hence the +1.
                    plt.figure(self.ids.index(dataset['id']) + 1)
                else:
                    raise ValueError('Unknown plot package')

                func(dataset['id'], *args, **kwargs)
def main(opt):
    """Fit and apply an aspect-drift correction to an aspect solution.

    Extracts events around a source position, fits a polynomial to the
    yag/zag drift versus time, writes diagnostic plots, applies the
    correction to the aspect solution dy/dz columns, records the fit in
    header keywords, and writes the corrected file to opt['outfile'].

    :param opt: dict-like of tool parameters (evtfile, infile, outfile,
        x, y, radius, corr_poly_degree, corr_plot_root, src_min_counts,
        verbose, clobber)
    :rtype: None
    """

    # Use verbose option to control sherpa output
    logger = logging.getLogger("sherpa")
    logger.setLevel(LOGLEVELS[opt['verbose']])

    events = extract_events(opt['evtfile'],
                            opt['x'], opt['y'], opt['radius'])

    evt_ra_pnt = events.get_key('RA_PNT').value
    evt_dec_pnt = events.get_key('DEC_PNT').value
    evt_roll_pnt = events.get_key('ROLL_PNT').value

    asol = pycrates.read_file(opt['infile'])
    asol_times = asol.get_column('time').values

    # Sanity check the two input files
    # NOTE(review): a mismatch is only reported via v1(), not raised --
    # confirm processing should continue with mismatched obsids.
    asol_obsid = asol.get_key('OBS_ID').value
    evt_obsid = events.get_key('OBS_ID').value
    if asol_obsid != evt_obsid:
        v1("Error Aspect solution obsid {} != event file obsid {}".format(asol_obsid, evt_obsid))

    # Extract event RA, Dec, and times from event file
    # Do the WCS transformation directly instead of using the pycrates RA/Dec properties to
    # work around intermittent bug https://icxc.harvard.edu/pipe/ascds_help/2013a/0315.html
    wcs = events.get_transform("eqpos")
    evt_x = events.get_column("x").values
    evt_y = events.get_column("y").values
    rd = wcs.apply(np.column_stack([evt_x, evt_y]))
    evt_ra = rd[:, 0]
    evt_dec = rd[:, 1]
    evt_times = events.get_column('Time').values

    # Limit to only using events contained within the range of the aspect solution
    ok_times = (evt_times > asol_times[0]) & (evt_times < asol_times[-1])
    if not np.any(ok_times):
        raise ValueError("No events in region are contained within time range of aspect solution.")
    # Limit this *in place*
    evt_ra = evt_ra[ok_times]
    evt_dec = evt_dec[ok_times]
    evt_times = evt_times[ok_times]

    if len(evt_times) < opt['src_min_counts']:
        v1("Warning only {} counts in src region.  {} minimum suggested 'src_min_counts'".format(
                len(evt_times), opt['src_min_counts']))

    ax_data = {}
    # Map from fitted axis name to the aspect-solution column it corrects.
    ax_map = {'yag': 'dy',
              'zag': 'dz'}

    ax_data['yag'], ax_data['zag'] = get_event_yag_zag(evt_ra, evt_dec,
                                                       evt_ra_pnt, evt_dec_pnt, evt_roll_pnt)

    # Store comments to print in block after all of the sherpa fit output
    fit_comments = []
    plot_list = []

    for data_id, ax in enumerate(['yag', 'zag']):
        # Fit the mean-subtracted positions versus time.
        fit_data = ax_data[ax] - np.mean(ax_data[ax])
        mp, model = _fit_poly(fit_data, evt_times, opt['corr_poly_degree'], data_id=data_id)

        bin_centers, bin_mean, bin_std = time_bins(evt_times, fit_data)

        # Fit plot: time-binned event offsets with the model overlaid.
        add_window(6, 4, "inches")
        add_curve((bin_centers - evt_times[0]) / 1000., bin_mean, [bin_std, +bin_std],
                  ["line.style", "none", "symbol.style", "none", "err.style", "cap"])
        add_curve(mp.x / 1000., mp.y, ["symbol.style", "none"])
        # set minimum limit on fit plot in arcsecs and set this explicitly as a symmetric limit
        fit_ymax = max(0.3, np.max(np.abs(bin_mean - bin_std)), np.max(np.abs(bin_mean + bin_std)))
        limits(Y_AXIS, -1 * fit_ymax, fit_ymax)
        set_plot_xlabel("Observation elapsed/delta time (ks)")
        set_plot_ylabel("Position offset from mean, {} (arcsec)".format(ax))
        set_plot_title("Fit of {} data (with time-binned event offsets)".format(ax))
        fit_plot = "{}_fit_{}.png".format(opt['corr_plot_root'], ax)
        # NOTE(review): clobber is compared as the string 'yes' here but
        # passed directly to asol.write() below -- confirm expected type.
        if os.path.exists(fit_plot) and opt['clobber'] == 'yes':
            os.unlink(fit_plot)
        plot_list.append(fit_plot)
        print_window(fit_plot)

        # Data plot: raw offsets and the sherpa fit.
        add_window(6, 4, "inches")
        data_plot = "{}_data_{}.png".format(opt['corr_plot_root'], ax)
        ui.get_data_plot_prefs()['yerrorbars'] = False
        ui.plot_fit(data_id)
        if os.path.exists(data_plot) and opt['clobber'] == 'yes':
            os.unlink(data_plot)
        # set minimum limit on data plot in arcsecs and set this explicitly as a symmetric limit
        data_ymax = max(2.0, np.max(np.abs(fit_data)) + .2)
        limits(Y_AXIS, -1 * data_ymax, data_ymax)
        set_plot_xlabel("Observation elapsed/delta time (s)")
        set_plot_ylabel("Position offset from mean, {} (arcsec)".format(ax))
        set_plot_title("Raw data and fit in {}".format(ax))
        plot_list.append(data_plot)
        print_window(data_plot)

        # Interpolate the fitted model onto the aspect solution times.
        asol_corr = np.interp(asol_times, mp.x + evt_times[0], mp.y)
        asol_col_to_fix = asol.get_column(ax_map[ax])
        fit_comments.append("Events show drift range of {:.2f} arcsec in {} axis".format(
                np.max(asol_corr) - np.min(asol_corr), ax))
        fit_comments.append("Max absolute correction of {:.2f} arcsec for {} axis".format(
                np.max(np.abs(asol_corr)), ax))

        # Convert the correction from arcsecs to mm (divide by 20) and add the correction
        # to the dy and dz columns in the file.
        asol_col_to_fix.values += (asol_corr / 20)

        # Add header keys saving the axis-specific parts of this correction
        write_key(asol, "ADC{}MN".format(ax.upper()), np.mean(ax_data[ax]),
                  "Aspect Drift Corr. Mean of uncorr {} data".format(ax))
        for deg in range(0, 1 + opt['corr_poly_degree']):
            write_key(asol, "ADC{}C{}".format(ax.upper(), deg),
                      getattr(model, 'c{}'.format(deg)).val,
                      "Aspect Drift Corr. {} model c{}".format(ax, deg))

    # Add header keywords about fit
    write_key(asol, "ADCTIME0", evt_times[0],
              "Aspect Drift Corr. reference time")
    write_key(asol, "ADCSRCX", opt['x'],
              "Aspect Drift Corr. input src x")
    write_key(asol, "ADCSRCY", opt['y'],
              "Aspect Drift Corr. input src y")
    write_key(asol, "ADCSRCR", opt['radius'],
              "Aspect Drift Corr. input src radius", units='pix')
    write_key(asol, "ADCORDR", opt['corr_poly_degree'],
              "Aspect Drift Corr. model poly degree")
    write_key(asol, "ADCVER", VERSION,
              "Aspect Drift Corr. tool version")

    v2("-" * 60)
    v2("Fit results")
    for c in fit_comments:
        v2("\t{}".format(c))
    v2("-" * 60)
    v2("Writing out corrected aspect solution file to {}".format(opt['outfile']))
    v2("\tTo review fit see correction plots in:")
    for p in plot_list:
        v2("\t\t{}".format(p))

    # Actually write out the new aspect solution file
    asol.write(opt['outfile'], clobber=opt['clobber'])

    # Add history
    add_tool_history(opt['outfile'], TOOLNAME, params=opt, toolversion=VERSION)
Exemplo n.º 10
0
def main(opt):
    """Fit and apply an aspect-drift correction to an aspect solution.

    Extracts events around a source position, fits a polynomial to the
    yag/zag drift versus time, writes diagnostic plots, applies the
    correction to the aspect solution dy/dz columns, records the fit in
    header keywords, and writes the corrected file to opt['outfile'].

    :param opt: dict-like of tool parameters (evtfile, infile, outfile,
        x, y, radius, corr_poly_degree, corr_plot_root, src_min_counts,
        verbose, clobber)
    :rtype: None
    """

    # Use verbose option to control sherpa output
    logger = logging.getLogger("sherpa")
    logger.setLevel(LOGLEVELS[opt['verbose']])

    events = extract_events(opt['evtfile'], opt['x'], opt['y'], opt['radius'])

    evt_ra_pnt = events.get_key('RA_PNT').value
    evt_dec_pnt = events.get_key('DEC_PNT').value
    evt_roll_pnt = events.get_key('ROLL_PNT').value

    asol = pycrates.read_file(opt['infile'])
    asol_times = asol.get_column('time').values

    # Sanity check the two input files
    # NOTE(review): a mismatch is only reported via v1(), not raised --
    # confirm processing should continue with mismatched obsids.
    asol_obsid = asol.get_key('OBS_ID').value
    evt_obsid = events.get_key('OBS_ID').value
    if asol_obsid != evt_obsid:
        v1("Error Aspect solution obsid {} != event file obsid {}".format(
            asol_obsid, evt_obsid))

    # Extract event RA, Dec, and times from event file
    # Do the WCS transformation directly instead of using the pycrates RA/Dec properties to
    # work around intermittent bug https://icxc.harvard.edu/pipe/ascds_help/2013a/0315.html
    wcs = events.get_transform("eqpos")
    evt_x = events.get_column("x").values
    evt_y = events.get_column("y").values
    rd = wcs.apply(np.column_stack([evt_x, evt_y]))
    evt_ra = rd[:, 0]
    evt_dec = rd[:, 1]
    evt_times = events.get_column('Time').values

    # Limit to only using events contained within the range of the aspect solution
    ok_times = (evt_times > asol_times[0]) & (evt_times < asol_times[-1])
    if not np.any(ok_times):
        raise ValueError(
            "No events in region are contained within time range of aspect solution."
        )
    # Limit this *in place*
    evt_ra = evt_ra[ok_times]
    evt_dec = evt_dec[ok_times]
    evt_times = evt_times[ok_times]

    if len(evt_times) < opt['src_min_counts']:
        v1("Warning only {} counts in src region.  {} minimum suggested 'src_min_counts'"
           .format(len(evt_times), opt['src_min_counts']))

    ax_data = {}
    # Map from fitted axis name to the aspect-solution column it corrects.
    ax_map = {'yag': 'dy', 'zag': 'dz'}

    ax_data['yag'], ax_data['zag'] = get_event_yag_zag(evt_ra, evt_dec,
                                                       evt_ra_pnt, evt_dec_pnt,
                                                       evt_roll_pnt)

    # Store comments to print in block after all of the sherpa fit output
    fit_comments = []
    plot_list = []

    for data_id, ax in enumerate(['yag', 'zag']):
        # Fit the mean-subtracted positions versus time.
        fit_data = ax_data[ax] - np.mean(ax_data[ax])
        mp, model = _fit_poly(fit_data,
                              evt_times,
                              opt['corr_poly_degree'],
                              data_id=data_id)

        bin_centers, bin_mean, bin_std = time_bins(evt_times, fit_data)

        # Fit plot: time-binned event offsets with the model overlaid.
        add_window(6, 4, "inches")
        add_curve(
            (bin_centers - evt_times[0]) / 1000., bin_mean,
            [bin_std, +bin_std],
            ["line.style", "none", "symbol.style", "none", "err.style", "cap"])
        add_curve(mp.x / 1000., mp.y, ["symbol.style", "none"])
        # set minimum limit on fit plot in arcsecs and set this explicitly as a symmetric limit
        fit_ymax = max(0.3, np.max(np.abs(bin_mean - bin_std)),
                       np.max(np.abs(bin_mean + bin_std)))
        # NOTE(review): the symmetric y-limit is computed but the limits()
        # call below is commented out, so it is never applied -- confirm.
        #limits(Y_AXIS, -1 * fit_ymax, fit_ymax)
        set_plot_xlabel("Observation elapsed/delta time (ks)")
        set_plot_ylabel("Position offset from mean, {} (arcsec)".format(ax))
        set_plot_title(
            "Fit of {} data (with time-binned event offsets)".format(ax))
        fit_plot = "{}_fit_{}.png".format(opt['corr_plot_root'], ax)
        if os.path.exists(fit_plot) and opt['clobber']:
            os.unlink(fit_plot)
        plot_list.append(fit_plot)
        print_window(fit_plot)

        # Data plot: raw offsets and the sherpa fit.
        add_window(6, 4, "inches")
        data_plot = "{}_data_{}.png".format(opt['corr_plot_root'], ax)
        ui.get_data_plot_prefs()['yerrorbars'] = False
        ui.plot_fit(data_id)
        if os.path.exists(data_plot) and opt['clobber']:
            os.unlink(data_plot)
        # set minimum limit on data plot in arcsecs and set this explicitly as a symmetric limit
        data_ymax = max(2.0, np.max(np.abs(fit_data)) + .2)
        #limits(Y_AXIS, -1 * data_ymax, data_ymax)
        set_plot_xlabel("Observation elapsed/delta time (s)")
        set_plot_ylabel("Position offset from mean, {} (arcsec)".format(ax))
        set_plot_title("Raw data and fit in {}".format(ax))
        plot_list.append(data_plot)
        print_window(data_plot)

        # Interpolate the fitted model onto the aspect solution times.
        asol_corr = np.interp(asol_times, mp.x + evt_times[0], mp.y)
        asol_col_to_fix = asol.get_column(ax_map[ax])
        fit_comments.append(
            "Events show drift range of {:.2f} arcsec in {} axis".format(
                np.max(asol_corr) - np.min(asol_corr), ax))
        fit_comments.append(
            "Max absolute correction of {:.2f} arcsec for {} axis".format(
                np.max(np.abs(asol_corr)), ax))

        # Convert the correction from arcsecs to mm (divide by 20) and add the correction
        # to the dy and dz columns in the file.
        asol_col_to_fix.values += (asol_corr / 20)

        # Add header keys saving the axis-specific parts of this correction
        write_key(asol, "ADC{}MN".format(ax.upper()), np.mean(ax_data[ax]),
                  "Aspect Drift Corr. Mean of uncorr {} data".format(ax))
        for deg in range(0, 1 + opt['corr_poly_degree']):
            write_key(asol, "ADC{}C{}".format(ax.upper(), deg),
                      getattr(model, 'c{}'.format(deg)).val,
                      "Aspect Drift Corr. {} model c{}".format(ax, deg))

    # Add header keywords about fit
    write_key(asol, "ADCTIME0", evt_times[0],
              "Aspect Drift Corr. reference time")
    write_key(asol, "ADCSRCX", opt['x'], "Aspect Drift Corr. input src x")
    write_key(asol, "ADCSRCY", opt['y'], "Aspect Drift Corr. input src y")
    write_key(asol,
              "ADCSRCR",
              opt['radius'],
              "Aspect Drift Corr. input src radius",
              units='pix')
    write_key(asol, "ADCORDR", opt['corr_poly_degree'],
              "Aspect Drift Corr. model poly degree")
    write_key(asol, "ADCVER", VERSION, "Aspect Drift Corr. tool version")

    v2("-" * 60)
    v2("Fit results")
    for c in fit_comments:
        v2("\t{}".format(c))
    v2("-" * 60)
    v2("Writing out corrected aspect solution file to {}".format(
        opt['outfile']))
    v2("\tTo review fit see correction plots in:")
    for p in plot_list:
        v2("\t\t{}".format(p))

    # Actually write out the new aspect solution file
    asol.write(opt['outfile'], clobber=opt['clobber'])
Exemplo n.º 11
0
    def _sherpa_plot(self, func, *args, **kwargs):
        """Call the Sherpa plot ``func`` for each shell.

        Parameters
        ----------
        func : function reference
            The Sherpa plot function
        args
            The arguments for `func`
        kwargs
            Any keyword arguments for `func`. There are special
            keywords which are not passed on: `single_call_per_shell` is
            a boolean which indicates that the function is only
            called once per shell, and `add_shell_value` which indicates
            that the first argument is formatted to accept the shell
            value.

        Notes
        -----
        This method attempts to handle the differences when using
        ChIPS or Matplotlib as the Sherpa plotting backend, but has
        not been properly tested, so there may be issues.

        It is known not to work when the input function is plot_fit_delchi
        or plot_fit_resid, at least with the Matplotlib backend. It is
        unclear whether this is a problem here or the Sherpa
        matplotlib backend.

        """

        # Group the dataset identifiers by annulus so that a shell with
        # several datasets can be over-plotted in a single window.
        #
        ids_by_shell = [[] for _ in range(self.nshell)]
        for dset in self.datasets:
            ids_by_shell[dset['annulus']].append(dset['id'])

        # Pull the control keywords out of kwargs; they must not be
        # forwarded to func. Missing keys default to False.
        #
        special = {key: kwargs.pop(key, False)
                   for key in ('add_shell_value', 'single_call_per_shell')}

        for shell in range(self.nshell):
            if backend_name == 'pychips':
                win = 'Shell%d' % shell
                try:
                    pychips.add_window(['id', win])
                except RuntimeError:
                    # The window already exists; just make it current.
                    pychips.set_current_window(win)

            elif backend_name == 'pylab':
                plt.figure(shell)

            plot_args = args
            if len(args) > 0 and special['add_shell_value']:
                # The first argument is a template string; fill in the
                # shell number before calling func.
                plot_args = [args[0].format(shell)] + list(args[1:])

            # For the moment assume that if the user supplied an
            # argument then we use it, whatever the various
            # settings are. This catches errors like
            #   dep.plot_fit('rstat')
            #
            if special['single_call_per_shell'] or len(args) > 0:
                func(*plot_args, **kwargs)
            else:
                # Call once per dataset in this shell, using the
                # overplot keyword to stack the curves (assumed to be
                # supported by func).
                #
                overplot = False
                for did in ids_by_shell[shell]:
                    kwargs['overplot'] = overplot
                    func(did, **kwargs)
                    overplot = True