Example #1
0
def stats_proc(optlist, hduids, hdulist):
    """print statistics for region according to options
    """
    # Process each HDU in the list "hduids"
    for hduid in hduids:
        hdu = hdulist[hduid]
        name = hdu.name
        if optlist.sbias or optlist.pbias:
            iu.subtract_bias(optlist.sbias, optlist.pbias, hdu)
        slices = []
        (datasec, soscan, poscan) = iu.get_data_oscan_slices(hdu)
        if optlist.datasec:
            slices.append(datasec)
        if optlist.overscan:
            slices.append(soscan)
        if optlist.poverscan:
            slices.append(poscan)
        if optlist.region:
            for reg in optlist.region:  # if there are regions
                logging.debug("processing %s", reg)
                slice_spec = iu.parse_region(reg)
                if slice_spec:
                    slices.append(slice_spec)
                else:
                    logging.error("skipping region %s", reg)

        if not slices:  # no regions selected; report on the full image
            stats_print(optlist, hduid, name, hdu.data, None)
        for slice_spec in slices:
            y1, y2 = slice_spec[0].start or "", slice_spec[0].stop or ""
            x1, x2 = slice_spec[1].start or "", slice_spec[1].stop or ""
            reg = "{}:{},{}:{}".format(y1, y2, x1, x2)
            stats_print(optlist, hduid, name, hdu.data[slice_spec], reg)
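A minimal driver sketch for stats_proc, assuming the iu helper module used above is importable in the same namespace; the file name and option values are illustrative and not part of the original script.

# Hypothetical driver for stats_proc (file name and option values are made up;
# iu is the image-utilities module used above).
from argparse import Namespace
from astropy.io import fits

optlist = Namespace(sbias=None, pbias=None, datasec=True, overscan=False,
                    poverscan=False, region=None)
with fits.open("image.fits") as hdulist:
    hduids = iu.get_requested_image_hduids(hdulist, None, None)
    stats_proc(optlist, hduids, hdulist)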
Example #2
0
def imxtalk():
    """main logic:"""
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()
    ncalls.counter = 0
    # begin processing -- loop over files
    for ffile in optlist.fitsfile:
        try:
            hdulist = fits.open(ffile)
        except IOError as ioerr:
            logging.error("IOError: %s", ioerr)
            exit(1)
        if optlist.info:  # just print the image info per file
            hdulist.info()
            continue
        # Construct a list of the source HDU's to work on
        srcids = iu.get_requested_image_hduids(hdulist, optlist.srcname,
                                               optlist.srcindex)
        # Construct a list of the response HDU's to work on
        rspids = iu.get_requested_image_hduids(hdulist, optlist.rspname,
                                               optlist.rspindex)
        # find the union of bad column segments across the HDUs
        logging.debug("calling iu.get_union_of_bad_column_segs(hdulist)")
        bad_segs = iu.get_union_of_bad_column_segs(hdulist)
        logging.debug("bad_segs=%s", bad_segs)
        max_rn = 7.0
        pcnt = 20
        lsst_num = hdulist[0].header.get("LSST_NUM")
        # get_xtalk_coefs(hdulist, srcids, rspids, optlist.threshold)
        for srcid in srcids:
            hdu_s = hdulist[srcid]
            # subtract the bias estimate from the source array
            if lsst_num and re.match(r"^E2V-CCD250", lsst_num):
                stype = "byrowe2v"
            else:
                stype = "byrow"
            ptype = "bycolfilter"
            iu.subtract_bias(stype, ptype, hdu_s, bad_segs)
            logging.info("hdu_s = %s", hdu_s.header["EXTNAME"])
            (datasec_s, soscan_s, poscan_s) = iu.get_data_oscan_slices(hdu_s)
            rn_est = min(np.std(hdu_s.data[poscan_s[0], soscan_s[1]]), max_rn)
            if optlist.threshold:
                thresh = optlist.threshold
            else:
                thresh = 500 * rn_est  # rn_est computed above
            # estimate source background level, the threshold will be added to that
            # since we are only interested in source pixels above background by thresh
            thresh += np.percentile(hdu_s.data[datasec_s], pcnt)

            # make the (weights) mask used in response hdu bckgrnd subtraction
            mask_s = np.ones_like(hdu_s.data, dtype=int)
            mask_s[np.nonzero(hdu_s.data > thresh)] = 0
            mask_s[:, bad_segs] = 0  # fold these in

            arr_s = hdu_s.data.flatten("K")
            logging.debug("np.shape(arr_s)= %s", np.shape(arr_s))
            arr_x = arr_s[arr_s > thresh]
            logging.debug("found %d nans in arr_x",
                          np.count_nonzero(np.isnan(arr_x)))
            arr_x = arr_x.reshape(
                -1, 1)  # infer 1st axis, 2nd axis for 1 "feature"
            logging.debug("np.shape(arr_x)= %s", np.shape(arr_x))
            if np.size(arr_x) < 1000:
                logging.warning(
                    "not enough source points to produce a coef: %d < 1000",
                    np.size(arr_x),
                )
                continue

            if optlist.plot:
                plt.style.use(optlist.style)
                pu.update_rcparams()
                fig, axes = pu.get_fig_and_axis(
                    len(rspids),
                    optlist.layout,
                    False,
                    optlist.sharex,
                    optlist.sharey,
                    None,
                )
                nprows, npcols = (axes.shape[0], axes.shape[1])
                pu.set_fig_title(optlist.title, ffile, fig)
                sylim_upper = sylim_lower = 0.0

            for rindex, rspid in enumerate(rspids):
                if rspid == srcid:
                    sindex = rindex
                    continue
                hdu_r = hdulist[rspid]
                logging.info("    hdu_r = %s", hdu_r.header["EXTNAME"])
                if np.shape(hdu_s.data) != np.shape(hdu_r.data):
                    logging.warning(
                        "hdu's %s, %s shapes not commensurate: %s != %s, skipping",
                        hdu_s.header["EXTNAME"],
                        hdu_r.header["EXTNAME"],
                        np.shape(hdu_s.data),
                        np.shape(hdu_r.data),
                    )
                    continue
                (datasec_r, soscan_r,
                 poscan_r) = iu.get_data_oscan_slices(hdu_r)
                iu.subtract_bias(stype, ptype, hdu_r, bad_segs)
                # need to subtract a background level estimate from hdu_r, but it may
                # have lots of structure, so a somewhat careful estimate is needed
                # ------------
                # redo this with masking and line-by-line interp across the mask
                logging.debug("found %d nans in hdu_r",
                              np.count_nonzero(np.isnan(hdu_r.data)))
                iu.subtract_background_for_xtalk(hdu_r, mask_s, datasec_r)
                logging.debug("found %d nans in hdu_r",
                              np.count_nonzero(np.isnan(hdu_r.data)))
                arr_r = hdu_r.data.flatten("K")
                logging.debug("np.shape(arr_r)= %s", np.shape(arr_r))
                arr_y = arr_r[arr_s > thresh]

                logging.debug("found %d nans in arr_y",
                              np.count_nonzero(np.isnan(arr_y)))
                arr_xp = arr_x[~np.isnan(arr_y)]
                arr_yp = arr_y[~np.isnan(arr_y)]

                # reject high sources in response channel
                arr_xp = arr_xp[arr_yp < thresh]
                arr_yp = arr_yp[arr_yp < thresh]

                if optlist.intercept:
                    lr = linear_model.LinearRegression()
                    ransac = linear_model.RANSACRegressor()
                else:
                    lr = linear_model.LinearRegression(fit_intercept=False)
                    ransac = linear_model.RANSACRegressor(
                        linear_model.LinearRegression(fit_intercept=False))

                # lr.fit(arr_xp, arr_yp)
                # ransac.fit(arr_xp, arr_yp)
                # print(f"lr.coef={lr.coef_}")
                # print(f"ransac.estimator.coef={ransac.estimator_.coef_}")
                if np.max(arr_xp) < 0.95 * np.max(arr_x):
                    logging.warning("threshold is too low, raise and re-run")
                nbins = int((np.max(arr_xp) - np.min(arr_xp)) / 1000 * rn_est)
                logging.debug("np.max(arr_xp) = %.2f", np.max(arr_xp))
                logging.debug("np.min(arr_xp) = %.2f", np.min(arr_xp))
                logging.debug("nbins = %d", nbins)
                s, edges, _ = binned_statistic(arr_xp[:, 0], arr_yp, "median",
                                               nbins)
                cnt, cedges, _ = binned_statistic(arr_xp[:, 0], arr_yp,
                                                  "count", nbins)
                bin_width = edges[1] - edges[0]
                logging.debug("bin_width = %.2f", bin_width)
                binx = edges[1:] - bin_width / 2
                binx = binx[~np.isnan(s)]  # remove the empty bins
                count = cnt[~np.isnan(s)]
                logging.debug(
                    "count: mean: %.2f  median: %.2f  stddev: %.2f min: %.2f  max: %.2f",
                    np.mean(count),
                    np.median(count),
                    np.std(count),
                    np.min(count),
                    np.max(count),
                )
                count = np.sqrt(count) * np.log10(binx)  # extra weight on high bins
                logging.debug("binx[-10:] = %s", binx[-10:])
                logging.debug("count[-10:] = %s", count[-10:])
                s = s[~np.isnan(s)]
                sylim_upper = np.percentile(arr_yp, 99)
                sylim_lower = np.percentile(arr_yp, 1)
                if optlist.raw:  # expand limits
                    sydel = sylim_upper - sylim_lower
                    sylim_upper += 0.4 * sydel
                    sylim_lower -= 0.3 * sydel

                binx = binx.reshape(
                    -1, 1)  # infer 1st axis, 2nd axis for 1 "feature"
                lr.fit(binx, s, sample_weight=count)
                ransac.fit(binx, s, count)
                inlier_mask = ransac.inlier_mask_
                outlier_mask = np.logical_not(inlier_mask)
                lrcoef = lr.coef_[0]
                lrincpt = lr.intercept_
                rscoef = ransac.estimator_.coef_[0]
                rsincpt = ransac.estimator_.intercept_
                print(f"binned lr.coef={lrcoef:>3g}")
                if optlist.intercept:
                    print(f"binned lr.intercept={lrincpt:>.2f}")
                print(f"binned ransac.estimator.coef={rscoef:>3g}")
                if optlist.intercept:
                    print(f"binned ransac.estimator.intercept={rsincpt:>.2f}")

                # plotting
                if optlist.plot:
                    ax = np.ravel(axes)[rindex]
                    if optlist.style == "ggplot":
                        ax.scatter([], [])  # skip the first color
                    ax.grid(True)
                    ax.set_xlabel("source signal", size="x-small")
                    ax.set_ylabel("response signal", size="x-small")
                    if optlist.raw:
                        ax.scatter(arr_xp[:, 0], arr_yp, s=1.0, label="raw")
                    si = s[inlier_mask]
                    ci = count[inlier_mask]
                    # ci[-1] = 1.0
                    ax.scatter(
                        binx[inlier_mask, 0],
                        si,
                        # s=np.sqrt(count),
                        s=np.sqrt(ci),
                        color="blue",
                        alpha=0.5,
                        label="inliers",
                    )
                    si = s[outlier_mask]
                    ci = count[outlier_mask]
                    # ci[-1] = 1.0
                    ax.scatter(
                        binx[outlier_mask, 0],
                        si,
                        # s=np.sqrt(count),
                        s=np.sqrt(ci),
                        color="purple",
                        alpha=0.5,
                        label="outliers",
                    )
                    if optlist.predict:  # Predict and plot result of estimated models
                        line_x = np.arange(0.0, binx.max())[:, np.newaxis]
                        line_y = lr.predict(line_x)
                        line_y_ransac = ransac.predict(line_x)
                        lw = 2
                        if optlist.intercept:
                            lbl = f"lr: {lrcoef:>.3g}*x + {lrincpt:>.3g}"
                        else:
                            lbl = f"lr: {lrcoef:>.3g}*x"
                        ax.plot(line_x,
                                line_y,
                                color="navy",
                                linewidth=lw,
                                label=lbl)
                        if optlist.intercept:
                            lbl = f"ransac: {rscoef:>.3g}*x + {rsincpt:>.3g}"
                        else:
                            lbl = f"ransac: {rscoef:>.3g}*x"
                        ax.plot(
                            line_x,
                            line_y_ransac,
                            color="cornflowerblue",
                            linewidth=lw,
                            label=lbl,
                        )

                    if optlist.ylimits:
                        ax.set_ylim(optlist.ylimits[0], optlist.ylimits[1])
                    else:
                        ax.set_ylim(sylim_lower, sylim_upper)
                    ax.xaxis.set_tick_params(labelsize="x-small")
                    ax.xaxis.set_major_formatter(ticker.EngFormatter())
                    ax.yaxis.set_tick_params(labelsize="x-small")
                    ax.set_title(
                        f"SRC:RSP {hdu_s.header['EXTNAME']}:{hdu_r.header['EXTNAME']}",
                        fontsize="xx-small",
                    )
                    handles, labels = ax.get_legend_handles_labels()
                    lgnd = pu.mk_legend("inside", nprows, handles, labels, ax)
                    # big hack to shrink the scatter markers in the legend
                    logging.debug("sizes=%s", lgnd.legendHandles[-2]._sizes)
                    lgnd.legendHandles[-2]._sizes = [6]
                    lgnd.legendHandles[-1]._sizes = [6]

            if optlist.plot:
                for gidx in range(rindex + 1, nprows * npcols):
                    # ax = np.ravel(axes)[int(gidx / npcols) * npcols + gidx % npcols]
                    ax = np.ravel(axes)[gidx]
                    ax.grid(False)
                    ax.set_frame_on(False)
                    ax.get_xaxis().set_visible(False)
                    ax.get_yaxis().set_visible(False)

                if srcid in rspids:
                    ax = np.ravel(axes)[sindex]
                    ax.grid(False)
                    ax.set_frame_on(False)
                    ax.get_xaxis().set_visible(False)
                    ax.get_yaxis().set_visible(False)

                fig.set_tight_layout({
                    "h_pad": 0.50,
                    "w_pad": 1.0,
                    "rect": [0, 0, 1, 0.97]
                })
                plt.show()

        # end of per-file processing
        ncalls.counter = 0  # reset per file, triggers headers

    ncalls()  # track call count, acts like a static variable
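The core of imxtalk is the binned-median, weighted LinearRegression/RANSAC fit of response versus source signal; below is a self-contained sketch of just that step run on synthetic arrays (every value, size, and coefficient here is made up for illustration).

# Sketch of the binned-median + weighted LinearRegression/RANSAC fit used above,
# on synthetic source/response arrays (all values are made up).
import numpy as np
from scipy.stats import binned_statistic
from sklearn import linear_model

rng = np.random.default_rng(0)
arr_xp = rng.uniform(1000.0, 50000.0, 20000)                  # stand-in source pixels above threshold
arr_yp = 2.0e-3 * arr_xp + rng.normal(0.0, 5.0, arr_xp.size)  # fake response, ~0.2% cross-talk

nbins = 50
s, edges, _ = binned_statistic(arr_xp, arr_yp, "median", nbins)
cnt, _, _ = binned_statistic(arr_xp, arr_yp, "count", nbins)
binx = edges[1:] - (edges[1] - edges[0]) / 2                  # bin centers
keep = ~np.isnan(s)                                           # drop empty bins
binx, s, cnt = binx[keep], s[keep], cnt[keep]
weights = np.sqrt(cnt) * np.log10(binx)                       # extra weight on high-signal bins

lr = linear_model.LinearRegression(fit_intercept=False)
lr.fit(binx.reshape(-1, 1), s, sample_weight=weights)
ransac = linear_model.RANSACRegressor(linear_model.LinearRegression(fit_intercept=False))
ransac.fit(binx.reshape(-1, 1), s, weights)
print(f"lr coef={lr.coef_[0]:.3g}  ransac coef={ransac.estimator_.coef_[0]:.3g}")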
Example #3
0
def imarith():
    """main logic:"""
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()

    # verify operands: each must be a filename, a float, or a list of floats
    verify_args(optlist)
    region = None
    if optlist.region:
        region = iu.parse_region(optlist.region)

    # Open files, throws exception on error
    hdulist1 = fits.open(optlist.operand1, mode="readonly")
    if os.path.isfile(optlist.operand2):
        hdulist2 = fits.open(optlist.operand2, mode="readonly")
    else:
        hdulist2 = None
        operand2 = optlist.operand2.split()  # list of floats as strings

    # create output image with primary header and updates
    hdulisto = iu.create_output_hdulist(hdulist1, sys.argv)

    # loop over HDU id's from Master file, copy non-image HDUs
    # and process the image HDUs accordingly
    hduids = iu.get_requested_image_hduids(hdulist1, optlist.hduname,
                                           optlist.hduindex)
    if hduids is None:
        logging.info("No data HDUs found or requested")
        sys.exit(1)
    for hduid in hduids:  # process these images
        #
        # This needs work to allow more flexible hdulist2 type images
        # as-is, it enforces that names match for corresponding hdu's
        hdu1 = hdulist1[hduid]
        if hdulist2:
            if isinstance(hdulist2[hduid], (fits.ImageHDU, fits.CompImageHDU)):
                hdu2 = hdulist2[hduid]
            else:
                logging.error("HDU %d does not exist in %s", hduid,
                              hdulist2.filename())
                sys.exit(1)
        #
        if hdulist2 and np.shape(hdu1.data) != np.shape(hdu2.data):
            logging.error("Images are not comensurate")
            sys.exit(1)

        # prepare the output hdu
        hduo = iu.init_image_hdu(hdu1, hdulisto, region)

        # optionally subtract bias
        if optlist.sbias or optlist.pbias:
            iu.subtract_bias(optlist.sbias, optlist.pbias, hdu1)
            if hdulist2:
                iu.subtract_bias(optlist.sbias, optlist.pbias, hdu2)
        #
        # do the arithmetic
        if hdulist2:
            hduo.data = ffcalc(hdu1.data, hdu2.data, optlist.op, region)
        else:  # scalar or list of scalars
            if len(operand2) == 1:
                arg2 = float(operand2[0])
            else:
                arg2 = float(operand2.pop(0))
            hduo.data = fscalc(hdulist1[hduid].data, arg2, optlist.op, region)
        # finish up this hdu
        hduo.update_header()
        dtstr = datetime.datetime.utcnow().isoformat(timespec="milliseconds")
        hduo.add_checksum(dtstr)

    for hdu in hdulist1:
        # append to output if it does not contain image data
        if not isinstance(hdu,
                          (fits.ImageHDU, fits.CompImageHDU, fits.PrimaryHDU)):
            hdulisto.append(hdu)

    # write the output file
    hdulisto.info()
    hdulisto.writeto(optlist.result, overwrite=True)
    sys.exit(0)
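imarith itself is driven entirely by parse_args(); as a simplified, standalone sketch of the same idea (combine two FITS files HDU by HDU and write the result), here is a version using plain astropy calls in place of the iu helpers, with hypothetical file names.

# Simplified standalone sketch of the imarith idea: add two FITS files HDU by HDU.
# File names are hypothetical; the real script goes through the iu helpers above.
from astropy.io import fits

with fits.open("a.fits") as h1, fits.open("b.fits") as h2:
    out = fits.HDUList([fits.PrimaryHDU(header=h1[0].header)])
    for hdu1, hdu2 in zip(h1, h2):
        if isinstance(hdu1, (fits.ImageHDU, fits.CompImageHDU)) and hdu1.data is not None:
            out.append(fits.ImageHDU(data=hdu1.data + hdu2.data, header=hdu1.header))
    out.writeto("sum.fits", overwrite=True)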
Example #4
0
def quicklook(optlist, hduids, hdulist):
    """print quicklook for hdu's according to options
    """
    try:
        expt = float(hdulist[0].header["EXPTIME"])
    except KeyError as ke:
        emsg = "KeyError: {}".format(ke)
        logging.warninging(emsg)
        emsg = "adu/sec won't be available"
        logging.warninging(emsg)
        expt = 0.0

    # perform and print the given statistics quantities
    # fields are: mean, bias, signal, noise, adu/s
    quick_fields = [
        "mean",
        "bias",
        "signal",
        "noise",
        "adu/sec",
        "eper:s-cte",
        "eper:p-cte",
    ]
    if optlist.tearing:
        quick_fields.append("tearing")
    if optlist.dipoles:
        quick_fields.append("dipoles")
    if optlist.threshold:
        quick_fields.append("threshold")

    for hduid in hduids:
        #
        hdu = hdulist[hduid]
        name = hdu.name

        if optlist.sbias or optlist.pbias:
            iu.subtract_bias(optlist.sbias, optlist.pbias, hdu)

        # get datasec, serial overscan, parallel overscan as slices
        (datasec, soscan, poscan) = iu.get_data_oscan_slices(hdu)
        if not datasec or not soscan or not poscan:
            logging.error("Could not get DATASEC or overscan specs for %s", name)
            exit(1)

        if "tearing" in quick_fields:
            # rows used by the tearing metric; computed here so the value is
            # available for both the header line and the data line below
            if re.match(r"^data", optlist.tearing):
                trows = int(datasec[0].stop - 1)
            elif re.match(r"^div", optlist.tearing):
                trows = 100
            else:
                trows = int(optlist.tearing)

        if optlist.rstats:
            median_str, bias_str, noise_str = "rmedian", "rbias", "rnoise"
        else:
            median_str, bias_str, noise_str = "median", "bias", "noise"

        if not optlist.noheadings and ncalls.counter == 0:
            print("#{:>3s} {:>9s}".format("id", "HDUname"), end="")
            if "mean" in quick_fields:
                print(" {:>9s}".format(median_str), end="")
            if "bias" in quick_fields:
                print(" {:>9s}".format(bias_str), end="")
            if "signal" in quick_fields:
                print(" {:>9s}".format("signal"), end="")
            if "noise" in quick_fields:
                print(" {:>8s}".format(noise_str), end="")
            if "adu/sec" in quick_fields and expt > 0:
                print("{:>9s}".format("adu/sec"), end="")
            if "eper:s-cte" in quick_fields:
                print("{:>9s}".format("s-cte"), end="")
            if "eper:p-cte" in quick_fields:
                print("{:>9s}".format("p-cte"), end="")
            if "tearing" in quick_fields:
                if re.match(r"^data", optlist.tearing):
                    trows = int(datasec[0].stop - 1)
                elif re.match(r"^div", optlist.tearing):
                    trows = 100
                else:
                    trows = int(optlist.tearing)
                print("  {:s}({:>4d}r){:s}".format("tml", trows, "tmr"), end="")
            if "dipoles" in quick_fields:
                print("{:>9s}".format("%dipoles"), end="")
            if "threshold" in quick_fields:
                print("{:>9s}".format("N>thresh"), end="")
            print("")  # newline)

        if not optlist.noheadings:
            print(" {:3d} {:>9s}".format(hduid, name), end="")

        # noise evaluated in smaller region to avoid any gradient effects
        y0 = int(0.6 * datasec[0].start) + int(0.4 * datasec[0].stop)
        y1 = int(0.4 * datasec[0].start) + int(0.6 * datasec[0].stop)
        sx0 = int(0.95 * soscan[1].start) + int(0.05 * soscan[1].stop)
        if optlist.rstats:
            avg, med, std = stats.sigma_clipped_stats(hdu.data[datasec])
            sig_mean = med
            avg, med, std = stats.sigma_clipped_stats(hdu.data[soscan])
            bias_mean = med
            avg, med, std = stats.sigma_clipped_stats(hdu.data[y0:y1, sx0:])
            noise = std
        else:
            sig_mean = np.median(hdu.data[datasec])
            bias_mean = np.median(hdu.data[soscan])
            noise = np.std(hdu.data[y0:y1, sx0:])

        if "mean" in quick_fields:
            print(" {:>9.6g}".format(sig_mean), end="")
        if "bias" in quick_fields:
            print(" {:>9.6g}".format(bias_mean), end="")
        if "signal" in quick_fields:
            signal = sig_mean - bias_mean
            print(" {:>9.6g}".format(signal), end="")
        if "noise" in quick_fields:
            print(" {:>8.3f}".format(noise), end="")
        if "adu/sec" in quick_fields and expt > 0:
            print(" {:>8.3f}".format(float(signal) / expt), end="")
        if "eper:s-cte" in quick_fields:
            logging.debug("s-cte------------------")
            if signal < 5.0 * noise:
                print(" {:>8s}".format("None"), end="")
            else:
                scte = iu.eper_serial(hdu)
                if scte:
                    print(" {:>8.6f}".format(scte), end="")
                else:
                    print(" {:>8s}".format("None"), end="")
        # ---------
        if "eper:p-cte" in quick_fields:
            logging.debug("p-cte------------------")
            if signal < 5.0 * noise:
                print(" {:>8s}".format("None"), end="")
            else:
                pcte = iu.eper_parallel(hdu)
                if pcte:
                    print(" {:>8.6f}".format(pcte), end="")
                else:
                    print(" {:>8s}".format("None"), end="")
        # ---------
        if "tearing" in quick_fields:
            logging.debug("tearing check----------")
            tml, tmr = tearing_metric(hdu.data[datasec], trows)
            print(" {:>5.2f}    {:>5.2f}".format(tml, tmr), end="")
        # ---------
        if "dipoles" in quick_fields:
            logging.debug("dipoles check----------")
            ndipole = count_dipoles(hdu.data[datasec])
            print(
                "{:>9.2f}".format(
                    100.0 * float(2 * ndipole) / (np.size(hdu.data[datasec]))
                ),
                end="",
            )
        # ---------
        if "threshold" in quick_fields:
            logging.debug("threshold check----------")
            print(
                "{:>9d}".format(
                    np.count_nonzero(hdu.data[datasec] > optlist.threshold)
                ),
                end="",
            )
        # ---------
        print("")  # newline)
        ncalls()  # track call count, acts like static variable)
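A minimal driver sketch for quicklook, in the same spirit as the stats_proc example; the Namespace fields mirror the attributes accessed above, and the file name is hypothetical.

# Hypothetical driver for quicklook (option values and file name are illustrative;
# iu is the image-utilities module used above).
from argparse import Namespace
from astropy.io import fits

optlist = Namespace(sbias=None, pbias=None, rstats=False, noheadings=False,
                    tearing=None, dipoles=False, threshold=None)
with fits.open("flat.fits") as hdulist:
    hduids = iu.get_requested_image_hduids(hdulist, None, None)
    quicklook(optlist, hduids, hdulist)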
Example #5
0
def plot_hdus(optdict: dict, hduids: list, hdulist: fits.HDUList, pax: plt.axes):
    """
    For each hdu specified by hduids in hdulist, make a line
    plot on axis pax according to the input parameters

    Parameters
    ----------
    optdict: Dictionary with options such as row, col regions, flags,
             etc.  Typically the dict version of an argparse namespace.
        key       value
        ---       -----
        col       region spec list
        row       region spec list
        stype     mean|median|byrow|byrowsmooth
        ptype     mean|median|bycol|bycolsmooth|lsste2v|lsstitl
        ltype     median|mean|clipped|series
        steps     default|steps-mid
        offset    mean|median|delta
        series    boolean
        wcs       str
        smooth    int
    hduids: List of hdu ids to work on
    hdulist: A fits HDUList object containing the hdu's
    pax: A matplotlib.axes.Axes instance to contain the line plots
    """
    if optdict["row"]:
        map_axis = 0  # first axis (y) converts to scalar
    elif optdict["col"]:
        map_axis = 1  # second axis (x) converts to scalar
    else:
        logging.error("one of the row or col options must be specified")
        exit(1)
    # Process each HDU in the list "hduids"
    for hduid in hduids:
        hdu = hdulist[hduid]
        try:
            name = hdu.name
        except IndexError as ierr:
            logging.debug("IndexError: %s", ierr)
            logging.debug("using name=%s", hduid)
            name = "{}".format(hduid)

        if not optdict["sbias"] and not optdict["pbias"]:
            pass
        else:
            iu.subtract_bias(optdict["sbias"], optdict["pbias"], hdu)

        (datasec, soscan, poscan) = iu.get_data_oscan_slices(hdu)
        wcs = None
        if optdict["wcs"]:
            wcs = WCS(hdu.header, key=optdict["wcs"][0])
            # logging.debug("%s", wcs.printwcs())

        slices = []  # define regions to plot
        for reg in optdict["row"] or optdict["col"]:
            logging.debug("processing %s", reg)
            if re.match(r"data", reg):
                slice_spec = datasec
            elif re.match(r"over", reg):
                slice_spec = soscan
            elif re.match(r"pover", reg):
                slice_spec = poscan
            else:
                slice_spec = iu.parse_region(reg)
            if slice_spec != (None, None):
                slices.append(slice_spec)
            else:
                logging.error("skipping region %s", reg)
        for slice_spec in slices:
            logging.debug(
                "calling line_plot() %s[%s]", name, optdict["row"] or optdict["col"]
            )
            line_plot(
                slice_spec,
                hdu.data,
                optdict["ltype"],
                optdict["steps"],
                optdict["offset"],
                optdict["smooth"],
                wcs,
                map_axis,
                name,
                pax,
            )
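A minimal driver sketch for plot_hdus; the optdict keys mirror the docstring above, the file name is hypothetical, and iu is again the assumed image-utilities module.

# Hypothetical driver for plot_hdus (keys mirror the docstring above; the file
# name is made up and iu is the image-utilities module used above).
import matplotlib.pyplot as plt
from astropy.io import fits

optdict = {
    "row": ["data"], "col": None,        # plot row profiles over the data section
    "sbias": None, "pbias": None,
    "ltype": "median", "steps": "default",
    "offset": None, "smooth": None, "wcs": None,
}
with fits.open("image.fits") as hdulist:
    fig, pax = plt.subplots()
    hduids = iu.get_requested_image_hduids(hdulist, None, None)
    plot_hdus(optdict, hduids, hdulist, pax)
    plt.show()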