Example #1
def imhead():
    """print out headers"""
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()

    # processing -- loop over files
    for ffile in optlist.fitsfile:
        try:
            hdulist = fits.open(ffile)
        except IOError as ioerr:
            emsg = "IOError: {}".format(ioerr)
            logging.error(emsg)
            sys.exit(1)
        if optlist.info:  # just print the image info per file
            hdulist.info()
        else:
            hduids = iu.get_requested_hduids(hdulist, optlist.hduname,
                                             optlist.hduindex)
            for hduid in hduids:
                print("#--------{}:{}--------".format(hduid,
                                                      hdulist[hduid].name))
                print(repr(hdulist[hduid].header))
        hdulist.close()

    sys.exit(0)
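
For reference, the header-dump pattern above reduces to a few lines of plain astropy (a standalone sketch; "sample.fits" is a placeholder filename and the mu/iu helpers are omitted):

from astropy.io import fits

with fits.open("sample.fits") as hdulist:
    hdulist.info()  # one-line summary per HDU
    for hduid, hdu in enumerate(hdulist):
        print("#--------{}:{}--------".format(hduid, hdu.name))
        print(repr(hdu.header))  # full header, card by card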
Example #2
def imstat():
    """main logic:"""
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()
    ncalls.counter = 0
    # begin processing -- loop over files
    for ffile in optlist.fitsfile:
        try:
            hdulist = fits.open(ffile)
        except IOError as ioerr:
            logging.error("IOError: %s", ioerr)
            sys.exit(1)
        if optlist.info:  # just print the image info per file
            hdulist.info()
            continue
        if not optlist.noheadings:  # print filename
            print("#")
            print("# {}".format(os.path.basename(ffile)))
        # Construct a list of the HDU's to work on
        hduids = iu.get_requested_image_hduids(
            hdulist, optlist.hduname, optlist.hduindex
        )
        if optlist.quicklook:
            quicklook(optlist, hduids, hdulist)
        else:
            stats_proc(optlist, hduids, hdulist)
        ncalls.counter = 0  # reset per file, triggers headers
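
The quicklook and stats_proc helpers are not shown here; a minimal sketch of the kind of per-HDU statistics pass stats_proc plausibly performs (the function name and column layout are hypothetical):

import numpy as np

def stats_proc_sketch(hduids, hdulist):
    """Hypothetical stand-in for stats_proc: basic per-HDU statistics."""
    for hduid in hduids:
        pix = hdulist[hduid].data
        print("{:>2d} {:>9.6g} {:>9.6g} {:>8.4g} {:>9.6g} {:>9.6g}".format(
            hduid, np.mean(pix), np.median(pix), np.std(pix),
            np.min(pix), np.max(pix)))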
Example #3
def main():
    """
    main logic
    a glob pattern for a given input list of files/paths
    """
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()
    filenames = []
    for fname in optlist.file:
        # fname = re.sub(r"^.*/(.*)$", r"\1", fname)
        fname = re.sub(r"^(.*)\.fits?(\.fz)*$", r"\1", fname)
        filenames.append(fname)
    logging.debug('using %d filenames', len(filenames))
    if len(filenames) < 1:
        logging.error('No filenames to process')
        sys.exit(1)
    if len(filenames) == 1:
        print("{}".format(filenames[0]))
        sys.exit(0)

    ss_arr = []
    iu.get_lcs_array(filenames, ss_arr, 0, '', optlist.minsize)
    if ss_arr:
        logging.debug('%d substrings were found', len(ss_arr))
        suptitle = "{}".format('*'.join(ss_arr))
        if not re.match(ss_arr[0], filenames[0]):
            suptitle = "*{}".format(suptitle)
        if not re.search(r"{}$".format(ss_arr[-1]), filenames[0]):
            suptitle = "{}*".format(suptitle)
        print("{}".format(suptitle))
    else:
        logging.debug("No common substrings found")
Example #4
def imxtalk():
    """main logic:"""
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()
    ncalls.counter = 0
    # begin processing -- loop over files
    for ffile in optlist.fitsfile:
        try:
            hdulist = fits.open(ffile)
        except IOError as ioerr:
            logging.error("IOError: %s", ioerr)
            sys.exit(1)
        if optlist.info:  # just print the image info per file
            hdulist.info()
            continue
        # Construct a list of the source HDU's to work on
        srcids = iu.get_requested_image_hduids(hdulist, optlist.srcname,
                                               optlist.srcindex)
        # Construct a list of the response HDU's to work on
        rspids = iu.get_requested_image_hduids(hdulist, optlist.rspname,
                                               optlist.rspindex)
        # do stuff
        logging.debug("calling iu.get_union_of_bad_column_segs(hdulist)")
        bad_segs = iu.get_union_of_bad_column_segs(hdulist)
        logging.debug("bad_segs=%s", bad_segs)
        max_rn = 7.0
        pcnt = 20
        lsst_num = hdulist[0].header.get("LSST_NUM")
        # get_xtalk_coefs(hdulist, srcids, rspids, optlist.threshold)
        for srcid in srcids:
            hdu_s = hdulist[srcid]
            # subtract the bias estimate from the source array
            if lsst_num and re.match(r"^E2V-CCD250", lsst_num):
                stype = "byrowe2v"
            else:
                stype = "byrow"
            ptype = "bycolfilter"
            iu.subtract_bias(stype, ptype, hdu_s, bad_segs)
            logging.info("hdu_s = %s", hdu_s.header["EXTNAME"])
            (datasec_s, soscan_s, poscan_s) = iu.get_data_oscan_slices(hdu_s)
            rn_est = min(np.std(hdu_s.data[poscan_s[0], soscan_s[1]]), max_rn)
            if optlist.threshold:
                thresh = optlist.threshold
            else:
                thresh = 500 * rn_est
            # estimate source background level, the threshold will be added to that
            # since we are only interested in source pixels above background by thresh
            thresh += np.percentile(hdu_s.data[datasec_s], pcnt)

            # make the (weights) mask used in response hdu bckgrnd subtraction
            mask_s = np.ones_like(hdu_s.data, dtype=int)
            mask_s[np.nonzero(hdu_s.data > thresh)] = 0
            mask_s[:, bad_segs] = 0  # fold these in

            arr_s = hdu_s.data.flatten("K")
            logging.debug("np.shape(arr_s)= %s", np.shape(arr_s))
            arr_x = arr_s[arr_s > thresh]
            logging.debug("found %d nans in arr_x",
                          np.count_nonzero(np.isnan(arr_x)))
            arr_x = arr_x.reshape(
                -1, 1)  # infer 1st axis, 2nd axis for 1 "feature"
            logging.debug("np.shape(arr_x)= %s", np.shape(arr_x))
            if np.size(arr_x) < 1000:
                logging.warning(
                    "not enough source points to produce a coef: %d < 1000",
                    np.size(arr_x),
                )
                continue

            if optlist.plot:
                plt.style.use(optlist.style)
                pu.update_rcparams()
                fig, axes = pu.get_fig_and_axis(
                    len(rspids),
                    optlist.layout,
                    False,
                    optlist.sharex,
                    optlist.sharey,
                    None,
                )
                nprows, npcols = (axes.shape[0], axes.shape[1])
                pu.set_fig_title(optlist.title, ffile, fig)
                sylim_upper = sylim_lower = 0.0

            for rindex, rspid in enumerate(rspids):
                if rspid == srcid:
                    sindex = rindex
                    continue
                hdu_r = hdulist[rspid]
                logging.info("    hdu_r = %s", hdu_r.header["EXTNAME"])
                if np.shape(hdu_s.data) != np.shape(hdu_r.data):
                    logging.warning(
                        "hdu's %s, %s shapes not commensurate: %s != %s, skipping",
                        hdu_s.header["EXTNAME"],
                        hdu_r.header["EXTNAME"],
                        np.shape(hdu_s.data),
                        np.shape(hdu_r.data),
                    )
                    continue
                (datasec_r, soscan_r,
                 poscan_r) = iu.get_data_oscan_slices(hdu_r)
                iu.subtract_bias(stype, ptype, hdu_r, bad_segs)
                # need to subtract the background level estimate from hdu_r,
                # but it may have lots of structure so a somewhat careful
                # estimate is needed
                # ------------
                # redo this with masking and line by line interp across the mask
                logging.debug("found %d nans in hdu_r",
                              np.count_nonzero(np.isnan(hdu_r.data)))
                iu.subtract_background_for_xtalk(hdu_r, mask_s, datasec_r)
                logging.debug("found %d nans in hdu_r",
                              np.count_nonzero(np.isnan(hdu_r.data)))
                arr_r = hdu_r.data.flatten("K")
                logging.debug("np.shape(arr_r)= %s", np.shape(arr_r))
                arr_y = arr_r[arr_s > thresh]

                logging.debug("found %d nans in arr_y",
                              np.count_nonzero(np.isnan(arr_y)))
                arr_xp = arr_x[~np.isnan(arr_y)]
                arr_yp = arr_y[~np.isnan(arr_y)]

                # reject high sources in response channel
                arr_xp = arr_xp[arr_yp < thresh]
                arr_yp = arr_yp[arr_yp < thresh]

                if optlist.intercept:
                    lr = linear_model.LinearRegression()
                    ransac = linear_model.RANSACRegressor()
                else:
                    lr = linear_model.LinearRegression(fit_intercept=False)
                    ransac = linear_model.RANSACRegressor(
                        linear_model.LinearRegression(fit_intercept=False))

                # lr.fit(arr_xp, arr_yp)
                # ransac.fit(arr_xp, arr_yp)
                # print(f"lr.coef={lr.coef_}")
                # print(f"ransac.estimator.coef={ransac.estimator_.coef_}")
                if np.max(arr_xp) < 0.95 * np.max(arr_x):
                    logging.warning("threshold is too low, raise and re-run")
                nbins = int((np.max(arr_xp) - np.min(arr_xp)) / 1000 * rn_est)
                logging.debug("np.max(arr_xp) = %.2f", np.max(arr_xp))
                logging.debug("np.min(arr_xp) = %.2f", np.min(arr_xp))
                logging.debug("nbins = %d", nbins)
                s, edges, _ = binned_statistic(arr_xp[:, 0], arr_yp, "median",
                                               nbins)
                cnt, cedges, _ = binned_statistic(arr_xp[:, 0], arr_yp,
                                                  "count", nbins)
                bin_width = edges[1] - edges[0]
                logging.debug("bin_width = %.2f", bin_width)
                binx = edges[1:] - bin_width / 2
                binx = binx[~np.isnan(s)]  # remove the empty bins
                count = cnt[~np.isnan(s)]
                logging.debug(
                    "count: mean: %.2f  median: %.2f  stddev: %.2f min: %.2f  max: %.2f",
                    np.mean(count),
                    np.median(count),
                    np.std(count),
                    np.min(count),
                    np.max(count),
                )
                count = np.sqrt(count) * (np.log10(binx)
                                          )  # extra weight on high bins
                logging.debug("binx[-10:] = %s", binx[-10:])
                logging.debug("count[-10:] = %s", count[-10:])
                s = s[~np.isnan(s)]
                sylim_upper = np.percentile(arr_yp, 99)
                sylim_lower = np.percentile(arr_yp, 1)
                if optlist.raw:  # expand limits
                    sydel = sylim_upper - sylim_lower
                    sylim_upper += 0.4 * sydel
                    sylim_lower -= 0.3 * sydel

                binx = binx.reshape(
                    -1, 1)  # infer 1st axis, 2nd axis for 1 "feature"
                lr.fit(binx, s, sample_weight=count)
                ransac.fit(binx, s, count)
                inlier_mask = ransac.inlier_mask_
                outlier_mask = np.logical_not(inlier_mask)
                lrcoef = lr.coef_[0]
                lrincpt = lr.intercept_
                rscoef = ransac.estimator_.coef_[0]
                rsincpt = ransac.estimator_.intercept_
                print(f"binned lr.coef={lrcoef:>3g}")
                if optlist.intercept:
                    print(f"binned lr.intercept={lrincpt:>.2f}")
                print(f"binned ransac.estimator.coef={rscoef:>3g}")
                if optlist.intercept:
                    print(f"binned ransac.estimator.intercept={rsincpt:>.2f}")

                # plotting
                if optlist.plot:
                    ax = np.ravel(axes)[rindex]
                    if optlist.style == "ggplot":
                        ax.scatter([], [])  # skip the first color
                    ax.grid(True)
                    ax.set_xlabel("source signal", size="x-small")
                    ax.set_ylabel("response signal", size="x-small")
                    if optlist.raw:
                        ax.scatter(arr_xp[:, 0], arr_yp, s=1.0, label="raw")
                    si = s[inlier_mask]
                    ci = count[inlier_mask]
                    # ci[-1] = 1.0
                    ax.scatter(
                        binx[inlier_mask, 0],
                        si,
                        # s=np.sqrt(count),
                        s=np.sqrt(ci),
                        color="blue",
                        alpha=0.5,
                        label="inliers",
                    )
                    si = s[outlier_mask]
                    ci = count[outlier_mask]
                    # ci[-1] = 1.0
                    ax.scatter(
                        binx[outlier_mask, 0],
                        si,
                        # s=np.sqrt(count),
                        s=np.sqrt(ci),
                        color="purple",
                        alpha=0.5,
                        label="outliers",
                    )
                    if optlist.predict:  # Predict and plot result of estimated models
                        line_x = np.arange(0.0, binx.max())[:, np.newaxis]
                        line_y = lr.predict(line_x)
                        line_y_ransac = ransac.predict(line_x)
                        lw = 2
                        if optlist.intercept:
                            lbl = f"lr: {lrcoef:>.3g}*x + {lrincpt:>.3g}"
                        else:
                            lbl = f"lr: {lrcoef:>.3g}*x"
                        ax.plot(line_x,
                                line_y,
                                color="navy",
                                linewidth=lw,
                                label=lbl)
                        if optlist.intercept:
                            lbl = f"ransac: {rscoef:>.3g}*x + {rsincpt:>.3g}"
                        else:
                            lbl = f"ransac: {rscoef:>.3g}*x"
                        ax.plot(
                            line_x,
                            line_y_ransac,
                            color="cornflowerblue",
                            linewidth=lw,
                            label=lbl,
                        )

                    if optlist.ylimits:
                        ax.set_ylim(optlist.ylimits[0], optlist.ylimits[1])
                    else:
                        ax.set_ylim(sylim_lower, sylim_upper)
                    ax.xaxis.set_tick_params(labelsize="x-small")
                    ax.xaxis.set_major_formatter(ticker.EngFormatter())
                    ax.yaxis.set_tick_params(labelsize="x-small")
                    ax.set_title(
                        f"SRC:RSP {hdu_s.header['EXTNAME']}:{hdu_r.header['EXTNAME']}",
                        fontsize="xx-small",
                    )
                    handles, labels = ax.get_legend_handles_labels()
                    lgnd = pu.mk_legend("inside", nprows, handles, labels, ax)
                    # big hack
                    print(f"sizes={lgnd.legendHandles[-2]._sizes}")
                    lgnd.legendHandles[-2]._sizes = [6]
                    lgnd.legendHandles[-1]._sizes = [6]

            if optlist.plot:
                for gidx in range(rindex + 1, nprows * npcols):
                    # ax = np.ravel(axes)[int(gidx / npcols) * npcols + gidx % npcols]
                    ax = np.ravel(axes)[gidx]
                    ax.grid(False)
                    ax.set_frame_on(False)
                    ax.get_xaxis().set_visible(False)
                    ax.get_yaxis().set_visible(False)

                if srcid in rspids:
                    ax = np.ravel(axes)[sindex]
                    ax.grid(False)
                    ax.set_frame_on(False)
                    ax.get_xaxis().set_visible(False)
                    ax.get_yaxis().set_visible(False)

                fig.set_tight_layout({
                    "h_pad": 0.50,
                    "w_pad": 1.0,
                    "rect": [0, 0, 1, 0.97]
                })
                plt.show()

        # end of doing stuff
        ncalls.counter = 0  # reset per file, triggers headers

    ncalls()  # track call count, acts like a static variable
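
The core of the crosstalk measurement is a binned median of (source, response) samples followed by a robust linear fit. A self-contained sketch with synthetic data in place of the pixel arrays (the coefficient value is arbitrary):

import numpy as np
from scipy.stats import binned_statistic
from sklearn import linear_model

rng = np.random.default_rng(0)
src = rng.uniform(1e3, 6e4, 20000)  # source pixels above "threshold"
rsp = 1.5e-4 * src + rng.normal(0.0, 5.0, src.size)  # injected xtalk + noise

s, edges, _ = binned_statistic(src, rsp, "median", 100)
binx = (edges[1:] - np.diff(edges) / 2.0)[~np.isnan(s)]  # bin centers
s = s[~np.isnan(s)]  # drop empty bins

ransac = linear_model.RANSACRegressor(
    linear_model.LinearRegression(fit_intercept=False))
ransac.fit(binx.reshape(-1, 1), s)
print("coef ~ {:.3g}".format(ransac.estimator_.coef_[0]))  # recovers ~1.5e-4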
Example #5
def imarith():
    """main logic:"""
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()

    # evaluate operands as a filename, a float, or a list of floats
    verify_args(optlist)
    region = None
    if optlist.region:
        region = iu.parse_region(optlist.region)

    # Open files, throws exception on error
    hdulist1 = fits.open(optlist.operand1, mode="readonly")
    if os.path.isfile(optlist.operand2):
        hdulist2 = fits.open(optlist.operand2, mode="readonly")
    else:
        hdulist2 = None
        operand2 = optlist.operand2.split()  # list of floats as strings

    # create output image with primary header and updates
    hdulisto = iu.create_output_hdulist(hdulist1, sys.argv)

    # loop over HDU id's from Master file, copy non-image HDUs
    # and process the image HDUs accordingly
    hduids = iu.get_requested_image_hduids(hdulist1, optlist.hduname,
                                           optlist.hduindex)
    if hduids is None:
        logging.info("No data HDUs found or requested")
        sys.exit(1)
    for hduid in hduids:  # process these images
        #
        # This needs work to allow more flexible hdulist2 type images
        # as-is, it enforces that names match for corresponding hdu's
        hdu1 = hdulist1[hduid]
        if hdulist2:
            if isinstance(hdulist2[hduid], (fits.ImageHDU, fits.CompImageHDU)):
                hdu2 = hdulist2[hduid]
            else:
                logging.error("HDU %d does not exist in %s", hduid,
                              hdulist2.filename())
                sys.exit(1)
        #
        if hdulist2 and np.shape(hdu1.data) != np.shape(hdu2.data):
            logging.error("Images are not comensurate")
            sys.exit(1)

        # prepare the output hdu
        hduo = iu.init_image_hdu(hdu1, hdulisto, region)

        # optionally subtract bias
        if optlist.sbias or optlist.pbias:
            iu.subtract_bias(optlist.sbias, optlist.pbias, hdu1)
            if hdulist2:
                iu.subtract_bias(optlist.sbias, optlist.pbias, hdu2)
        #
        # do the arithmetic
        if hdulist2:
            hduo.data = ffcalc(hdu1.data, hdu2.data, optlist.op, region)
        else:  # scalar or list of scalars
            if len(operand2) == 1:
                arg2 = float(operand2[0])
            else:
                arg2 = float(operand2.pop(0))
            hduo.data = fscalc(hdulist1[hduid].data, arg2, optlist.op, region)
        # finish up this hdu
        hduo.update_header()
        dtstr = datetime.datetime.utcnow().isoformat(timespec="milliseconds")
        hduo.add_checksum(dtstr)

    for hdu in hdulist1:
        # append to output if it does not contain image data
        if not isinstance(hdu,
                          (fits.ImageHDU, fits.CompImageHDU, fits.PrimaryHDU)):
            hdulisto.append(hdu)

    # write the output file
    hdulisto.info()
    hdulisto.writeto(optlist.result, overwrite=True)
    sys.exit(0)
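
The ffcalc/fscalc helpers are not shown; for the scalar branch, a hypothetical sketch of the arithmetic (the name, signature, and region handling are assumptions):

import numpy as np

def fscalc_sketch(arr, scalar, op, region=None):
    """Hypothetical stand-in for fscalc: scalar image arithmetic."""
    data = arr[region] if region is not None else arr
    ops = {"+": np.add, "-": np.subtract, "*": np.multiply, "/": np.divide}
    return ops[op](data, scalar)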
Example #6
def imfft():
    """main logic:"""
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()

    if optlist.scaling == "density":
        window = "boxcar"
    else:
        window = "flattop"

    # Open files
    fileno = 0
    hduids = []
    # loop over files
    for ffile in optlist.fitsfile:
        try:
            hdulist = fits.open(ffile)
        except IOError as ioerr:
            emsg = "IOError: {}".format(ioerr)
            logging.error(emsg)
            sys.exit(1)
        if optlist.info:  # just print the image info per file
            hdulist.info()
            continue
        # Construct a list of the HDU's to work on
        hduids = iu.get_requested_image_hduids(hdulist, optlist.hduname,
                                               optlist.hduindex)
        # loop over hdu's
        hducnt = 0
        for hduid in hduids:
            hdr = hdulist[hduid].header
            try:
                dstr = hdr["DATASEC"]
            except KeyError as ke:
                emsg = "KeyError: {}, required".format(ke)
                logging.error(emsg)
                sys.exit(1)
            debugmsg = "DATASEC={}".format(dstr)
            logging.debug(debugmsg)
            res = re.match(r"\[*([0-9]*):([0-9]+),([0-9]+):([0-9]+)\]*", dstr)
            if res:
                datasec = res.groups()
            else:
                emsg = "DATASEC:{} parsing failed".format(dstr)
                logging.error(emsg)
                sys.exit(1)

            # define region to measure
            x1 = int(datasec[0]) - 1
            x2 = int(datasec[1])

            stddev = float(hdr["STDVBIAS"])
            pix = hdulist[hduid].data
            fs = 1.0 / (optlist.rt * 1e-9)
            # measure the size needed
            arr = pix[optlist.row, x1:x2]
            x, p = signal.periodogram(arr, fs, window, scaling=optlist.scaling)
            flen = x.size
            plen = p.size
            if flen != plen:
                emsg = "flen({}) != plen({})".format(flen, plen)
                logging.error(emsg)
                sys.exit(1)
            # now do the real calculation
            f = np.empty((optlist.nrows, flen))
            Pxx_den = np.empty((optlist.nrows, plen))
            for rr in range(0, optlist.nrows):
                arr = pix[rr + optlist.row, x1:x2]
                if optlist.clip:
                    amed = np.median(arr)
                    farr = sigma_clip(arr)
                    x, p = signal.periodogram(farr.filled(amed),
                                              fs,
                                              window,
                                              scaling=optlist.scaling)
                else:
                    x, p = signal.periodogram(arr,
                                              fs,
                                              window,
                                              scaling=optlist.scaling)
                f[rr] = x
                Pxx_den[rr] = p

            f_avg = np.average(f, axis=0)
            Pxx_den_avg = np.average(Pxx_den, axis=0)
            # track the range needed for y-axis limits
            if (fileno + hducnt) == 0:
                pmin = Pxx_den_avg.min()
                pmax = Pxx_den_avg.max()
                debugmsg = "pmin0={:>g}".format(pmin)
                logging.debug(debugmsg)
                debugmsg = "pmax0={:>g}".format(pmax)
                logging.debug(debugmsg)
            else:
                if pmin > Pxx_den_avg.min():
                    pmin = Pxx_den_avg.min()
                    debugmsg = "pmin={:>g}".format(pmin)
                    logging.debug(debugmsg)
                if pmax < Pxx_den_avg.max():
                    pmax = Pxx_den_avg.max()
                    debugmsg = "pmax={:>g}".format(pmax)
                    logging.debug(debugmsg)

            plt.semilogy(
                f_avg,
                Pxx_den_avg,
                label="{}:{:>02d}:{:>7.2f}".format(fileno, hduid, stddev),
            )
            hducnt += 1
            # end loop over hdui's
        fileno += 1
        # end loop over files
    #
    plt.ylim([0.8 * pmin, 1.2 * pmax])
    plt.xlabel("freqquency [Hz]")
    if optlist.scaling == "density":
        plt.ylabel("PSD [V**2/Hz]")
    else:
        plt.ylabel("Linear spectrum [V RMS]")
    plt.grid(True)
    plt.legend(fontsize="xx-small", title="File:HDUi RN")
    plt.show()
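
The row loop above averages per-row periodograms to beat down noise in the PSD estimate. A standalone sketch with synthetic rows in place of the image data (fs and the array shape are placeholders):

import numpy as np
from scipy import signal

fs = 1.0 / 250e-9  # placeholder sample rate (250 ns per pixel)
rows = np.random.default_rng(1).normal(0.0, 7.0, size=(16, 512))

f, _ = signal.periodogram(rows[0], fs, "boxcar", scaling="density")
psd = np.array([signal.periodogram(r, fs, "boxcar", scaling="density")[1]
                for r in rows])
psd_avg = psd.mean(axis=0)  # row-averaged PSD, as plotted above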
Example #7
def imcombine():
    """main logic:"""
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()
    verbose = optlist.verbose

    # convert to slice format
    region = None
    if optlist.region:
        region = iu.parse_region(optlist.region)

    # Prepare scaling region in slice format
    scaling = None
    if optlist.scaling:
        scaling = iu.parse_region(optlist.scaling)

    # build file list
    if optlist.fitsfile:  # input files listed on cmd line
        ifiles = optlist.fitsfile
    elif optlist.ifile:  # input files listed in one or more files
        ifiles = []
        for b in mu.file_to_tokens(optlist.ifile):
            ifiles.extend(b)
    ifiles = sorted(list(set(ifiles)))  # remove duplicates
    if verbose:
        print(f"using {len(ifiles)} input files")
    if optlist.debug:
        logging.debug("input files:")
        for ff in ifiles:
            logging.debug("  %s", ff)

    # get a list of verified images as hdulists
    # prepare input files for use (open and verify)
    if optlist.bimage:  # include in verification
        ifiles.append(optlist.bimage)
    iimages = iu.files_to_hdulists(ifiles, True)
    if optlist.bimage:
        bimage = iimages.pop()  # remove & assign last as bias image
    else:
        bimage = None

    # create output image with primary header and updates
    hdulisto = iu.create_output_hdulist(iimages[0], sys.argv)

    # get all requested hduids
    hduids_to_proc = iu.get_requested_hduids(iimages[0], optlist.hduname,
                                             optlist.hduindex)
    if hduids_to_proc is None:
        logging.error("No HDUs found or requested")
        sys.exit(1)
    # get just requested hduids with image data to be combined
    hduids_to_comb = iu.get_requested_image_hduids(iimages[0], optlist.hduname,
                                                   optlist.hduindex)
    if hduids_to_comb is None:
        logging.error("No data HDUs found or requested")
        sys.exit(1)

    # choose the method to combine images
    if optlist.median:
        method = ["median"]
        if len(iimages) < 3:
            logging.warning("image count %d < 3, can only choose mean",
                            len(iimages))
            sys.exit()
    elif optlist.mean:
        method = ["mean"]
    elif optlist.sigmaclipped:
        method = ["sigmaclipped", optlist.sigmaclipped]
        if len(iimages) < 5:
            logging.warning("image count %d < 5, can only choose mean",
                            len(iimages))
            sys.exit()
    elif optlist.rank:
        method = ["rank", optlist.rank]
        if len(iimages) < 5:
            logging.warning("image count %d < 5, can only choose median, mean",
                            len(iimages))
            sys.exit()
    else:
        method = ["median"]  # default

    # prepare the output image hdus using the first image as a template
    for hduid, hdui in enumerate(iimages[0]):
        # hdu image has data to combine
        if hduid in hduids_to_comb:
            logging.debug(f"processing hdu index {hduid}")
            hduo = iu.init_image_hdu(hdui, hdulisto, region)
            # this is the main algorithm/function
            iu.image_combine_hdu(
                iimages,
                hduid,
                method,
                region,
                bimage,
                optlist.sbias,
                optlist.ptype,
                scaling,
                hduo,
            )
            # finish up this hdu
            hduo.update_header()
            dtstr = datetime.datetime.utcnow().isoformat(
                timespec="milliseconds")
            hduo.add_checksum(dtstr)

        # just append if image hdu has no data (e.g. tables etc.)
        if hduid in hduids_to_proc:
            if not isinstance(
                    hdui, (fits.ImageHDU, fits.CompImageHDU, fits.PrimaryHDU)):
                # append extensions that contain non-image data
                logging.debug(f"appending hdu index {hduid}")
                hdulisto.append(hdui)

    # write the output file
    hdulisto.writeto(optlist.result[0], overwrite=True)
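
iu.image_combine_hdu is not shown; for the "sigmaclipped" method the per-pixel combine plausibly reduces to astropy's sigma_clip over the image stack (a sketch with synthetic frames standing in for the input images):

import numpy as np
from astropy.stats import sigma_clip

stack = np.random.default_rng(2).normal(1000.0, 10.0, size=(9, 64, 64))
clipped = sigma_clip(stack, sigma=3.0, axis=0)  # mask outliers per pixel
combined = clipped.mean(axis=0).filled(np.nan)  # sigma-clipped mean image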
Example #8
def trender():
    """main logic"""
    # get command args and options
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()

    logging.debug("optlist: %s", optlist)

    # get list of time intervals to process
    intervals = tu.get_unique_time_intervals(optlist.start, optlist.stop,
                                             optlist.interval,
                                             optlist.duration)
    if intervals:  # interval accounting
        intcnt = len(intervals)
        inttot = int(sum([t[1] - t[0] for t in intervals]) / 1000)
        tmin = intervals[0][0]
        tmax = intervals[-1][1]
    else:
        logging.error("time interval spec failed")
        sys.exit(1)

    # set up the trending source (cached-on-disk, slac, base, summit, etc.)
    if not optlist.input_file:
        tsite = tu.get_trending_server(optlist.site)
        if tsite and tsite["server"]:
            data_url = "http://{}:{}/rest/data/dataserver".format(
                tsite["server"], tsite["port"])
        else:
            logging.error("failed to determine trending server")
            sys.exit(1)

        # access the file with the channel list and update if needed
        if optlist.forceupdate:
            channel_file = tu.update_trending_channels_xml(tsite["name"])
        else:
            channel_file = tu.update_trending_channels_xml(
                tsite["name"], tmin / 1000, tmax / 1000)
    else:  # get site and data from input file
        logging.debug("using input file %s", optlist.input_file)
        tsite = tu.init_trending_from_input_xml(optlist.input_file)
        if not tsite:
            logging.error("failed to determine trending server")
            sys.exit(1)
        channel_file = None

    # construct the dict of input channels as {id: path}; regexes as a list
    oflds, regexes = tu.parse_channel_sources(optlist.channel_source,
                                              channel_file)
    if oflds:
        logging.debug("found %d channels", len(oflds))
    else:
        logging.error("no channels found")
        sys.exit(1)
    if regexes:
        logging.debug("found %d regexes with valid channels", len(regexes))

    # remove channels on the reject list (eg bad RTDs etc)
    rflds, rregexes = tu.parse_channel_sources(optlist.reject, channel_file)
    if rflds:
        logging.debug("found %d channels to reject", len(rflds))
        for rid in rflds.keys():
            if rid in oflds.keys():
                removed = oflds.pop(rid)
                logging.debug("removing %s from channels to process", removed)
            else:
                logging.debug("NOT removing %s from channels to process",
                              rflds[rid])
        logging.debug("%d channels remaining", len(oflds))

    # filter on E2V, ITL, science, corner by removing other types
    rafts_to_reject = []
    if optlist.e2v:
        rafts_to_reject.extend(rafts_of_type["ITL"])
    if optlist.itl:
        rafts_to_reject.extend(rafts_of_type["E2V"])
    if optlist.science:
        rafts_to_reject.extend(rafts_of_type["CORNER"])
    if optlist.corner:
        rafts_to_reject.extend(rafts_of_type["SCIENCE"])
    if rafts_to_reject:
        rids = []
        for chid in oflds:  # loop over paths
            logging.debug("id= %5d  path= %s", int(chid), oflds[chid])
            for raft in set(rafts_to_reject):  # loop over rafts of type
                logging.debug("raft to reject = %s", raft)
                if re.search(f"/{raft}/", oflds[chid]):
                    rids.append(chid)
                    logging.debug("adding %s to channels to reject",
                                  oflds[chid])
                    break
            else:
                logging.debug("NOT adding %s to channels to reject",
                              oflds[chid])
        for rid in rids:
            removed = oflds.pop(rid)
            logging.debug("removing %s from channels to process", removed)
    logging.debug("%d channels remaining", len(oflds))

    # now have info needed to query the CCS trending db

    if optlist.match:
        print("#--- Found matching channels:")
        for chid in oflds:
            print("   id: {}  path: {}".format(chid, oflds[chid]))
        sys.exit(0)

    logging.debug("Found matching channels:")
    for chid in oflds:
        logging.debug("id= %5d  path= %s", int(chid), oflds[chid])

    #  Get the trending data either from local saved files or via
    #  trending db queries to the rest service
    if optlist.input_file:
        # get input from files rather than trending service
        # an issue is that the input file need not have the
        # same set of channels or time intervals as requested on command line.
        # The output time intervals will be restricted to the intersection
        # of the intervals present in the input files.
        responses = []
        parser = etree.XMLParser(remove_blank_text=True)
        for ifile in optlist.input_file:
            logging.debug("using %s for input", ifile)
            logging.debug("test for well-formed xml...")
            try:
                tree = etree.parse(ifile, parser)
            except (etree.XMLSyntaxError, etree.ParseError) as e:
                logging.error("parsing %s failed: %s", ifile, e)
                sys.exit(1)
            else:
                logging.debug("successfully parsed %s", ifile)

            logging.debug("appending to responses...")
            responses.append(
                etree.tostring(
                    tree.getroot(),
                    encoding="UTF-8",
                    xml_declaration=True,
                    pretty_print=False,
                ))

            logging.debug("deleting the etree")
            del tree

    else:
        # CCS is pre-binned at 5m, 30m, or will rebin on-the-fly
        # default is raw data, timebins triggers stat data
        # query the rest server and place responses into a list
        # join the ids requested as "id0&id=id1&id=id2..." for query
        idstr = "&id=".join(oflds)
        responses = []
        timebins = 0
        nbins = 0
        if optlist.timebins == 0:  # autosize it
            for ival in intervals:  # only one interval per query allowed
                logging.debug("timebins=0")
                logging.debug("ival[1]= %d, ival[0]= %d", ival[1], ival[0])
                if int((ival[1] - ival[0]) / 1000 / 60) < 5:  # <5m => raw data
                    timebins = None
                elif int((ival[1] - ival[0]) / 1000 / 3600) < 10:  # <10h => 1m bins
                    timebins = int(((ival[1] - ival[0]) / 1000.0) / 60.0)
                elif int((ival[1] - ival[0]) / 1000 / 3600) < 50:  # <50h => 5m bins
                    timebins = int(((ival[1] - ival[0]) / 1000.0) / 300.0)
                else:  # 30m bins
                    timebins = int(((ival[1] - ival[0]) / 1000.0) / 1800.0)
                logging.debug("timebins= %d", timebins)
                if timebins and nbins < timebins:
                    nbins = timebins
        else:
            nbins = optlist.timebins  # is None or an integer

        for ival in intervals:  # only one interval per query allowed
            res = tu.query_rest_server(ival[0], ival[1], data_url, idstr,
                                       nbins)
            responses.append(res)
    # Now have the data from trending service

    # Output to stdout a well formed xml tree aggregating the xml received
    # Main use is to save to local file, and re-use for subsequent queries
    # for statistics, plots etc. with subset of channels and time periods
    # Also useful for debugging and verification of data
    # need to have server info as attribs to get tz correct
    if optlist.xml:
        xml_dec = b'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
        os.write(1, xml_dec)
        datas_str = '<datas {}="{}" {}="{}" {}="{}">\n'.format(
            "trending_server",
            tsite["server"],
            "trending_port",
            tsite["port"],
            "trending_tz",
            tsite["tz"],
        )
        os.write(1, str.encode(datas_str))
        for res in responses:
            root = etree.fromstring(res)
            for data in root.iter("data"):
                os.write(
                    1,
                    etree.tostring(data,
                                   encoding="UTF-8",
                                   xml_declaration=False,
                                   pretty_print=True),
                )
        try:
            os.write(1, b"</datas>")
        except OSError:
            # 'Broken pipe' OSError when stdout is closed
            pass

        sys.exit(0)

    # Translate the xml responses into internal arrays etc.
    # XML Tree structure looks like this:
    # 1: data [id, path]
    # 2: trendingresult [-]
    #     3: channelmetadata [-]
    #         4: channelmetadatavalue [tstart, tstop, name, value]
    #     3: trendingdata [-]
    #         4: datavalue [name, value]
    #         4: axisvalue [name, value, loweredge, upperedge]
    # where [id, path] could appear multiple times and input time intervals are
    # allowed to overlap
    #
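    # For illustration only (hypothetical channel and values), a single
    # <data> element has the shape:
    #   <data id="1234" path="focal-plane/R22/S11/Temp">
    #     <trendingresult>
    #       <channelmetadata>
    #         <channelmetadatavalue tstart="..." tstop="..."
    #                               name="units" value="degC"/>
    #       </channelmetadata>
    #       <trendingdata>
    #         <datavalue name="value" value="-95.2"/>
    #         <axisvalue name="time" value="1600000000000"
    #                    loweredge="..." upperedge="..."/>
    #       </trendingdata>
    #     </trendingresult>
    #   </data>
    #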
    chanspec = dict()  # where keys are chids, element is also a dict
    chanmd = dict()  # key is chid, elements will be dicts holding arrays
    chandata = dict()  # key is chid, element is list of (time, value) tuples
    datacnt = 0
    for res in responses:
        root = etree.fromstring(res)
        for data in root.iter("data"):
            datacnt += 1
            chid = data.attrib.get("id")
            path = data.attrib.get("path")
            # verify this element's (chid, path) matches the input list
            # logging.debug('id=%s  path=%s', chid, path)
            if chid not in oflds:
                continue
            if path is None or oflds[chid] != path:
                logging.warning(
                    "inputpath(id=%s): %s != %s (xmlpath), using %s",
                    chid,
                    oflds[chid],
                    path,
                    oflds[chid],
                )
                path = oflds[chid]
            # check if chid is already in chanspec
            if chid in chanspec:
                if chanspec[chid]["path"] != path:
                    logging.warning("path mismatch for channel_id= %d", chid)
                    logging.warning("  %s != %s, skipping....",
                                    chanspec[chid]["path"], path)
            else:
                chanspec[chid] = dict()
                chanspec[chid]["path"] = path
                chanspec[chid]["units"] = "none"

            # channelmetadata:
            # each element is a name, value and time interval
            # a name can appear multiple times with distinct time intervals
            # convert to a list, per name, of ordered pairs (value,time)
            # that could be plotted using those points
            #
            if chid not in chanmd:  # check if already exists
                chanmd[chid] = dict()
            # metadata:
            # parse all but only using units for now
            for mdval in data.iter("channelmetadatavalue"):
                if not mdval.keys():  # skip attribute-less elements
                    continue
                mdname = mdval.attrib.get("name")  # key
                mdvalue = mdval.attrib.get("value")  # value
                mdstart = mdval.attrib.get("tstart")
                mdstop = mdval.attrib.get("tstop")
                if mdname in chanmd[chid]:
                    chanmd[chid][mdname].append((mdstart, mdvalue))
                    chanmd[chid][mdname].append((mdstop, mdvalue))
                else:  # first assignment
                    chanmd[chid][mdname] = [(mdstart, mdvalue),
                                            (mdstop, mdvalue)]
            # trendingdata:
            # extract timestamp, value pairs in axisvalue, datavalue tags
            if chid not in chandata:  # first time
                chandata[chid] = []  # empty list
            for tdval in data.iter("trendingdata"):
                dataval = tdval.find("datavalue")
                if dataval is not None:
                    tvalue = dataval.attrib.get("value")
                else:
                    continue
                axisval = tdval.find("axisvalue")
                if axisval is not None:
                    tstamp = axisval.attrib.get("value")
                else:
                    continue
                # if tstamp is in intervals then append
                for ival in intervals:  # slow, but no other way?
                    if ival[0] < int(tstamp) < ival[1]:
                        chandata[chid].append((tstamp, tvalue))
                        break

    # Done translating the xml responses into internal lists etc.
    # Delete all the raw xml responses
    logging.debug("processed %d xml channel responses", len(responses))
    logging.debug("processed %d uniq channel requests", len(chanspec))
    logging.debug("processed %d total channel queries", datacnt)
    del responses

    # chanspec = dict()  # where keys are chids, values are ccs paths
    # chanmd = dict()  # key is chid, elements will be dicts holding lists
    # chandata = dict() # key is chid, elements are (time, value) pair lists
    # so all responses processed, now have data organized by a set of dicts
    # with the index on channel id.  Multiple queries for a given channel
    # id are grouped together and there could be duplicate values.
    #
    # To facilitate operating on the data, transform chandat from list[] based
    # (which was easy to append to) to np.array based data.
    chandt = np.dtype({
        "names": ["tstamp", "value"],
        "formats": ["int", "float"]
    })
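    # note: np.unique on this structured dtype sorts lexicographically on
    # ("tstamp", "value") and drops exact duplicates, which is what collapses
    # overlapping query intervals below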
    trimids = []
    for chid in chanspec:
        path = chanspec[chid]["path"]
        logging.debug("id=%s  path=%s", chid, path)
        for mdname in chanmd[chid]:
            # pick out and process the md's we want
            if mdname == "units" and chanspec[chid]["units"] == "none":
                chanspec[chid]["units"] = chanmd[chid][mdname][-1][1]

        logging.debug("    units=%s", chanspec[chid]["units"])
        # sort and remove duplicates from chandata[chid] where:
        # chandata[chid] = [(t0, v0), (t1, v1), ...., (tn, vn)]
        # and convert to np array
        tmparr = np.array(chandata[chid], dtype=chandt)
        chandata[chid] = np.unique(tmparr)
        logging.debug(
            "    chandata: %d uniq/sorted values from %d entries",
            np.size(chandata[chid]),
            np.size(tmparr),
        )
        del tmparr
        if np.size(chandata[chid]) == 0:  # append chid to trimid list
            logging.debug("%s has no data", chanspec[chid]["path"])
            # arrange to trim empty data
            trimids.append(chid)

    for chid in trimids:
        del chandata[chid]
        del chanmd[chid]
        del chanspec[chid]

    # print to stdout a text dump of the data, in time order per channel
    #
    if optlist.text:
        # print a header for the text
        #
        print("#")
        print("# {}".format(optlist.title))
        print("#")
        print("# CCS trending dump at {}".format(
            dt.datetime.now(gettz()).isoformat(timespec="seconds")))
        print(
            "# Data for {} total seconds from {} intervals".format(
                inttot, intcnt),
            end="",
        )
        print(" over {} (h:m:s) from:".format(
            dt.timedelta(seconds=(tmax / 1000 - tmin / 1000))))
        print('#     tmin={}: "{}"'.format(
            tmin,
            dt.datetime.fromtimestamp(tmin / 1000, gettz(
                tsite["tz"])).isoformat(timespec="seconds"),
        ))
        print('#     tmax={}: "{}"'.format(
            tmax,
            dt.datetime.fromtimestamp(tmax / 1000, gettz(
                tsite["tz"])).isoformat(timespec="seconds"),
        ))
        print("#{:<{wt}s} {:>{wv}s} {:<{wu}s}  {:<{wp}s}  {:<{wd}s}".format(
            " 'time (ms)'",
            "'value'",
            "'unit'",
            "'channel CCS path'",
            "'iso-8601 Date'",
            wt=13,
            wv=12,
            wu=6,
            wp=30,
            wd=30,
        ))
        # loop over all channels sorted on units then path
        for chid in sorted(chanspec.keys(),
                           key=lambda x:
                           (chanspec[x]["units"], chanspec[x]["path"])):
            path = chanspec[chid]["path"]
            unitstr = chanspec[chid]["units"]
            if np.size(chandata[chid]) == 0:
                continue
            for (tstamp, value) in chandata[chid]:
                try:
                    date = dt.datetime.fromtimestamp(
                        tstamp / 1000.0,
                        gettz(tsite["tz"])).isoformat(timespec="milliseconds")
                    print(
                        "{:<{wt}d} {:>{wv}g} {:>{wu}s}   ".format(int(tstamp),
                                                                  float(value),
                                                                  unitstr,
                                                                  wt=14,
                                                                  wv="12.7",
                                                                  wu=6),
                        end="",
                    )
                    print("{:<{wp}s}  {:<{wd}s}".format(path,
                                                        date,
                                                        wt=14,
                                                        wv="12.7",
                                                        wu=6,
                                                        wp=30,
                                                        wd=30))
                except IOError:
                    # 'Broken pipe' IOError when stdout is closed
                    pass

    # print some statistics for each channel
    #
    if optlist.stats:
        # print a header for the stats
        #
        print("#")
        print("# {}".format(optlist.title))
        print("#")
        print("# CCS trending stats at {}".format(
            dt.datetime.now(gettz()).isoformat(timespec="seconds")))
        print(
            "# Data for {} total seconds from {} intervals".format(
                inttot, intcnt),
            end="",
        )
        print(" over {} (h:m:s) from:".format(
            dt.timedelta(seconds=(tmax / 1000 - tmin / 1000))))
        print('#     tmin="{}"'.format(
            dt.datetime.fromtimestamp(tmin / 1000, gettz(
                tsite["tz"])).isoformat(timespec="seconds")))
        print('#     tmax="{}"'.format(
            dt.datetime.fromtimestamp(tmax / 1000, gettz(
                tsite["tz"])).isoformat(timespec="seconds")))
        print(
            "# {:>4s} {:>8s} {:>8s} {:>8s} {:>8s} {:>8s} {:>11s}".format(
                "cnt", "mean", "median", "stddev", "min", "max", "d/dt 1/m"),
            end="",
        )
        if optlist.rstats:
            print("{:>8s} {:>8s} {:>8s}  ".format("rmean", "rmedian",
                                                  "rstddev"),
                  end="")
        print(" {:<{wt}s} {:>{wu}s}".format("path", "units", wt=40, wu=6))

        # loop over all channels sorted on units then path
        for chid in sorted(chanspec.keys(),
                           key=lambda x:
                           (chanspec[x]["units"], chanspec[x]["path"])):
            path = chanspec[chid]["path"]
            unitstr = chanspec[chid]["units"]
            tstamp = chandata[chid]["tstamp"]
            nelem = tstamp.size
            if nelem > 0:
                y = chandata[chid]["value"]
                avg = np.mean(y)
                med = np.median(y)
                std = np.std(y)
                npmin = np.min(y)
                npmax = np.max(y)
                if y.size > 5:
                    # silly but better than taking last value
                    npgrad = np.gradient(y, tstamp)
                    grad = 60 * 1000 * np.mean(npgrad[-4:])  # per-minute rate
                else:
                    grad = math.nan
                if optlist.rstats:
                    rmean, rmedian, rstd = stats.sigma_clipped_stats(y)
            else:
                avg = med = std = npmin = npmax = 0
                grad = rmean = rmedian = rstd = 0
            try:
                print(
                    "{:>6g} {:>8.4g} {:>8.4g} {:>8.4g} ".format(
                        nelem,
                        avg,
                        med,
                        std,
                    ),
                    end="",
                )
                print("{:>8.4g} {:>8.4g} ".format(npmin, npmax), end="")
                print("{:>11.3g} ".format(grad), end="")
                if optlist.rstats:
                    print(
                        "{:>8.4g} {:>8.4g} {:>8.4g}   ".format(
                            rmean, rmedian, rstd),
                        end="",
                    )
                print("{:<{wt}s} {:>{wu}s}".format(path, unitstr, wt=40, wu=6))
            except IOError:
                # 'Broken pipe' IOError when stdout is closed
                pass

    # Plotting:
    #
    if optlist.plot:
        # make one or more plots of the time series data
        # default is plot per channel per interval
        # option to combine intervals and channels by units
        # and to overlay all

        # update/override some critical parameters
        plt.style.use(optlist.style)
        pu.update_rcparams()

        # figure out how many distinct plots and windows to make
        # subplots layout and shape are determined
        # nax will store the number of actual plots
        # the nxm array of plots may be larger
        #
        nax = len(chanspec)  # default

        if optlist.overlayunits:  # axis set per unit
            unit_map = dict()
            unit_idx = 0  # counts types of units
            # loop over all channels sorted on units then path
            for chid in sorted(chanspec,
                               key=lambda x:
                               (chanspec[x]["units"], chanspec[x]["path"])):
                unit = chanspec[chid]["units"]
                if unit not in unit_map:
                    unit_map[unit] = unit_idx
                    unit_idx += 1
            nax = len(unit_map)
            logging.debug("unit_map=%s", unit_map)

        elif optlist.overlayregex:  # axis set per regex and per unit
            # regexes[] is a list of regex's used to select channels
            regex_map = dict()
            axis_idx = 0  # will be the axis index
            # loop over all channels sorted on units then path
            for chid in sorted(chanspec,
                               key=lambda x:
                               (chanspec[x]["units"], chanspec[x]["path"])):
                chid_matched = False
                path = chanspec[chid]["path"]
                unit = chanspec[chid]["units"]
                for regex in regexes:
                    if re.search(regex, path):
                        logging.debug("regex_map[%s] matches %s", regex, path)
                        if regex not in regex_map:
                            regex_map[regex] = dict()
                            regex_map[regex][unit] = axis_idx
                            axis_idx += 1
                        elif unit not in regex_map[regex]:
                            regex_map[regex][unit] = axis_idx
                            axis_idx += 1
                        else:  # both regex and unit accounted for
                            pass
                        chid_matched = True
                        break  # found match
                if not chid_matched:
                    logging.error("no regex matches %s", path)
                    sys.exit(1)
            nax = axis_idx  # so now have an axis count, need to re-assign
            # now re-assign axis ids to match command line regexes order
            regex_map_tmp = copy.deepcopy(regex_map)
            aix = 0
            for regex in regexes:
                for unit in sorted(regex_map[regex].keys()):
                    regex_map_tmp[regex][unit] = aix
                    aix += 1
            regex_map = regex_map_tmp
            logging.debug("regex_map=%s", regex_map)
        elif optlist.overlay:
            nax = 1

        if nax == 0:
            logging.error("no data to plot, check inputs?")
            logging.error("try running with --debug")
            sys.exit(1)

        if (not optlist.overlaytime and not optlist.overlaystart
                and not optlist.overlaystop):
            nax = nax * len(intervals)  # per interval, per channel

        logging.debug("nax=%d", nax)
        # logging.debug('nrows= %d  ncols=%d', nrows, ncols)

        if not optlist.overlaytime and len(intervals) > 1 and optlist.sharex:
            sharex = False
        else:
            sharex = optlist.sharex

        fig, axes = pu.get_fig_and_axis(
            nax,
            optlist.layout,
            optlist.overlay,
            sharex,
            False,
            optlist.dpi,
            optlist.fsize,
        )

        logging.debug("len(axes)=%d", len(axes))
        logging.debug("axes.shape= %s", axes.shape)
        nrows, ncols = (axes.shape[0], axes.shape[1])

        # loop over data channels and plot them on correct axis
        # chids = list(chanspec.keys())
        chids = sorted(chanspec,
                       key=lambda x:
                       (chanspec[x]["units"], chanspec[x]["path"]))
        logging.debug("chids=%s", chids)
        unit = None
        for chidx in range(0, len(chids)):  # channels
            chid = chids[chidx]
            unit = chanspec[chid]["units"]
            path = chanspec[chid]["path"]
            tstamp = chandata[chid]["tstamp"]
            mcolor = None
            labeled = False
            for idx in range(0, len(intervals)):
                #
                # choose on which axis to plot
                if optlist.overlayunits:
                    axcnt = unit_map[unit]  # map unit to correct axis
                elif optlist.overlayregex:
                    chid_matched = False
                    for regex in regexes:
                        if re.search(regex, path):
                            logging.debug("regex_map[%s] matches %s", regex,
                                          path)
                            axcnt = regex_map[regex][unit]
                            chid_matched = True
                            logging.debug("using axcnt=%d for regex_map[%s]",
                                          axcnt, regex)
                    if not chid_matched:
                        logging.error("no regex match found for %s", path)
                else:
                    axcnt = chidx
                if not (optlist.overlaytime or optlist.overlaystart
                        or optlist.overlaystop):
                    # stride is number of intervals
                    axcnt = axcnt * len(intervals) + idx

                if optlist.overlay:
                    axcnt = 0
                #
                # now set up this axis
                logging.debug("using axcnt=%d", axcnt)
                ax = np.ravel(axes)[axcnt]
                rowid = int(axcnt / ncols)
                colid = int(axcnt % ncols)
                logging.debug("axcnt= %d  idx= %d", axcnt, idx)
                logging.debug("rowid = %d  colid = %d", rowid, colid)
                ax.grid(True)
                ax.set_frame_on(True)
                ax.get_xaxis().set_visible(True)
                ax.get_yaxis().set_visible(True)
                ax.xaxis.set_tick_params(labelsize="x-small")
                ax.yaxis.set_tick_params(labelsize="x-small")
                if optlist.style == "ggplot":
                    ax.plot([], [])  # consumes the first color (red)
                #
                # mask the tstamps outside of the interval
                mask = (intervals[idx][0] < tstamp) & (tstamp < intervals[idx][1])
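                # interval endpoints converted from ms to seconds since the
                # epoch; used below to re-zero the overlaystart/stop x-axes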
                mask_start = intervals[idx][0] / 1000.0
                mask_stop = intervals[idx][1] / 1000.0
                x = chandata[chid]["tstamp"][mask] / 1000.0  # time in seconds
                y = chandata[chid]["value"][mask]
                #
                # deal with point/line format
                if optlist.fmt:
                    fmt = optlist.fmt[0]
                elif optlist.timebins:
                    fmt = "|-"
                else:
                    fmt = "o-"
                # do the actual plotting
                #
                if not (optlist.overlaystart or optlist.overlaystop):
                    #
                    # convert time axis to matplotlib dates sequence
                    dates = [
                        dt.datetime.fromtimestamp(ts, gettz(tsite["tz"]))
                        for ts in x
                    ]
                    mds = mdate.date2num(dates)
                    if mds.size == 0 and not (
                            optlist.overlay or optlist.overlaytime
                            or optlist.overlayunits or optlist.overlayregex):
                        #
                        # no data, blank, annotate as empty and skip
                        ax.grid(False)
                        ax.set_frame_on(True)
                        ax.get_xaxis().set_visible(False)
                        ax.get_yaxis().set_visible(False)
                        anno_string = "{}:{} empty".format(
                            chanspec[chid]["path"], idx)
                        ax.annotate(
                            anno_string,
                            xy=(0.03, 0.55),
                            xycoords="axes fraction",
                            horizontalalignment="left",
                            verticalalignment="bottom",
                            fontsize="small",
                        )
                        anno_string = "{} (tstart)".format(
                            dt.datetime.fromtimestamp(
                                intervals[idx][0] / 1000,
                                gettz(tsite["tz"])).isoformat(
                                    timespec="seconds"))
                        ax.annotate(
                            anno_string,
                            xy=(0.03, 0.45),
                            xycoords="axes fraction",
                            horizontalalignment="left",
                            verticalalignment="top",
                            fontsize="small",
                        )
                        continue

                    # make label for legend
                    if not labeled:  # label first valid interval, save color
                        mlabel = "{}".format(chanspec[chid]["path"])
                        line = ax.plot_date(mds,
                                            y,
                                            fmt,
                                            label=mlabel,
                                            tz=gettz(tsite["tz"]))
                        mcolor = line[0].get_color()
                        logging.debug("mcolor= %s", mcolor)
                        labeled = True
                    else:  # no label on later intervals, use saved color
                        line = ax.plot_date(mds,
                                            y,
                                            fmt,
                                            color=mcolor,
                                            label=None,
                                            tz=gettz(tsite["tz"]))

                    # set x,y-axis label format
                    if not ax.get_ylabel():
                        ax.set_ylabel("{}".format(unit), size="small")
                        ax.ticklabel_format(axis="y",
                                            style="sci",
                                            scilimits=(-3, 5))
                    if not ax.get_xlabel():
                        # xlabel and tick labels on bottom plots
                        # only unless multiple intervals
                        if ((len(intervals) > 1 and not optlist.overlaytime)
                                or nax - axcnt - 1 < ncols):
                            xlabel_str = "{} (tstart)".format(
                                dt.datetime.fromtimestamp(
                                    intervals[idx][0] / 1000,
                                    gettz(tsite["tz"])).isoformat(
                                        timespec="seconds"))
                            if optlist.timebins:
                                xlabel_str = "{} [{} timebins]".format(
                                    xlabel_str, optlist.timebins)
                            logging.debug("ax.set_xlabel(%s)", xlabel_str)
                            ax.set_xlabel(
                                "{}".format(xlabel_str),
                                position=(0.0, 1e6),
                                size="small",
                                horizontalalignment="left",
                            )

                            ax.tick_params(axis="x", labelbottom=True)
                            # rotate the labels
                            for xtick in ax.get_xticklabels():
                                xtick.set_rotation(30)
                                xtick.set_horizontalalignment("right")
                        else:
                            ax.tick_params(axis="x", labelbottom=False)
                else:  # overlay start or stop
                    # convert x to duration axis units (s+/-)
                    #
                    if optlist.overlaystart:
                        x = x - mask_start
                    elif optlist.overlaystop:
                        x = x - mask_stop
                    else:
                        logging.error("overlaystart/stop problem")
                        sys.exit(1)

                    mlabel = "{}[{}]".format(chanspec[chid]["path"], idx)
                    line = ax.plot(x, y, fmt, label=mlabel)
                    mcolor = line[0].get_color()
                    logging.debug("mcolor= %s", mcolor)
                    if not ax.get_ylabel():
                        ax.set_ylabel("{}".format(unit), size="small")
                        ax.ticklabel_format(axis="y",
                                            style="sci",
                                            scilimits=(-3, 5))
                    # xlabel for this axis
                    if not ax.get_xlabel():
                        if nax - axcnt - 1 < ncols:
                            if optlist.overlaystart:
                                xstr = "tstart"
                                xid = 0
                            elif optlist.overlaystop:
                                xstr = "tstop"
                                xid = 1
                            xlabel_str = "{} ({}[0])".format(
                                dt.datetime.fromtimestamp(
                                    intervals[0][xid] / 1000,
                                    gettz(tsite["tz"])).isoformat(
                                        timespec="seconds"),
                                xstr,
                            )
                            if len(intervals) > 1:
                                xlabel_last = "{} ({}[{}])".format(
                                    dt.datetime.fromtimestamp(
                                        intervals[-1][xid] / 1000,
                                        gettz(tsite["tz"])).isoformat(
                                            timespec="seconds"),
                                    xstr,
                                    len(intervals) - 1,
                                )
                                if len(intervals) > 2:
                                    xlabel_last = "...{}".format(xlabel_last)
                                xlabel_str = "{}\n{}".format(
                                    xlabel_str, xlabel_last)
                            ax.set_xlabel(
                                "{}".format(xlabel_str),
                                fontsize="small",
                                position=(0.0, 1e6),
                                horizontalalignment="left",
                            )
                            #
                            ax.tick_params(axis="x",
                                           labelbottom=True,
                                           labelrotation=30.0)
                        else:
                            ax.tick_params(axis="x", labelbottom=False)

        # plot array padded with invisible boxes
        for pcnt in range(nax, nrows * ncols):
            ax = np.ravel(axes)[pcnt]
            ax.grid(False)
            ax.set_frame_on(False)
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)

        # make the legends for each plot in the array
        # with legend placement/size adjustments
        for pcnt in range(0, nax):
            logging.debug("pcnt= %d, nax= %d", pcnt, nax)
            ax = np.ravel(axes)[pcnt]
            handles, labels = ax.get_legend_handles_labels()
            if not handles or not labels:
                continue
            # sort the labels for easier reading
            # using https://stackoverflow.com/questions/9764298
            labels, handles = (list(t)
                               for t in zip(*sorted(zip(labels, handles))))
            if labels:
                pu.mk_legend(optlist.placement, nrows, handles, labels, ax)

        if optlist.title:  # set the suptitle
            suptitle = optlist.title
            if optlist.title == "auto":
                suptitle = mu.mkglob(
                    [c["path"] for c in list(chanspec.values())], False)
            if suptitle:  # mkglob returns None on no solution
                logging.debug("using suptitle=%s", suptitle)
                fig.set_tight_layout({
                    "h_pad": 0.02,
                    "w_pad": 1.0,
                    "rect": [0, 0, 1, 0.97]
                })
                fig.suptitle(suptitle, size="medium")
        else:
            fig.set_tight_layout({
                "h_pad": 0.02,
                "w_pad": 1.0,
                "rect": [0, 0, 1, 1]
            })

        if optlist.saveplot:
            fig.savefig(optlist.saveplot, dpi=600)

        if not optlist.noshow:
            plt.show()

    # end of main()
    sys.exit(0)
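
# A minimal, self-contained sketch of the label-sorting legend idiom used
# above (zip labels with handles, sort, unzip); the channel names here are
# hypothetical:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
for label in ["ch_b", "ch_a", "ch_c"]:  # hypothetical channel names
    ax.plot([0, 1, 2], [1, 2, 3], label=label)

# sort the labels (and their handles in lockstep) for easier reading
handles, labels = ax.get_legend_handles_labels()
labels, handles = (list(t) for t in zip(*sorted(zip(labels, handles))))
ax.legend(handles, labels, fontsize="small")
plt.show()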
Beispiel #9
0
def implot():
    """main logic:"""
    optlist = parse_args()
    mu.init_logging(optlist.debug)
    mu.init_warnings()
    logging.debug("optlist: %s", optlist)

    # update/override some critical parameters
    plt.style.use(optlist.style)
    pu.update_rcparams()
    # uncomment to use latex
    #  plt.rcParams["text.usetex"] = True
    #  plt.rcParams["font.size"] = 12
    #  plt.rc("text.latex", preamble=r"\usepackage{underscore}")

    fig, axes = pu.get_fig_and_axis(
        len(optlist.fitsfile),
        optlist.layout,
        optlist.overlay,
        optlist.sharex,
        optlist.sharey,
        optlist.dpi,
    )

    fsize = fig.get_size_inches()
    logging.debug("width= %5.2f, height= %5.2f", fsize[0], fsize[1])
    logging.debug("len(axes)=%d", len(axes))
    logging.debug("axes.shape= %s", axes.shape)
    nprows, npcols = (axes.shape[0], axes.shape[1])
    logging.debug("nprows= %d, npcols= %d", nprows, npcols)

    pu.set_fig_title(optlist.title, optlist.fitsfile, fig)
    nfiles = len(optlist.fitsfile)

    for findex in range(0, nfiles):
        try:
            hdulist = fits.open(optlist.fitsfile[findex],
                                memmap=optlist.nomemmap)
        except IOError as ioerr:
            logging.error("IOError: %s", ioerr)
            sys.exit(1)
        # info option
        if optlist.info:
            hdulist.info()
            continue

        if optlist.overlay:
            ax = axes[0, 0]
        else:
            # row-major index int(findex / npcols) * npcols + findex % npcols
            # reduces to findex itself
            logging.debug("ax = np.ravel(axes)[%d]", findex)
            ax = np.ravel(axes)[findex]

        # construct a list of the HDU's to work on
        hduids = iu.get_requested_image_hduids(hdulist, optlist.hduname,
                                               optlist.hduindex)
        if hduids is None:
            logging.error("No valid HDUs found in %s", optlist.hduname
                          or optlist.hduindex)
            sys.exit(1)

        # plot title is truncated filename (w/out path or .fit(s))
        title_str = re.sub(r"^.*/(.*)$", r"\1", optlist.fitsfile[findex])
        title_str = re.sub(r"^(.*)\.fits?(\.fz)*$", r"\1", title_str)
        if npcols < 3:
            title_nchars = 44
            title_fontsize = "x-small"
        else:
            title_nchars = 32
            title_fontsize = "xx-small"
        if len(title_str) > title_nchars:
            title_str = "{}...".format(title_str[:title_nchars])
        logging.debug("using title_nchars=%d title_fontsize=%s", title_nchars,
                      title_fontsize)
        if optlist.overlay:
            if nfiles > 1:  # trunc'd filename in legend
                ax.plot([], [], " ", label=title_str)
        else:
            ax.set_title(title_str, fontsize=title_fontsize)
            if optlist.style == "ggplot":
                ax.plot([], [])  # skip the first color

        if optlist.xlimits:
            xbot, xtop = ax.set_xlim(optlist.xlimits[0], optlist.xlimits[1])
            logging.debug("xbot= %.3g xtop= %.3g", xbot, xtop)
        if optlist.ylimits:
            ybot, ytop = ax.set_ylim(optlist.ylimits[0], optlist.ylimits[1])
            logging.debug("ybot= %.3g ytop= %.3g", ybot, ytop)

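        # "symlog" (rather than "log") keeps zero and negative samples
        # plottable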
        if optlist.logy:
            logging.debug("set_yscale(symlog)")
            ax.set_yscale("symlog")

        # y label depends on offset type
        if not optlist.offset:
            ax.set_ylabel("signal", size="x-small")
        elif optlist.offset == "mean":
            ax.set_ylabel("signal - mean", size="x-small")
        elif optlist.offset == "median":
            ax.set_ylabel("signal - median", size="x-small")
        elif optlist.offset == "delta":
            ax.set_ylabel("signal - mean + 5*j*stdev, j=0,1,..",
                          size="x-small")
        else:
            logging.error("invalid --offset choice")
            sys.exit(1)
        # x label
        ax.grid(True)
        if optlist.row is not None:
            ax.set_xlabel("column", size="x-small")
            if optlist.ltype == "series":
                ax.set_xlabel("col series", size="x-small")
        elif optlist.col is not None:
            ax.set_xlabel("row", size="x-small")
            if optlist.ltype == "series":
                ax.set_xlabel("row series", size="x-small")
        else:
            logging.error("must have one of --row or --col")
            sys.exit(1)

        # do the plotting
        pu.plot_hdus(vars(optlist), hduids, hdulist, ax)

        #  done with file, close it
        hdulist.close()

        # end of loop over files

    if optlist.info:  # just print the image info and exit
        sys.exit()

    if optlist.title:  # set the suptitle
        fig.set_tight_layout({
            "h_pad": 0.50,
            "w_pad": 1.0,
            "rect": [0, 0, 1, 0.97]
        })
    else:
        fig.set_tight_layout({
            "h_pad": 0.50,
            "w_pad": 1.0,
            "rect": [0, 0, 1, 1]
        })

    # Deal with the legend (ugly)
    # get the handles and labels from the first plot
    ax = np.ravel(axes)[0]
    handles, labels = ax.get_legend_handles_labels()
    if not (nfiles == 1 or optlist.overlay):
        ax = np.ravel(axes)[-1]  # put the legend in the last slot instead
    pu.mk_legend(optlist.placement, nprows, handles, labels, ax)

    if not optlist.overlay:
        for gidx in range(nfiles, nprows * npcols):
            ax = np.ravel(axes)[gidx]  # row-major index reduces to gidx
            ax.grid(False)
            ax.set_frame_on(False)
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)

    if optlist.saveplot:
        fig.savefig(optlist.saveplot, dpi=600)

    plt.show()
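
# A minimal sketch of the grid-padding idiom used in both functions above:
# leftover cells of an nrows x ncols subplot array are blanked out. The grid
# shape and plot count here are made up for illustration.

import numpy as np
import matplotlib.pyplot as plt

nplots, nrows, ncols = 4, 2, 3
fig, axes = plt.subplots(nrows, ncols)
for pcnt, ax in enumerate(np.ravel(axes)):
    if pcnt < nplots:
        ax.plot([0, 1], [0, pcnt])  # stand-in for the real per-file plotting
    else:
        ax.grid(False)  # blank out the unused cell, same recipe as above
        ax.set_frame_on(False)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
plt.show()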