Пример #1
0
def test_raw_pipeline(mask_s):
    """Run the raw pipeline end to end for one image and check the outputs."""
    # Wire the full pipeline together from the global namespace.
    ns = link(*pipeline_order, **g_namespace)
    ns["mask_setting"]["setting"] = mask_s

    # Capture outputs so we can assert on them after the emits.
    pdf_out = ns["pdf"].sink_to_list()
    geom_out = ns["geometry"].sink_to_list()
    mask_out = ns["mask"].sink_to_list()

    # Drive the pipeline: calibration flag, geometry, darks/background,
    # composition, counter, then the actual foreground image.
    ns["is_calibration_img"].emit(False)
    ns["geo_input"].emit(geo.getPyFAI())
    for node in (ns["raw_background_dark"], ns["raw_background"],
                 ns["raw_foreground_dark"]):
        node.emit(np.zeros(img.shape))
    ns["composition"].emit("Au")
    ns["img_counter"].emit(1)
    raw_fg = ns["raw_foreground"]
    raw_fg.emit(img)

    destroy_pipeline(raw_fg)
    del ns
    assert len(geom_out) == 1
    assert mask_out
    assert len(pdf_out) == 1
    pdf_out.clear()
    geom_out.clear()
    mask_out.clear()
Пример #2
0
def test_main_pipeline(
    exp_db,
    fast_tmp_dir,
    start_uid3,
    start_uid1,
    start_uid2,
    background,
    exception,
    pe2
):
    """Replay a databroker run through the main pipeline and verify counts."""
    ns = link(
        *pipeline_order,
        raw_source=Stream(stream_name="raw source"),
        db=exp_db,
    )
    iq_em = ToEventStream(
        ns["mean"].combine_latest(ns["q"], emit_on=0), ("iq", "q"))
    iq_em.sink(print)

    corrected_imgs = []
    move_to_first(ns["bg_corrected_img"].sink(corrected_imgs.append))
    means = ns["mean"].sink_to_list()
    comps = ns["iq_comp"].sink_to_list()

    # Pick which run to replay based on the fixtures.
    if background:
        uid = start_uid1
    elif pe2:
        uid = start_uid2
    else:
        uid = -1

    start = time.time()
    for name, doc in exp_db[uid].documents(fill=True):
        # Optionally corrupt the start doc to exercise the error path.
        if name == "start" and exception:
            doc["bt_wavelength"] = "bla"
        try:
            ns["raw_source"].emit((name, doc))
        except ValueError:
            pass
    print(time.time() - start)

    n_events = len(list(exp_db[-1].events()))
    assert len(corrected_imgs) == n_events
    # A corrupted start doc means no downstream results at all.
    expected = 0 if exception else n_events
    assert len(means) == expected
    assert len(comps) == expected
    assert iq_em.state == "stopped"
    destroy_pipeline(ns["raw_source"])
    del ns
    corrected_imgs.clear()
    means.clear()
    comps.clear()
Пример #3
0
def raw_pipeline_parallel():
    """Benchmark the raw pipeline with thread-parallel (scattered) nodes."""
    # Copy the global namespace, scatter every stream onto the thread
    # backend, and keep the original local streams under a "_" prefix.
    base = dict(g_namespace)
    scattered = {
        k: v.scatter(backend="thread")
        for k, v in base.items() if isinstance(v, Stream)
    }
    base.update(
        {"_" + k: v for k, v in base.items() if isinstance(v, Stream)})
    base.update(scattered)
    ns = link(*pipeline_order[:-1], **base)

    print(type(ns["raw_foreground"]))

    mean_node = ns["mean"]
    futures = mean_node.sink_to_list()
    # Buffer then gather so the futures resolve back into concrete data.
    gathered = mean_node.buffer(10).gather()
    stamps = gathered.map(lambda x: time.time()).sink_to_list()
    results = gathered.sink_to_list()

    # Prime the pipeline with geometry, composition, and zeroed darks.
    ns["_geo_input"].emit(geo.getPyFAI())
    ns["_composition"].emit("Au1.0")
    for node in (ns["_raw_background_dark"], ns["_raw_background"],
                 ns["_raw_foreground_dark"]):
        node.emit(np.zeros(img.shape))

    n_shots = 10
    t0 = time.time()
    fg = ns["_raw_foreground"]
    for _ in range(n_shots):
        fg.emit(img + np.random.random(img.shape))
    # Spin until every emitted image has been gathered.
    while len(results) < n_shots:
        time.sleep(.01)

    # Report inter-arrival gaps and per-image latency.
    gaps = [stamps[i] - stamps[i - 1] for i in range(1, n_shots)]
    print(max(gaps), min(gaps), sum(gaps) / len(gaps))
    print([t - t0 for t in stamps])
    print(max([t - t0 for t in stamps]) / n_shots)
    destroy_pipeline(fg)
    del ns
    futures.clear()
    results.clear()
Пример #4
0
def test_main_pipeline(exp_db, fast_tmp_dir, start_uid3, start_uid1,
                       start_uid2, background, exception, pe2):
    """Stream a stored run through the main pipeline and check event counts."""
    ns = link(
        *pipeline_order,
        raw_source=Stream(stream_name="raw source"),
        db=exp_db,
    )
    iq_em = ToEventStream(
        ns["mean"].combine_latest(ns["q"], emit_on=0), ("iq", "q"))
    iq_em.sink(print)

    bg_imgs = []
    move_to_first(ns["bg_corrected_img"].sink(bg_imgs.append))
    mean_list = ns["mean"].sink_to_list()
    comp_list = ns["iq_comp"].sink_to_list()

    # Choose the run to replay: background run, pe2 run, or the latest.
    uid = start_uid1 if background else (start_uid2 if pe2 else -1)

    t_start = time.time()
    for name, doc in exp_db[uid].documents(fill=True):
        # Exercise the failure path by corrupting the start document.
        if name == "start" and exception:
            doc["bt_wavelength"] = "bla"
        try:
            ns["raw_source"].emit((name, doc))
        except ValueError:
            pass
    print(time.time() - t_start)

    n_events = len(list(exp_db[-1].events()))
    assert len(bg_imgs) == n_events
    # With a corrupted start doc nothing should flow downstream.
    n_expected = 0 if exception else n_events
    assert len(mean_list) == n_expected
    assert len(comp_list) == n_expected
    assert iq_em.state == "stopped"
    destroy_pipeline(ns["raw_source"])
    del ns
    bg_imgs.clear()
    mean_list.clear()
    comp_list.clear()
Пример #5
0
def test_pca_pipeline():
    """Feed ten unit vectors through the PCA pipeline and check score shape."""
    ns = dict(data=Stream(), start=Stream())
    ns.update(pca_pipeline(**ns))
    scores = ns["scores"].sink_to_list()

    # Emit the standard basis vectors of R^10 one at a time.
    for idx in range(10):
        vec = np.zeros(10)
        vec[idx] = 1
        ns["data"].emit(vec)

    assert len(scores) == 10
    assert scores[-1].shape == (10, 9)
    destroy_pipeline(ns["data"])
    del ns
    scores.clear()
Пример #6
0
def test_tomo_pipeline_theta():
    """Reconstruct from angle-only projections and check the volume shape."""
    ns = dict(qoi=Stream(), theta=Stream(), center=Stream())
    ns.update(tomo_pipeline_theta(**ns))
    recs = ns["rec"].sink_to_list()

    ns["center"].emit(3)
    # One random 6x6 projection per angle across 0-180 degrees.
    for angle in np.linspace(0, 180, 6):
        ns["theta"].emit(angle)
        ns["qoi"].emit(np.random.random((6, 6)))

    assert len(recs) == 6
    assert recs[-1].shape == (6, 6, 6)
    destroy_pipeline(ns["qoi"])
    del ns
    recs.clear()
Пример #7
0
def test_tomo_piecewise_pipeline(rand_size):
    """Drive the piecewise tomo pipeline over a full (x, theta) grid."""
    ns = dict(
        qoi=Stream(),
        x=Stream(),
        th=Stream(),
        th_dim=Stream(),
        x_dim=Stream(),
        th_extents=Stream(),
        x_extents=Stream(),
        center=Stream(),
    )
    xs = np.linspace(0, 5, 6)
    ths = np.linspace(0, 180, 6)

    ns["th_dimension"] = len(ths)
    ns["x_dimension"] = len(xs)

    ns.update(**link(*[tomo_prep, tomo_pipeline_piecewise], **ns))

    recs = ns["rec"].sink_to_list()

    # Describe the scan grid before emitting any data points.
    ns["th_dim"].emit(len(ths))
    ns["x_dim"].emit(len(xs))
    ns["th_extents"].emit([0, 180])
    ns["x_extents"].emit([xs[0], xs[-1]])
    ns["center"].emit(2.5)

    # One random QOI per (x, theta) grid point.
    for xv in xs:
        for thv in ths:
            ns["x"].emit(xv)
            ns["th"].emit(thv)
            ns["qoi"].emit(np.random.random(rand_size))

    assert len(recs) == len(xs) * len(ths)
    if rand_size:
        assert recs[-1].shape == (*rand_size, len(xs), len(ths))
    else:
        assert recs[-1].shape == (len(xs), len(ths))
    destroy_pipeline(ns["qoi"])
    del ns
    recs.clear()
Пример #8
0
def test_raw_pipeline_parallel(n):
    """Exercise the thread-scattered pipeline as a coroutine test.

    ``n`` names the namespace node whose gathered output is awaited.
    """
    # Copy the global namespace, keep local streams under a "_" prefix,
    # and scatter every stream onto the thread backend.
    base = dict(g_namespace)
    scattered = {
        k: v.scatter(backend="thread")
        for k, v in base.items() if isinstance(v, Stream)
    }
    base.update(
        {"_" + k: v for k, v in base.items() if isinstance(v, Stream)})
    base.update(scattered)
    ns = link(*pipeline_order, **base)

    node = ns[n]
    futures = node.sink_to_list()
    # Buffer then gather so futures resolve back into concrete results.
    gathered = node.buffer(10).gather()
    gathered.sink(lambda x: print("gathered data", time.time()))
    results = gathered.sink_to_list()

    yield ns["_geo_input"].emit(geo.getPyFAI())
    for s in (ns["_raw_background_dark"], ns["_raw_background"],
              ns["_raw_foreground_dark"]):
        yield s.emit(np.zeros(img.shape))

    n_imgs = 2
    fg = ns["_raw_foreground"]
    for _ in range(n_imgs):
        yield fg.emit(img + np.random.random(img.shape))
    # Yield to the event loop until every image has been gathered.
    while len(results) < n_imgs:
        yield gen.sleep(.01)

    destroy_pipeline(fg)
    del ns
    futures.clear()
    results.clear()
Пример #9
0
def test_qoi_pipeline():
    """Check the QOI (max intensity / max g(r)) branch emits once per image."""
    # Append the QOI stages to the standard pipeline order.
    ns = link(*(pipeline_order + [max_intensity_mean, max_gr_mean]),
              **g_namespace)

    maxes = ns["mean_max"].sink_to_list()
    ns["geometry"].emit(geo)
    for node in (ns["raw_background_dark"], ns["raw_background"],
                 ns["raw_foreground_dark"]):
        node.emit(np.zeros(img.shape))
    fg = ns["raw_foreground"]
    fg.emit(img)
    del ns
    destroy_pipeline(fg)
    assert len(maxes) == 1
    maxes.clear()
Пример #10
0
def test_extra_pipeline():
    """Check the statistics branch (median/std/z-score) emits once per image."""
    # Append the statistics stages to the standard pipeline order.
    ns = link(*(pipeline_order + [median_gen, std_gen, z_score_gen]),
              **g_namespace)

    zs = ns["z_score"].sink_to_list()
    ns["geometry"].emit(geo)
    for node in (ns["raw_background_dark"], ns["raw_background"],
                 ns["raw_foreground_dark"]):
        node.emit(np.zeros(img.shape))
    fg = ns["raw_foreground"]
    fg.emit(img)
    del ns
    destroy_pipeline(fg)
    assert len(zs) == 1
    zs.clear()
Пример #11
0
    def main(
        poni_file=None,
        image_files=None,
        bg_file=None,
        mask_file=None,
        polarization=.99,
        edge=20,
        lower_thresh=1.,
        upper_thresh=None,
        alpha=3.,
        auto_type="median",
        mask_settings="auto",
        flip_input_mask=True,
        bg_scale=1,
    ):
        """Run the data processing protocol taking raw images to background
        subtracted I(Q) files.

        The data processing steps included in this protocol are:
        background subtraction, polarization correction, automated masking, and
        pixel resolution integration

        Parameters
        ----------
        poni_file: str or None, optional
            File generated from pyFAI's calibration, if None look in the
            current working directory for the poni file, defaults to None.
        image_files: str or None, optional
            File to process, if None use all the valid files in the directory,
            defaults to None.
        bg_file: str or None, optional
            Background image, if None no background subtraction is performed,
            defaults to None.
        mask_file: str or None, optional
            Mask file to include in the data processing, if None don't use one,
            defaults to None.
        polarization: float, optional
            The polarization factor to use, defaults to .99, if None do not
            perform polarization correction
        edge: int, optional
            The number of pixels from the edge to mask with an edge mask,
            defaults to 20, if None no edge mask used
        lower_thresh: float, optional
            Threshold for lower threshold mask, all pixels with value less than
            this value (after background subtraction if applicable), defaults
            to 1. if None do not apply lower threshold mask
        upper_thresh: float, optional
            Threshold for upper threshold mask, all pixels with value greater
            than this value (after background subtraction if applicable),
            defaults to None if None do not apply upper threshold mask
        alpha: float, optional
            Number of standard deviations away from the ring mean to mask,
            defaults to 3. if None do not apply automated masking
        auto_type : {'median', 'mean'}, optional
            The type of automasking to use, median is faster, mean is more
            accurate. Defaults to 'median'.
        mask_settings: {'auto', 'first', None}, optional
            If auto mask every image, if first only mask first image, if None
            mask no images. Defaults to 'auto'
        flip_input_mask: bool, optional
            If True flip the input mask up down, this helps when using fit2d
            defaults to True.
        bg_scale : float, optional
            The scale for the image to image background subtraction, defaults
            to 1

        Returns
        -------
        q_l : list of ndarrays
            The list of q values
        mean_l : list of ndarrays
            The list of mean values
        median_l : list of ndarrays
            The list of median values
        std_l : list of ndarrays
            The list of standard deviation values
        """
        import pyFAI

        ns = make_pipeline(_output_sinks)

        ns["polarization_array"].args = (polarization, )
        ns["dark_corrected_background"].args = (bg_scale, )
        # Load the static mask, if any; fit2d masks get their own reader.
        if mask_file:
            if mask_file.endswith(".msk"):
                # TODO: may need to flip this?
                tmsk = read_fit2d_msk(mask_file)
            else:
                tmsk = np.load(mask_file)
            if flip_input_mask:
                tmsk = np.flipud(tmsk)
        else:
            tmsk = None

        # update all the kwargs
        ns["mask_kwargs"].update(
            tmsk=tmsk,
            edge=edge,
            lower_thresh=lower_thresh,
            upper_thresh=upper_thresh,
            alpha=alpha,
            auto_type=auto_type,
        )
        print(ns["mask_kwargs"])
        ns["mask_setting"].update(setting=mask_settings)

        # Load calibration; if not given, require exactly one .poni in cwd.
        if poni_file is None:
            poni_file = [f for f in os.listdir(".") if f.endswith(".poni")]
            if len(poni_file) != 1:
                raise RuntimeError("There can only be one poni file")
            else:
                poni_file = poni_file[0]
        geo = pyFAI.load(poni_file)

        bg = None

        if image_files is None:
            # Discover images in the current directory by extension.
            img_filenames = [
                i for i in os.listdir(".")
                if os.path.splitext(i)[-1] in img_extensions
            ]
            # TODO: Test non tiff files
            if all([
                    f.endswith(".tiff") or f.endswith(".tif")
                    for f in img_filenames
            ]):
                imgs = (tifffile.imread(i) for i in img_filenames)
            else:
                imgs = (fabio.open(i).data.astype(float)
                        for i in img_filenames)
        else:
            if isinstance(image_files, str):
                image_files = (image_files, )
            # BUG FIX: img_filenames was only set for the str case, leaving
            # it None for list/tuple inputs and breaking the zip below.
            img_filenames = image_files
            imgs = (fabio.open(i).data.astype(float) for i in image_files)

        if bg_file is not None:
            bg = fabio.open(bg_file).data.astype(float)

        # Reset any accumulated output from a previous run.
        for k in ns.get("out_tup", []):
            k.clear()

        ns["geometry"].emit(geo)

        for i, (fn, img) in enumerate(zip(img_filenames, imgs)):
            ns["filename_source"].emit(fn)
            # With no background file, subtract zeros of the image's shape.
            if bg is None:
                bg = np.zeros(img.shape)
            ns["dark_corrected_background"].emit(bg)
            ns["dark_corrected_foreground"].emit(img)

        destroy_pipeline(ns["dark_corrected_foreground"])
        res = tuple([tuple(x) for x in ns.get("out_tup", [])])
        del ns
        return res