Example #1
def test_filenames(db_with_dark_and_scan, tmpdir):
    """Test exported file names and sizes from Exporter."""
    db = db_with_dark_and_scan
    config = an.AnalysisConfig()
    config.read(fn)
    ld = an.AnalysisStream(config)
    ld.db = db.v1
    ep_config = pdfstream.callbacks.analysis.ExportConfig()
    ep_config.read(fn)
    ep_config.tiff_base = str(tmpdir)
    ep = pdfstream.callbacks.analysis.Exporter(ep_config)
    ld.subscribe(ep)
    for name, doc in db[-1].canonical(fill="yes", strict_order=True):
        ld(name, doc)
    tiff_base = Path(ep_config.tiff_base)
    # test tiff names and sizes
    tiffs = list(tiff_base.rglob("*.tiff"))
    for tiff in tiffs:
        size_in_mb = tiff.stat().st_size // (2**20)
        assert size_in_mb == 16
        print(tiff.name)
    # test array names
    txts = list(tiff_base.rglob("*.txt"))
    for txt in txts:
        print(txt.name)
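
All of the examples on this page are excerpts from a single test module, so they rely on module-level names (an, fn, plt, Path, Validator, analysis_in_schemas, analysis_out_schemas) and on pytest fixtures (db_with_dark_and_scan, db_with_img_and_bg_img, user_config, use_db) that are defined elsewhere in that module or its conftest. A minimal sketch of the assumed setup follows; the schema import path and the fn configuration path are hypothetical, not pdfstream's confirmed layout:

from pathlib import Path

import matplotlib.pyplot as plt

import pdfstream.callbacks.analysis          # used fully qualified in several examples
import pdfstream.callbacks.analysis as an    # short alias used throughout

# Assumed location of the schema helpers used by the validators (hypothetical import path).
from pdfstream.schemas import Validator, analysis_in_schemas, analysis_out_schemas

# Hypothetical path to the .ini file read by AnalysisConfig, ExportConfig and VisConfig.
fn = "tests/data/analysis.ini"
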
Example #2
def test_user_mask1(db_with_img_and_bg_img):
    """Test the analysis stream with automatic masking disabled through user_config."""
    db = db_with_img_and_bg_img
    config = an.AnalysisConfig()
    config.read(fn)
    ld = an.AnalysisStream(config)
    ld.db = db.v1
    for name, doc in db[-1].canonical(fill="yes", strict_order=True):
        if name == "start":
            doc = dict(**doc, user_config={"auto_mask": False})
        ld(name, doc)
Example #3
def test_Visualizer(db_with_dark_and_scan):
    """Test that the Visualizer callback plots the analyzed data and shows the figures."""
    db = db_with_dark_and_scan
    config = an.AnalysisConfig()
    config.read(fn)
    ld = an.AnalysisStream(config)
    ld.db = db.v1
    config1 = pdfstream.callbacks.analysis.VisConfig()
    config1.read(fn)
    config1.fig = plt.figure()
    cb = pdfstream.callbacks.analysis.Visualizer(config1)
    ld.subscribe(cb)
    for name, doc in db[-1].canonical(fill="yes", strict_order=True):
        ld(name, doc)
    cb.show_figs()
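
test_Visualizer creates a real matplotlib figure, so running it on a headless machine (e.g. in CI) normally requires selecting a non-interactive backend before pyplot is used. A minimal sketch, assuming the test module imports pyplot as plt:

import matplotlib

matplotlib.use("Agg")  # non-interactive backend; must be selected before pyplot draws anything

import matplotlib.pyplot as plt
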
Example #4
def test_AnalysisStream(db_with_img_and_bg_img, use_db):
    """Test that the documents produced and consumed by the AnalysisStream satisfy the analysis schemas."""
    db = db_with_img_and_bg_img
    config = an.AnalysisConfig()
    config.read(fn)
    config.raw_db = db
    ld = an.AnalysisStream(config)
    # validate the output data
    out_validator = Validator(analysis_out_schemas)
    ld.subscribe(out_validator)
    # validate the input data
    in_validator = Validator(analysis_in_schemas)
    for name, doc in db[-1].canonical(fill="yes", strict_order=True):
        in_validator(name, doc)
        ld(name, doc)
Example #5
def test_AnalysisStream_with_UserConfig(db_with_img_and_bg_img, user_config):
    """Test the analysis stream with user configuration of masking."""
    db = db_with_img_and_bg_img
    config = an.AnalysisConfig()
    config.read(fn)
    ld = an.AnalysisStream(config)
    ld.db = db.v1
    # validate the output data
    out_validator = Validator(analysis_out_schemas)
    ld.subscribe(out_validator)
    # validate the input data
    in_validator = Validator(analysis_in_schemas)
    for name, doc in db[-1].canonical(fill="yes", strict_order=True):
        if name == "start":
            doc = dict(**doc, user_config=user_config)
        in_validator(name, doc)
        ld(name, doc)
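
The user_config argument above is a pytest fixture provided elsewhere in the test suite. Based on the override shown in Example #2, a hypothetical parametrization could look like the following; the parameter values are assumptions, not the project's actual fixture:

import pytest

@pytest.fixture(params=[{"auto_mask": False}, {"auto_mask": True}])
def user_config(request):
    # Each parameter is a dict that the test injects into the start document as user_config.
    return request.param
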
Example #6
def test_Exporter(db_with_dark_and_scan, tmpdir):
    """Test that the Exporter writes tiff, csv and json files for the analyzed run."""
    db = db_with_dark_and_scan
    config = an.AnalysisConfig()
    config.read(fn)
    config.raw_db = db
    ld = an.AnalysisStream(config)
    ep_config = pdfstream.callbacks.analysis.ExportConfig()
    ep_config.read(fn)
    ep_config.tiff_base = str(tmpdir)
    ep = pdfstream.callbacks.analysis.Exporter(ep_config)
    ld.subscribe(ep)
    for name, doc in db[-1].canonical(fill="yes", strict_order=True):
        ld(name, doc)
    tiff_base = Path(ep_config.tiff_base)
    assert len(list(tiff_base.rglob("*.tiff"))) > 0
    assert len(list(tiff_base.rglob("*.csv"))) > 0
    assert len(list(tiff_base.rglob("*.json"))) > 0
Example #7
def test_AnalysisStream(db_with_img_and_bg_img, use_db):
    """Test the AnalysisStream against the analysis schemas, with and without assigning a databroker."""
    db = db_with_img_and_bg_img
    config = an.AnalysisConfig()
    config.read(fn)
    ld = an.AnalysisStream(config)
    if use_db:
        ld.db = db.v1
    # validate the output data
    out_validator = Validator(analysis_out_schemas)
    ld.subscribe(out_validator)
    ld.subscribe(
        lambda name, doc: print(list(map(type, doc["data"].values()))),
        name="event")
    # validate the input data
    in_validator = Validator(analysis_in_schemas)
    for name, doc in db[-1].canonical(fill="yes", strict_order=True):
        in_validator(name, doc)
        # check that no raw numpy arrays are passed through (the subscriber above prints the types)
        ld(name, doc)
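
The lambda subscribed in the example above only prints the types of the event data; to assert, rather than inspect by eye, that no raw numpy arrays are passed through, a small callback along these lines could also be subscribed (a sketch based on the "no numpy array" comment in the test):

import numpy as np

def assert_no_ndarray(name, doc):
    # Fail if any value in the processed event data is still a raw numpy array.
    if name == "event":
        assert not any(isinstance(v, np.ndarray) for v in doc["data"].values())

# usage: ld.subscribe(assert_no_ndarray, name="event")
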
Example #8
def test_ExporterXpdan(db_with_dark_and_scan, tmpdir):
    """Test ExporterXpaan. It should output the correct files in a two layer directory."""
    db = db_with_dark_and_scan
    config = an.AnalysisConfig()
    config.read(fn)
    config.raw_db = db
    ld = an.AnalysisStream(config)
    ep_config = pdfstream.callbacks.analysis.ExportConfig()
    ep_config.read(fn)
    ep_config.tiff_base = str(tmpdir)
    ep = pdfstream.callbacks.analysis.ExporterXpdan(ep_config)
    ld.subscribe(ep)
    for name, doc in db[-1].canonical(fill="yes", strict_order=True):
        ld(name, doc)
    tiff_base = Path(ep_config.tiff_base)
    data_folder = tiff_base.joinpath("Ni")
    assert data_folder.is_dir()
    for dir_name in ("dark_sub", "integration", "meta", "mask", "iq", "sq",
                     "fq", "pdf", "scalar_data"):
        assert data_folder.joinpath(dir_name).is_dir()
        assert len(list(data_folder.joinpath(dir_name).glob("*.*"))) > 0
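
The assertions in this example imply a two-layer layout under tiff_base, roughly as sketched below; "Ni" is the sample name carried by this test run's start document, and each sub-directory holds at least one exported file:

tiff_base/
    Ni/
        dark_sub/
        integration/
        meta/
        mask/
        iq/
        sq/
        fq/
        pdf/
        scalar_data/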