Example #1
def open_acq(fn):
    """
    Read the content of an acquisition file
    return (list of DataArray, list of DataArray):
        list of the data in the file
        thumbnail (if available, might be empty)
    """
    fmt_mng = dataio.find_fittest_converter(fn, default=None, mode=os.O_RDONLY)
    if fmt_mng is None:
        logging.warning("Failed to find a fitting importer for file %s", fn)
        # TODO: try all the formats?
        fmt_mng = dataio.hdf5

    if not hasattr(fmt_mng, "read_data"):
        raise NotImplementedError("No support for importing format %s" % fmt_mng.FORMAT)

    try:
        data = fmt_mng.read_data(fn)
    except Exception:
        raise ValueError("Failed to open the file '%s' as %s" % (fn, fmt_mng.FORMAT))

    if not data:
        logging.warning("Couldn't load any data from file '%s' as %s",
                        fn, fmt_mng.FORMAT)

    try:
        thumb = fmt_mng.read_thumbnail(fn)
    except Exception:
        logging.exception("Failed to read the thumbnail of file '%s' as %s",
                          fn, fmt_mng.FORMAT)
        # doesn't matter that much
        thumb = []

    return data, thumb
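A minimal usage sketch for the helper above (the file name is hypothetical; it assumes the odemis dataio converters are available and logging is configured):

data, thumb = open_acq("acquisition-1.h5")
print("Loaded %d data array(s) and %d thumbnail(s)" % (len(data), len(thumb)))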
Example #2
def acquire_timelapse(num, period, filename):

    # find components by their role
    #    ebeam = model.getComponent(role="ebeam")
    sed = model.getComponent(role="se-detector")

    images = []
    try:
        for i in range(num):
            logging.info("Acquiring image %d/%d", i + 1, num)
            start = time.time()
            images.append(sed.data.get())
            left = period - (time.time() - start)
            if left < 0:
                logging.warning("Acquisition took longer than the period (%g s overdue)", -left)
            else:
                logging.info("Sleeping for another %g s", left)
                time.sleep(left)
    except KeyboardInterrupt:
        logging.info("Closing after only %d images acquired", i + 1)
    except Exception:
        logging.exception("Failed to acquire all the images, will try to save anyway")
    
    # save the file
    exporter = dataio.find_fittest_converter(filename)
    exporter.export(filename, images)
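A call sketch for the function above (hypothetical values; assumes a running Odemis backend exposing an "se-detector" component):

# acquire 10 SEM images, one every 2 s, and save them all to a single file
acquire_timelapse(10, 2.0, "timelapse.h5")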
Example #3
    def save(self, dlg):

        if not hasattr(self._spec_stream, "_orig_raw"):
            box = wx.MessageDialog(self.main_app.main_frame,
                   "No correction was applied",
                   "No correction", wx.OK | wx.ICON_STOP)
            box.ShowModal()
            box.Destroy()
            return

        fn = self.tab_data.acq_fileinfo.value.file_name

        # Store all the data present in the original file => just open it again.
        das_orig = open_acquisition(fn)
        das = []
        for da in das_orig:
            # Is it the stream that we've corrected?
            if (self._spec_stream.raw[0].metadata == da.metadata and
                 self._spec_stream.raw[0].shape == da.shape):
                das.append(self._spec_stream.raw[0])
            else:
                das.append(da)

        # Ask for filename, with default to original filename + _corrected
        # TODO: smart naming scheme if file already exists.
        basefn, ext = os.path.splitext(fn)
        cfn = basefn + "_corrected" + ext
        cfn = ShowAcquisitionFileDialog(dlg, cfn)
        if cfn is not None:
            # Only look up the exporter once we know the user didn't cancel
            exporter = dataio.find_fittest_converter(cfn)
            exporter.export(cfn, das)
        else:
            logging.debug("Saving cancelled")

        dlg.Close()
Example #4
    def acquire(self, dlg):
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        try:
            str_ctrl = self.main_app.main_data.tab.value.streambar_controller
        except AttributeError: # Odemis v2.6 and earlier versions
            str_ctrl = self.main_app.main_data.tab.value.stream_controller
        stream_paused = str_ctrl.pauseStreams()

        strs = []
        if self._survey_s:
            strs.append(self._survey_s)

        strs.append(self._ARspectral_s)

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        try:
            f = acq.acquire(strs)
            dlg.showProgress(f)
            das, e = f.result()  # blocks until all the acquisitions are finished
        except CancelledError:
            pass
        finally:
            pass

        if not f.cancelled() and das:
            if e:
                logging.warning("AR spectral scan partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            logging.debug("Going to export data: %s", das)
            exporter.export(fn, das)

        dlg.Close()
Example #5
def acquire_timelapse(num, period, filename):

    # find components by their role
    #    ebeam = model.getComponent(role="ebeam")
    sed = model.getComponent(role="se-detector")

    images = []
    try:
        for i in range(num):
            logging.info("Acquiring image %d/%d", i + 1, num)
            start = time.time()
            images.append(sed.data.get())
            left = period - (time.time() - start)
            if left < 0:
                logging.warning(
                    "Acquisition took longer than the period (%g s overdue)",
                    -left)
            else:
                logging.info("Sleeping for another %g s", left)
                time.sleep(left)
    except KeyboardInterrupt:
        logging.info("Closing after only %d images acquired", i + 1)
    except Exception:
        logging.exception(
            "Failed to acquire all the images, will try to save anyway")

    # save the file
    exporter = dataio.find_fittest_converter(filename)
    exporter.export(filename, images)
Example #6
 def test_find_fittest_converter_read(self):
     # input args -> format name
     test_io = [
         (("coucou.h5", ), "HDF5"),
         (("coucou.le monde.hdf5", ), "HDF5"),
         (("coucou.H5", ), "HDF5"),
         (("some/fancy/../path/file.tiff", ), "TIFF"),
         (("some/fancy/../.hdf5/h5.ome.tiff", ), "TIFF"),
         (("catmaids://fafb.catmaid.virtualflybrain.org/?pid=1&sid0=1", ),
          "Catmaid"),
         (("catmaid://catmaid.neurodata.io/catmaid/", ), "Catmaid"),
         (("CATMAID://catmaid.neurodata.io/catmaid/", ), "Catmaid"),
         (("a/b/d.tiff", ), "TIFF"),
         (("a/b/d.ome.tiff", ), "TIFF"),
         (("a/b/d.OME.tiff", ), "TIFF"),
         (("a/b/d.OME.TIFF", ), "TIFF"),
         (("a/b/d.h5", ), "HDF5"),
         (("a/b/d.b", ), "TIFF"),  # fallback to tiff
         (("d.hdf5", ), "HDF5"),
         (("d.HDF5", ), "HDF5"),
         (("a/b/d.0.ome.tiff", ),
          "TIFF"),  # Serialised TIFF must be opened by TIFF
     ]
     for args, fmt_exp in test_io:
         fmt_mng = find_fittest_converter(*args, mode=os.O_RDONLY)
         self.assertEqual(
             fmt_mng.FORMAT, fmt_exp,
             "For '%s', expected format %s but got %s" %
             (args[0], fmt_exp, fmt_mng.FORMAT))
Example #7
 def test_find_fittest_converter_write(self):
     # input args -> format name
     test_io = [
         (("coucou.h5", ), "HDF5"),
         (("coucou.le monde.hdf5", ), "HDF5"),
         (("coucou.H5", ), "HDF5"),
         (("some/fancy/../path/file.tiff", ), "TIFF"),
         (("some/fancy/../.hdf5/h5.ome.tiff", ), "TIFF"),
         (("a/b/d.tiff", ), "TIFF"),
         (("a/b/d.ome.tiff", ), "TIFF"),
         (("a/b/d.OME.tiff", ), "TIFF"),
         (("a/b/d.OME.TIFF", ), "TIFF"),
         (("a/b/d.h5", ), "HDF5"),
         (("a/b/d.b", ), "TIFF"),  # fallback to tiff
         (("d.hdf5", ), "HDF5"),
         (("d.HDF5", ), "HDF5"),
         (("a/b/d.0.ome.tiff", ), "Serialized TIFF"),
         (("a/b/d.0.ome.TIFF", ), "Serialized TIFF"),
     ]
     for args, fmt_exp in test_io:
         fmt_mng = find_fittest_converter(*args)
         self.assertEqual(
             fmt_mng.FORMAT, fmt_exp,
             "For '%s', expected format %s but got %s" %
             (args[0], fmt_exp, fmt_mng.FORMAT))
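The two tests above illustrate that the selected converter can depend on the access mode: for a serialised TIFF name such as "a/b/d.0.ome.tiff", reading falls back to the plain TIFF converter, while writing picks "Serialized TIFF". A small sketch of that difference (assuming the odemis.dataio package is importable; the path is only used for extension matching):

import os
from odemis.dataio import find_fittest_converter

fn = "a/b/d.0.ome.tiff"
print(find_fittest_converter(fn, mode=os.O_RDONLY).FORMAT)  # TIFF
print(find_fittest_converter(fn).FORMAT)  # Serialized TIFF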
Example #8
    def acquire(self, dlg):
        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        str_ctrl = self._tab.streambar_controller
        stream_paused = str_ctrl.pauseStreams()

        strs = []
        if self._survey_s:
            strs.append(self._survey_s)

        strs.append(self._ARspectral_s)

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        try:
            f = acqmng.acquire(strs, self.main_app.main_data.settings_obs)
            dlg.showProgress(f)
            das, e = f.result()  # blocks until all the acquisitions are finished
        except CancelledError:
            pass
        finally:
            pass

        if not f.cancelled() and das:
            if e:
                logging.warning("AR spectral scan partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            logging.debug("Going to export data: %s", das)
            exporter.export(fn, das)

        dlg.Close()
Example #9
def acquire_zstack(num, interval, filename):
    """
    Acquire a focus stack of num slices centered around current position, with
    given interval and save to file.
    """
    logging.info("Preparing to acquire z-stack of %d images with interval "
                 "%.3f µm giving total stack size of %.3f µm.",
                 num, interval, num * interval)

    # find components by their role
    ccd = model.getComponent(role="ccd")
    focus = model.getComponent(role="focus")

    origpos = focus.position.value['z']
    interval *= 1.0e-6  # convert from µm to m

    images = []
    try:
        for i in range(num):
            islice = i - num // 2  # Make the stack centered around the origpos
            pos = origpos + islice * interval
            logging.info("Request move to target position %.8f", pos)
            focus.moveAbs({'z': pos}).result()
            logging.info("Acquiring image %d of %d", i + 1, num)
            images.append(ccd.data.get())
    finally:
        # return to original position
        focus.moveAbs({'z': origpos})

    # save the file
    exporter = dataio.find_fittest_converter(filename)
    exporter.export(filename, images)
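A call sketch for the z-stack helper above (hypothetical values; assumes a running backend with "ccd" and "focus" components):

# 11 slices, 0.5 µm apart, centred on the current focus position
acquire_zstack(11, 0.5, "zstack.ome.tiff")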
Example #10
    def __init__(self, fn, number):
        self.number = number

        # get the components
        self.light = model.getComponent(role="light")
        self.ccd = model.getComponent(role="ccd")

        # TODO: only support TIFF
        # prepare the data export
        self.exporter = dataio.find_fittest_converter(fn)

        # Make the name "fn" -> "~/Pictures/fn-XXXXXX.ext"
        path, base = os.path.split(fn)
        bn, ext = os.path.splitext(base)
        tmpl = os.path.join(path, bn + "-%06d" + ext)
        if path.startswith("/"):
            # if fn starts with / => don't add ~/Pictures
            self.fntmpl = tmpl
        else:
            self.fntmpl = os.path.join(get_picture_folder(), tmpl)

        self._acq_done = threading.Event()
        self._n = 0
        self._startt = 0  # starting time of acquisition

        self._q = Queue.Queue()  # queue of tuples (str, DataArray) for saving data
        # TODO: find the right number of threads, based on CPU numbers (but with
        # python threading that might be a bit overkill)
        for i in range(4):
            t = threading.Thread(target=self._saving_thread, args=(i,))
            t.daemon = True
            t.start()
Example #11
    def save(self, dlg):
        """
        Stores the current CL data into a TIFF/HDF5 file
        """
        f = model.ProgressiveFuture()

        try:
            das = self._acquire(dlg, f)
        except CancelledError:
            logging.debug("Stopping acquisition + export, as it was cancelled")
            return
        except Exception as e:
            logging.exception("Failed to acquire CL data: %s", e)
            return

        fn = self.filename.value
        bn, ext = splitext(fn)
        if ext == ".png":
            logging.debug("Using HDF5 instead of PNG")
            fn = bn + ".h5"
        exporter = dataio.find_fittest_converter(fn)

        try:
            exporter.export(fn, das)
        except Exception:
            logging.exception("Failed to store data in %s", fn)

        f.set_result(None)  # Indicate it's over
        self._update_filename()
Example #12
    def _prepare_hardware(self, ebeam_kwargs=None, ebeam_mag=2000, ccd_img=None):
        if ccd_img is None:
            localpath = os.path.dirname(andorcam2.__file__)
            imgpath = os.path.abspath(os.path.join(localpath, "andorcam2-fake-spots-4x4.h5"))
        else:
            # Force absolute path, to be able to accept path relative from here
            localpath = os.path.dirname(__file__)
            imgpath = os.path.abspath(os.path.join(localpath, ccd_img))
        fakeccd = andorcam2.AndorCam2(name="camera", role="ccd", device="fake", image=imgpath)
        # Set the pixel size from the image (as there is no lens + md_updater)
        converter = dataio.find_fittest_converter(imgpath, mode=os.O_RDONLY)
        img = converter.read_data(imgpath)[0]
        fakeccd.updateMetadata({model.MD_PIXEL_SIZE: img.metadata[model.MD_PIXEL_SIZE]})
        self.ccd = fakeccd

        # Force a ratio and hfw_nomag
        conf_scan = CONFIG_SCANNER.copy()
        if ebeam_kwargs:
            conf_scan.update(ebeam_kwargs)
        conf_sem = CONFIG_SEM.copy()
        conf_sem["children"]["scanner"] = conf_scan

        self.sem = semcomedi.SEMComedi(**conf_sem)
        for child in self.sem.children.value:
            if child.name == CONFIG_SED["name"]:
                self.sed = child
            elif child.name == CONFIG_SCANNER["name"]:
                self.ebeam = child
                self.ebeam.magnification.value = ebeam_mag
Example #13
    def __init__(self, fn, number):
        self.number = number

        # get the components
        self.light = model.getComponent(role="light")
        self.ccd = model.getComponent(role="ccd")

        # TODO: only support TIFF
        # prepare the data export
        self.exporter = dataio.find_fittest_converter(fn)

        # Make the name "fn" -> "~/Pictures/fn-XXXXXX.ext"
        path, base = os.path.split(fn)
        bn, ext = os.path.splitext(base)
        tmpl = os.path.join(path, bn + "-%06d" + ext)
        if path.startswith("/"):
            # if fn starts with / => don't add ~/Pictures
            self.fntmpl = tmpl
        else:
            self.fntmpl = os.path.join(get_picture_folder(), tmpl)

        self._acq_done = threading.Event()
        self._n = 0
        self._startt = 0  # starting time of acquisition

        self._q = queue.Queue()  # queue of tuples (str, DataArray) for saving data
        # TODO: find the right number of threads, based on CPU numbers (but with
        # python threading that might be a bit overkill)
        for i in range(4):
            t = threading.Thread(target=self._saving_thread, args=(i,))
            t.daemon = True
            t.start()
Example #14
    def test_real_perfect_overlap(self):
        """
        Test on decomposed image
        """

        numTiles = [2, 3, 4]
        overlap = [0.4]

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)

            for n in numTiles:
                for o in overlap:
                    [tiles, _] = decompose_image(
                        img, o, n, "horizontalZigzag", False)

                    weaver = CollageWeaverReverse()
                    for t in tiles:
                        weaver.addTile(t)

                    sz = len(weaver.getFullImage())
                    w = weaver.getFullImage()

                    numpy.testing.assert_allclose(w, img[:sz, :sz], rtol=1)
Example #15
    def test_shift_real(self):
        """ Test on decomposed image with known shift """
        numTiles = [2, 3, 4]
        overlap = [0.5, 0.4, 0.3, 0.2]
        acq = ["horizontalLines", "horizontalZigzag", "verticalLines"]

        for img, num, o, a in itertools.product(IMGS, numTiles, overlap, acq):
            _, img_name = os.path.split(img)
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            data = ensure2DImage(data)

            # Create artificial tiled image
            [tiles, real_pos] = decompose_image(data, o, num, a)
            px_size = tiles[0].metadata[model.MD_PIXEL_SIZE]
            registrar = GlobalShiftRegistrar()

            # Register tiles
            for tile in tiles:
                registrar.addTile(tile)
            # Compare positions to real positions, allow 5 px offset
            registered_pos = registrar.getPositions()[0]
            diff = numpy.absolute(numpy.subtract(registered_pos, real_pos))
            allowed_px_offset = numpy.repeat(numpy.multiply(px_size, 5),
                                             len(diff))
            numpy.testing.assert_array_less(
                diff.flatten(), allowed_px_offset.flatten(),
                "Position %s pxs off for image '%s', " %
                (max(diff.flatten()) / px_size[0], img_name) +
                "%s x %s tiles, %s ovlp, %s method." % (num, num, o, a))
Example #16
def open_acquisition(filename, fmt=None):
    """
    Opens the data according to the type of file, and returns the opened data.
    If the file contains a pyramidal image, the whole data is not fetched; otherwise
    the entire image is read and returned.
    filename (string): Name of the file where the image is
    fmt (string): The format of the file
    return (list of DataArrays or DataArrayShadows): The opened acquisition source
    """
    if fmt:
        converter = dataio.get_converter(fmt)
    else:
        converter = dataio.find_fittest_converter(filename, mode=os.O_RDONLY)
    data = []
    try:
        if hasattr(converter, 'open_data'):
            acd = converter.open_data(filename)
            data = acd.content
        else:
            data = converter.read_data(filename)
    except Exception:
        logging.exception("Failed to open file '%s' with format %s", filename,
                          fmt)

    return data
Example #17
    def acquire(self, dlg):
        # Configure the monochromator stream according to the settings
        # TODO: read the value from spotPosition instead?
        self._mchr_s.emtTranslation.value = self.ebeam.translation.value
        strs = []
        if self._survey_s:
            strs.append(self._survey_s)
        strs.append(self._mchr_s)

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        str_ctrl = self.main_app.main_data.tab.value.stream_controller
        stream_paused = str_ctrl.pauseStreams()

        try:
            # opm is the optical path manager, that ensures the path is set to the monochromator
            f = acq.acquire(strs, opm=self.main_app.main_data.opm)
            dlg.showProgress(f)
            das, e = f.result()
        except CancelledError:
            pass
        finally:
            str_ctrl.resumeStreams(stream_paused)

        if not f.cancelled() and das:
            if e:
                logging.warning("Monochromator scan partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            exporter.export(fn, das)

            self.showAcquisition(fn)

        dlg.Destroy()
Example #18
    def acquire(self, dlg):
        # Configure the monochromator stream according to the settings
        # TODO: read the value from spotPosition instead?
        self._mchr_s.emtTranslation.value = self.ebeam.translation.value
        strs = []
        if self._survey_s:
            strs.append(self._survey_s)
        strs.append(self._mchr_s)

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)

        # Stop the spot stream and any other stream playing to not interfere with the acquisition
        str_ctrl = self.main_app.main_data.tab.value.stream_controller
        stream_paused = str_ctrl.pauseStreams()

        try:
            # opm is the optical path manager, that ensures the path is set to the monochromator
            f = acq.acquire(strs, opm=self.main_app.main_data.opm)
            dlg.showProgress(f)
            das, e = f.result()
        finally:
            str_ctrl.resumeStreams(stream_paused)

        if not f.cancelled() and das:
            if e:
                logging.warning("Monochromator scan partially failed: %s", e)
            logging.debug("Will save data to %s", fn)
            exporter.export(fn, das)

            self.showAcquisition(fn)

        dlg.Destroy()
Example #19
    def test_shift_real_manual(self):
        """ Test case not generated by decompose.py file and manually cropped """

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            cropped1 = img[0:400, 0:400]
            cropped2 = img[4:404, 322:722]

            registrar = GlobalShiftRegistrar()
            tile1 = model.DataArray(
                numpy.array(cropped1),
                {
                    model.MD_PIXEL_SIZE: [1 / 20, 1 / 20],  # m/px
                    model.MD_POS:
                    (200 / 20, img.shape[1] / 20 - 200 / 20),  # m
                })
            tile2 = model.DataArray(
                numpy.array(cropped2),
                {
                    model.MD_PIXEL_SIZE: [1 / 20, 1 / 20],  # m/px
                    model.MD_POS:
                    (520 / 20, img.shape[1] / 20 - 200 / 20),  # m
                })
            registrar.addTile(tile1)
            registrar.addTile(tile2)
            calculatedPositions = registrar.getPositions()[0]
            diff1 = calculatedPositions[1][0] - 522 / 20
            self.assertLessEqual(diff1, 1 / 20)
            diff2 = calculatedPositions[1][1] - img.shape[1] / 20 - 204 / 20
            self.assertLessEqual(diff2, 1 / 20)
Example #20
    def save(self, dlg):
        """
        Stores the current CL data into a TIFF/HDF5 file
        """
        f = model.ProgressiveFuture()

        try:
            das = self._acquire(dlg, f)
        except CancelledError:
            logging.debug("Stopping acquisition + export, as it was cancelled")
            return
        except Exception as e:
            logging.exception("Failed to acquire CL data: %s", e)
            return

        fn = self.filename.value
        bn, ext = splitext(fn)
        if ext == ".png":
            logging.debug("Using HDF5 instead of PNG")
            fn = bn + ".h5"
        exporter = dataio.find_fittest_converter(fn)

        try:
            exporter.export(fn, das)
        except Exception:
            logging.exception("Failed to store data in %s", fn)

        f.set_result(None)  # Indicate it's over
        self._update_filename()
Example #21
    def test_real_perfect_overlap(self):
        """
        Test on decomposed image
        """

        numTiles = [2, 3, 4]
        overlap = [0.4]

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)

            for n in numTiles:
                for o in overlap:
                    [tiles, _] = decompose_image(img, o, n, "horizontalZigzag",
                                                 False)

                    weaver = CollageWeaverReverse()
                    for t in tiles:
                        weaver.addTile(t)

                    sz = len(weaver.getFullImage())
                    w = weaver.getFullImage()

                    numpy.testing.assert_allclose(w, img[:sz, :sz], rtol=1)
Example #22
    def test_shift_real_manual(self):
        """ Test case not generated by decompose.py file and manually cropped """

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            cropped1 = img[0:400, 0:400]
            cropped2 = img[4:404, 322:722]

            registrar = GlobalShiftRegistrar()
            tile1 = model.DataArray(numpy.array(cropped1), {
                model.MD_PIXEL_SIZE: [1 / 20, 1 / 20],  # m/px
                model.MD_POS: (200 / 20, img.shape[1] / 20 - 200 / 20),  # m
            })
            tile2 = model.DataArray(numpy.array(cropped2), {
                model.MD_PIXEL_SIZE: [1 / 20, 1 / 20],  # m/px
                model.MD_POS: (520 / 20, img.shape[1] / 20 - 200 / 20),  # m
            })
            registrar.addTile(tile1)
            registrar.addTile(tile2)
            calculatedPositions = registrar.getPositions()[0]
            diff1 = calculatedPositions[1][0] - 522 / 20
            self.assertLessEqual(diff1, 1 / 20)
            diff2 = calculatedPositions[1][1] - img.shape[1] / 20 - 204 / 20
            self.assertLessEqual(diff2, 1 / 20)
Example #23
def open_acq(fn):
    """
    Read the content of an acquisition file
    return (list of DataArray, list of DataArray):
        list of the data in the file
        thumbnail (if available, might be empty)
    """
    fmt_mng = dataio.find_fittest_converter(fn, default=None, mode=os.O_RDONLY)
    if fmt_mng is None:
        logging.warning("Failed to find a fitting importer for file %s", fn)
        # TODO: try all the formats?
        fmt_mng = dataio.hdf5

    if not hasattr(fmt_mng, "read_data"):
        raise NotImplementedError("No support for importing format %s" % fmt_mng.FORMAT)

    try:
        data = fmt_mng.read_data(fn)
    except Exception:
        raise ValueError("Failed to open the file '%s' as %s" % (fn, fmt_mng.FORMAT))

    if not data:
        logging.warning("Couldn't load any data from file '%s' as %s",
                        fn, fmt_mng.FORMAT)

    try:
        thumb = fmt_mng.read_thumbnail(fn)
    except Exception:
        logging.exception("Failed to read the thumbnail of file '%s' as %s",
                          fn, fmt_mng.FORMAT)
        # doesn't matter that much
        thumb = []

    return data, thumb
Example #24
    def test_shift_real(self):
        """ Test on decomposed image with known shift """
        numTiles = [2, 3, 4]
        overlap = [0.5, 0.4, 0.3, 0.2]
        acq = ["horizontalLines", "horizontalZigzag", "verticalLines"]

        for img, num, o, a in itertools.product(IMGS, numTiles, overlap, acq):
            _, img_name = os.path.split(img)
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            data = ensure2DImage(data)

            # Create artificial tiled image
            [tiles, real_pos] = decompose_image(data, o, num, a)
            px_size = tiles[0].metadata[model.MD_PIXEL_SIZE]
            registrar = GlobalShiftRegistrar()

            # Register tiles
            for tile in tiles:
                registrar.addTile(tile)
            # Compare positions to real positions, allow 5 px offset
            registered_pos = registrar.getPositions()[0]
            diff = numpy.absolute(numpy.subtract(registered_pos, real_pos))
            allowed_px_offset = numpy.repeat(numpy.multiply(px_size, 5), len(diff))
            numpy.testing.assert_array_less(diff.flatten(), allowed_px_offset.flatten(),
                        "Position %s pxs off for image '%s', " % (max(diff.flatten()) / px_size[0], img_name) +
                        "%s x %s tiles, %s ovlp, %s method." % (num, num, o, a))
Example #25
    def test_shift_real(self):
        """ Test on decomposed image with known shift """
        numTiles = [2, 3]
        overlap = [0.2, 0.3, 0.4]
        acq = ["horizontalLines", "verticalLines", "horizontalZigzag"]

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            data = ensure2DImage(data)
            for num in numTiles:
                for o in overlap:
                    for a in acq:
                        [tiles, pos] = decompose_image(data, o, num, a, False)
                        registrar = IdentityRegistrar()
                        for i in range(len(pos)):
                            registrar.addTile(tiles[i])
                            calculatedPositions = registrar.getPositions()[0]
                            diff1 = abs(calculatedPositions[i][0] - pos[i][0])
                            diff2 = abs(calculatedPositions[i][1] - pos[i][1])
                            # allow difference of 10% of overlap
                            px_size = tiles[i].metadata[model.MD_PIXEL_SIZE]
                            # allow error of 1% of tileSize
                            margin1 = 0.01 * tiles[i].shape[0] * px_size[0]
                            margin2 = 0.01 * tiles[i].shape[1] * px_size[1]

                            self.assertLessEqual(diff1, margin1,
                                                 "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                                 " %f != %f" % (calculatedPositions[i][0], pos[i][0]))
                            self.assertLessEqual(diff2, margin2,
                                                 "Failed for %s tiles, %s overlap and %s method," % (num, o, a) +
                                                 " %f != %f" % (calculatedPositions[i][1], pos[i][1]))
Example #26
    def test_no_seam(self):
        """
        Test on decomposed image
        """

        numTiles = [2, 3, 4]
        overlap = [0.4]
        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            for n in numTiles:
                for o in overlap:
                    [tiles, _] = decompose_image(img, o, n, "horizontalZigzag",
                                                 False)

                    weaver = CollageWeaver(adjust_brightness=False)
                    for t in tiles:
                        weaver.addTile(t)

                    sz = len(weaver.getFullImage())
                    w = weaver.getFullImage()

                    numpy.testing.assert_array_almost_equal(w,
                                                            img[:sz, :sz],
                                                            decimal=1)
Example #27
def acquire(comp_name, dataflow_names, filename):
    """
    Acquire an image from one (or more) dataflow
    comp_name (string): name of the detector to find
    dataflow_names (list of string): name of each dataflow to access
    filename (unicode): name of the output file (format depends on the extension)
    """
    component = get_detector(comp_name)

    # check the dataflow exists
    dataflows = []
    for df_name in dataflow_names:
        try:
            df = getattr(component, df_name)
        except AttributeError:
            raise ValueError("Failed to find data-flow '%s' on component %s" %
                             (df_name, comp_name))

        if not isinstance(df, model.DataFlowBase):
            raise ValueError("%s.%s is not a data-flow" % (comp_name, df_name))

        dataflows.append(df)

    images = []
    for df in dataflows:
        try:
            # Note: currently, get() uses Pyro, which is not as memory efficient
            # as .subscribe(), which uses ZMQ. So would need to use
            # _get_big_image() if very large image is requested.
            image = df.get()
        except Exception as exc:
            raise IOError("Failed to acquire image from component %s: %s" %
                          (comp_name, exc))

        logging.info("Acquired an image of dimension %r.", image.shape)
        images.append(image)

        try:
            if model.MD_PIXEL_SIZE in image.metadata:
                pxs = image.metadata[model.MD_PIXEL_SIZE]
                dim = (image.shape[0] * pxs[0], image.shape[1] * pxs[1])
                logging.info("Physical dimension of image is %s.",
                             units.readable_str(dim, unit="m", sig=3))
            else:
                logging.warning("Physical dimension of image is unknown.")

            if model.MD_SENSOR_PIXEL_SIZE in image.metadata:
                spxs = image.metadata[model.MD_SENSOR_PIXEL_SIZE]
                dim_sens = (image.shape[0] * spxs[0], image.shape[1] * spxs[1])
                logging.info("Physical dimension of sensor is %s.",
                             units.readable_str(dim_sens, unit="m", sig=3))
        except Exception as exc:
            logging.exception("Failed to read image information.")

    exporter = dataio.find_fittest_converter(filename)
    try:
        exporter.export(filename, images)
    except IOError as exc:
        raise IOError(u"Failed to save to '%s': %s" % (filename, exc))
Example #28
    def acquire(self, dlg):
        main_data = self.main_app.main_data
        str_ctrl = main_data.tab.value.streambar_controller
        stream_paused = str_ctrl.pauseStreams()
        dlg.pauseSettings()

        nb = self.numberOfAcquisitions.value
        p = self.period.value
        ss, last_ss = self._get_acq_streams()

        fn = self.filename.value
        exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        sacqt = acq.estimateTime(ss)
        intp = max(0, p - sacqt)
        if p < sacqt:
            logging.warning(
                "Acquisition will take %g s, but period between acquisition must be only %g s",
                sacqt, p
            )

        # TODO: if drift correction, use it over all the time

        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        for i in range(nb):
            left = nb - i
            dur = sacqt * left + intp * (left - 1)
            if left == 1 and last_ss:
                ss += last_ss
                dur += acq.estimateTime(ss) - sacqt

            startt = time.time()
            f.set_progress(end=startt + dur)
            das, e = acq.acquire(ss).result()
            if f.cancelled():
                dlg.resumeSettings()
                return

            exporter.export(fn_pat % (i,), das)

            # Wait the requested period, except the last time
            if left > 1:
                sleept = (startt + p) - time.time()
                if sleept > 0:
                    time.sleep(sleept)
                else:
                    logging.info("Immediately starting next acquisition, %g s late", -sleept)

        f.set_result(None)  # Indicate it's over

        # self.showAcquisition(self.filename.value)
        dlg.Close()
Example #29
    def testRename(self):
        """
        Check it's at least possible to open one DataArray, when the files are
        renamed
        """
        # create a simple greyscale image
        size = (512, 256)
        white = (12, 52)  # non symmetric position
        dtype = numpy.uint16
        ldata = []
        self.no_of_images = 2
        metadata = [{
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_EXP_TIME: 0.2, # s
                    },
                    {
                     model.MD_EXP_TIME: 1.2, # s
                    },
                   ]

        # Add metadata
        for i in range(self.no_of_images):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), metadata[i])
            a[white[::-1]] = 124 + i
            ldata.append(a)

        # export
        orig_name = "boo.0.tiff"
        stiff.export(orig_name, ldata)

        tokens = orig_name.split(".0.", 1)
        ntokens = FILENAME.split(".0.", 1)
        # Renaming the file
        for i in range(self.no_of_images):
            fname = tokens[0] + "." + str(i) + "." + tokens[1]
            new_fname = ntokens[0] + "." + str(i) + "." + ntokens[1]
            os.rename(fname, new_fname)

        # Iterate through the new files
        for i in range(self.no_of_images):
            fname = ntokens[0] + "." + str(i) + "." + ntokens[1]

            fmt_mng = dataio.find_fittest_converter(fname, mode=os.O_RDONLY)
            self.assertEqual(fmt_mng.FORMAT, "TIFF",
                   "For '%s', expected format TIFF but got %s" % (fname, fmt_mng.FORMAT))
            rdata = fmt_mng.read_data(fname)
            # Assert that at least one DA is there
            # In practice, currently, we expect precisely 1
            self.assertGreaterEqual(len(rdata), 1)

            # Check the correct metadata is present
            for j in range(self.no_of_images):
                self.assertAlmostEqual(rdata[j].metadata[model.MD_EXP_TIME],
                                       ldata[j].metadata[model.MD_EXP_TIME])

            rthumbnail = fmt_mng.read_thumbnail(fname)
            # No thumbnail handling for now, so assert that is empty
            self.assertEqual(rthumbnail, [])
Example #30
    def on_acquisition_done(self, future):
        """ Callback called when the acquisition is finished (either successfully or cancelled) """
        if self._main_data_model.opm:
            self._main_data_model.opm.setAcqQuality(path.ACQ_QUALITY_FAST)

        # bind button back to direct closure
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_close)
        self._resume_settings()

        self.acquiring = False

        self.acq_future = None  # To avoid holding the ref in memory
        self._acq_future_connector = None

        try:
            data = future.result(1)  # timeout is just for safety
            self.conf.fn_count = update_counter(self.conf.fn_count)
        except CancelledError:
            # put back to original state:
            # re-enable the acquire button
            self.btn_secom_acquire.Enable()

            # hide progress bar (+ put back estimated time)
            self.update_acquisition_time()
            self.gauge_acq.Hide()
            self.Layout()
            return
        except Exception:
            # We cannot do much: just warn the user and pretend it was cancelled
            logging.exception("Acquisition failed")
            self.btn_secom_acquire.Enable()
            self.lbl_acqestimate.SetLabel("Acquisition failed.")
            self.lbl_acqestimate.Parent.Layout()
            # leave the gauge, to give a hint on what went wrong.
            return

        # Now store the data (as pyramidal data), and open it again (but now it's
        # backed by the persistent storage).
        try:
            exporter = dataio.find_fittest_converter(self.filename)
            if exporter.CAN_SAVE_PYRAMID:
                exporter.export(self.filename, data, pyramid=True)
            else:
                logging.warning(
                    "File format doesn't support saving image in pyramidal form"
                )
                exporter.export(self.filename, data)
            self.data = exporter.open_data(self.filename).content
        except Exception:
            # We cannot do much: just warn the user and pretend it was cancelled
            logging.exception("Storage failed")
            self.btn_secom_acquire.Enable()
            self.lbl_acqestimate.SetLabel("Storage failed.")
            self.lbl_acqestimate.Parent.Layout()
            return

        self.terminate_listeners()
        self.EndModal(wx.ID_OPEN)
Example #31
def get_data(fn):
    reader = dataio.find_fittest_converter(fn)
    das = reader.read_data(fn)
    if len(das) == 0:
        raise LookupError("File %s has no data" % (fn,))
    elif len(das) > 1:
        logging.warning("File %s has more than one data, will only use the first one", fn)

    return das[0]
Example #32
def get_data(fn):
    reader = dataio.find_fittest_converter(fn)
    das = reader.read_data(fn)
    if len(das) == 0:
        raise LookupError("File %s has no data" % (fn,))
    elif len(das) > 1:
        logging.warning("File %s has more than one data, will only use the first one", fn)

    return das[0]
Example #33
def acquire(comp_name, dataflow_names, filename):
    """
    Acquire an image from one (or more) dataflow
    comp_name (string): name of the detector to find
    dataflow_names (list of string): name of each dataflow to access
    filename (unicode): name of the output file (format depends on the extension)
    """
    component = get_detector(comp_name)

    # check the dataflow exists
    dataflows = []
    for df_name in dataflow_names:
        try:
            df = getattr(component, df_name)
        except AttributeError:
            raise ValueError("Failed to find data-flow '%s' on component %s" % (df_name, comp_name))

        if not isinstance(df, model.DataFlowBase):
            raise ValueError("%s.%s is not a data-flow" % (comp_name, df_name))

        dataflows.append(df)

    images = []
    for df in dataflows:
        try:
            # Note: currently, get() uses Pyro, which is not as memory efficient
            # as .subscribe(), which uses ZMQ. So would need to use
            # _get_big_image() if very large image is requested.
            image = df.get()
        except Exception as exc:
            raise IOError("Failed to acquire image from component %s: %s" % (comp_name, exc))

        logging.info("Acquired an image of dimension %r.", image.shape)
        images.append(image)

        try:
            if model.MD_PIXEL_SIZE in image.metadata:
                pxs = image.metadata[model.MD_PIXEL_SIZE]
                dim = (image.shape[0] * pxs[0], image.shape[1] * pxs[1])
                logging.info("Physical dimension of image is %s.",
                             units.readable_str(dim, unit="m", sig=3))
            else:
                logging.warning("Physical dimension of image is unknown.")

            if model.MD_SENSOR_PIXEL_SIZE in image.metadata:
                spxs = image.metadata[model.MD_SENSOR_PIXEL_SIZE]
                dim_sens = (image.shape[0] * spxs[0], image.shape[1] * spxs[1])
                logging.info("Physical dimension of sensor is %s.",
                             units.readable_str(dim_sens, unit="m", sig=3))
        except Exception as exc:
            logging.exception("Failed to read image information.")

    exporter = dataio.find_fittest_converter(filename)
    try:
        exporter.export(filename, images)
    except IOError as exc:
        raise IOError(u"Failed to save to '%s': %s" % (filename, exc))
Example #34
def stitch(infns, registration_method, weaving_method):
    """
    Stitches a set of tiles.
    infns: file names of tiles
    registration_method: the registration method to use for aligning the tiles
    weaving_method: weaving method (WEAVER_MEAN or WEAVER_COLLAGE)
    returns list of data arrays containing the stitched images for every stream
    """

    def leader_quality(da):
        """
        Function for sorting different streams. Use largest EM stream first, then other EM streams,
        then other types of streams sorted by their size.
        return int: The bigger the more leadership
        """
        # For now, we prefer a lot the EM images, because they are usually the
        # one with the smallest FoV and the most contrast
        if da.metadata[model.MD_ACQ_TYPE] == model.MD_AT_EM:  # SEM stream
            return numpy.prod(da.shape)  # More pixel to find the overlap
        else:
            # A lot less likely
            return numpy.prod(da.shape) / 100

    da_streams = []  # for each stream, a list of DataArrays
    for fn in infns:
        # Read data
        converter = dataio.find_fittest_converter(fn)
        # TODO: use open_data/DataArrayShadow when converter support it
        das = converter.read_data(fn)
        logging.debug("Got %d streams from file %s", len(das), fn)

        # Remove the DAs we don't want to (cannot) stitch
        das = add_acq_type_md(das)
        das = [da for da in das if da.metadata[model.MD_ACQ_TYPE] not in \
               (model.MD_AT_AR, model.MD_AT_SPECTRUM)]

        # Add sorted DAs to list
        das = sorted(das, key=leader_quality, reverse=True)
        da_streams.append(tuple(das))

    def get_acq_time(das):
        return das[0].metadata.get(model.MD_ACQ_DATE, 0)

    da_streams = sorted(da_streams, key=get_acq_time)

    das_registered = stitching.register(da_streams, registration_method)

    # Weave every stream
    st_data = []
    for s in range(len(das_registered[0])):
        streams = []
        for da in das_registered:
            streams.append(da[s])
        da = stitching.weave(streams, weaving_method)
        da.metadata[model.MD_DIMS] = "YX"
        st_data.append(da)

    return st_data
Example #35
def stitch(infns, registration_method, weaving_method):
    """
    Stitches a set of tiles.
    infns: file names of tiles
    registration_method: the registration method to use for aligning the tiles
    weaving_method: weaving method (WEAVER_MEAN or WEAVER_COLLAGE)
    returns list of data arrays containing the stitched images for every stream
    """

    def leader_quality(da):
        """
        Function for sorting different streams. Use largest EM stream first, then other EM streams,
        then other types of streams sorted by their size.
        return int: The bigger the more leadership
        """
        # For now, we prefer a lot the EM images, because they are usually the
        # one with the smallest FoV and the most contrast
        if da.metadata[model.MD_ACQ_TYPE] == model.MD_AT_EM:  # SEM stream
            return numpy.prod(da.shape)  # More pixel to find the overlap
        else:
            # A lot less likely
            return numpy.prod(da.shape) / 100

    da_streams = []  # for each stream, a list of DataArrays
    for fn in infns:
        # Read data
        converter = dataio.find_fittest_converter(fn)
        # TODO: use open_data/DataArrayShadow when converter support it
        das = converter.read_data(fn)
        logging.debug("Got %d streams from file %s", len(das), fn)

        # Remove the DAs we don't want to (cannot) stitch
        das = add_acq_type_md(das)
        das = [da for da in das if da.metadata[model.MD_ACQ_TYPE] not in \
               (model.MD_AT_AR, model.MD_AT_SPECTRUM)]

        # Add sorted DAs to list
        das = sorted(das, key=leader_quality, reverse=True)
        da_streams.append(tuple(das))

    def get_acq_time(das):
        return das[0].metadata.get(model.MD_ACQ_DATE, 0)

    da_streams = sorted(da_streams, key=get_acq_time)

    das_registered = stitching.register(da_streams, registration_method)

    # Weave every stream
    st_data = []
    for s in range(len(das_registered[0])):
        streams = []
        for da in das_registered:
            streams.append(da[s])
        da = stitching.weave(streams, weaving_method)
        da.metadata[model.MD_DIMS] = "YX"
        st_data.append(da)

    return st_data
Example #36
    def _fast_acquire_one(self, dlg, st, last_ss):
        """
        Acquires one stream, *as fast as possible* (i.e., the period is not used).
        Only works with LiveStreams (and not with MDStreams)
        st (LiveStream)
        last_ss (list of Streams): all the streams to be acquired on the last iteration
        """
        # Essentially, we trick a little bit the stream, by convincing it that
        # we want a live view, but instead of display the data, we store them.
        # It's much faster because we don't have to stop/start the detector between
        # each acquisition.
        nb = self.numberOfAcquisitions.value

        fn = self.filename.value
        self._exporter = dataio.find_fittest_converter(fn)
        bs, ext = splitext(fn)
        fn_pat = bs + "-%.5d" + ext

        self._acq_completed = threading.Event()

        f = model.ProgressiveFuture()
        f.task_canceller = self._cancel_fast_acquire
        f._stream = st
        if last_ss:
            nb -= 1
            extra_dur = acq.estimateTime([st] + last_ss)
        else:
            extra_dur = 0
        self._hijack_live_stream(st, f, nb, fn_pat, extra_dur)

        try:
            # Start acquisition and wait until it's done
            f.set_running_or_notify_cancel()  # Indicate the work is starting now
            dlg.showProgress(f)
            st.is_active.value = True
            self._acq_completed.wait()

            if f.cancelled():
                dlg.resumeSettings()
                return
        finally:
            st.is_active.value = False  # just to be extra sure it's stopped
            logging.debug("Restoring stream %s", st)
            self._restore_live_stream(st)

        # last "normal" acquisition, if needed
        if last_ss:
            logging.debug("Acquiring last acquisition, with all the streams")
            ss = [st] + last_ss
            f.set_progress(end=time.time() + acq.estimateTime(ss))
            das, e = acq.acquire(
                ss, self.main_app.main_data.settings_obs).result()
            self._save_data(fn_pat % (nb, ), das)

        self._stop_saving_threads()  # Wait for all the data to be stored
        f.set_result(None)  # Indicate it's over
Example #37
    def __init__(self, name, role, children, image=None, drift_period=None,
                 daemon=None, **kwargs):
        '''
        children (dict string->kwargs): parameters setting for the children.
            Known children are "scanner", "detector0", and the optional "focus"
            They will be provided back in the .children VA
        image (str or None): path to a file to use as fake image (relative to
         the directory of this class)
        drift_period (None or 0<float): time period for drift updating in seconds
        Raise an exception if the device cannot be opened
        '''
        # fake image setup
        if image is None:
            image = u"simsem-fake-output.h5"
        image = str(image)
        # ensure relative path is from this file
        if not os.path.isabs(image):
            image = os.path.join(os.path.dirname(__file__), image)
        converter = dataio.find_fittest_converter(image, mode=os.O_RDONLY)
        self.fake_img = img.ensure2DImage(converter.read_data(image)[0])

        self._drift_period = drift_period

        # we will fill the set of children with Components later in ._children
        model.HwComponent.__init__(self, name, role, daemon=daemon, **kwargs)

        self._metadata[model.MD_HW_NAME] = "FakeSEM"

        # create the scanner child
        try:
            ckwargs = children["scanner"]
        except (KeyError, TypeError):
            raise KeyError("SimSEM was not given a 'scanner' child")
        self._scanner = Scanner(parent=self, daemon=daemon, **ckwargs)
        self.children.value.add(self._scanner)

        # create the detector children
        self._detectors = []
        for c, ckwargs in children.items():
            if c.startswith("detector"):
                self._detectors.append(Detector(parent=self, daemon=daemon, **ckwargs))

        if not self._detectors:
            raise KeyError("SimSEM was not given a 'detector0' child")
        self.children.value.update(set(self._detectors))

        try:
            ckwargs = children["focus"]
        except (KeyError, TypeError):
            logging.info("Will not simulate focus")
            self._focus = None
        else:
            self._focus = EbeamFocus(parent=self, daemon=daemon, **ckwargs)
            self.children.value.add(self._focus)
Example #38
    def __init__(self, name, role, children, image=None, drift_period=None,
                 daemon=None, **kwargs):
        '''
        children (dict string->kwargs): parameters setting for the children.
            Known children are "scanner", "detector0", and the optional "focus"
            They will be provided back in the .children VA
        image (str or None): path to a file to use as fake image (relative to
         the directory of this class)
        drift_period (None or 0<float): time period for drift updating in seconds
        Raise an exception if the device cannot be opened
        '''
        # fake image setup
        if image is None:
            image = u"simsem-fake-output.h5"
        image = unicode(image)
        # ensure relative path is from this file
        if not os.path.isabs(image):
            image = os.path.join(os.path.dirname(__file__), image)
        converter = dataio.find_fittest_converter(image, mode=os.O_RDONLY)
        self.fake_img = img.ensure2DImage(converter.read_data(image)[0])

        self._drift_period = drift_period

        # we will fill the set of children with Components later in ._children
        model.HwComponent.__init__(self, name, role, daemon=daemon, **kwargs)

        self._metadata[model.MD_HW_NAME] = "FakeSEM"

        # create the scanner child
        try:
            ckwargs = children["scanner"]
        except (KeyError, TypeError):
            raise KeyError("SimSEM was not given a 'scanner' child")
        self._scanner = Scanner(parent=self, daemon=daemon, **ckwargs)
        self.children.value.add(self._scanner)

        # create the detector children
        self._detectors = []
        for c, ckwargs in children.items():
            if c.startswith("detector"):
                self._detectors.append(Detector(parent=self, daemon=daemon, **ckwargs))

        if not self._detectors:
            raise KeyError("SimSEM was not given a 'detector0' child")
        self.children.value.update(set(self._detectors))

        try:
            ckwargs = children["focus"]
        except (KeyError, TypeError):
            logging.info("Will not simulate focus")
            self._focus = None
        else:
            self._focus = EbeamFocus(parent=self, daemon=daemon, **ckwargs)
            self.children.value.add(self._focus)
Example #39
    def testMissing(self):
        """
        Check it's at least possible to open one DataArray, when the other parts
        are missing.
        """
        # create a simple greyscale image
        size = (512, 256)
        white = (12, 52)  # non symmetric position
        dtype = numpy.uint16
        ldata = []
        self.no_of_images = 2
        metadata = [
            {
                model.MD_IN_WL: (500e-9, 520e-9),  # m
                model.MD_EXP_TIME: 0.2,  # s
            },
            {
                model.MD_EXP_TIME: 1.2,  # s
            },
        ]

        # Add metadata
        for i in range(self.no_of_images):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), metadata[i])
            a[white[::-1]] = 124 + i
            ldata.append(a)

        # export
        stiff.export(FILENAME, ldata)

        # Ooops, the first file is gone => it should still be possible to open
        # the other files
        os.remove(FILENAME)
        tokens = FILENAME.split(".0.", 1)
        for i in range(1, self.no_of_images):
            fname = tokens[0] + "." + str(i) + "." + tokens[1]

            fmt_mng = dataio.find_fittest_converter(fname, mode=os.O_RDONLY)
            self.assertEqual(
                fmt_mng.FORMAT, "TIFF",
                "For '%s', expected format TIFF but got %s" %
                (fname, fmt_mng.FORMAT))
            rdata = fmt_mng.read_data(fname)
            # Assert that at least one DA is there
            # In practice, currently, we expect precisely 1
            self.assertGreaterEqual(len(rdata), 1)

            # Check the correct metadata is present
            self.assertAlmostEqual(rdata[0].metadata[model.MD_EXP_TIME],
                                   ldata[i].metadata[model.MD_EXP_TIME])

            rthumbnail = fmt_mng.read_thumbnail(fname)
            # No thumbnail handling for now, so assert that is empty
            self.assertEqual(rthumbnail, [])
Example #40
def save_acq(fn, data, thumbs):
    """
    Saves to a file the data and thumbnail
    """
    exporter = dataio.find_fittest_converter(fn)

    # For now the exporter supports only one thumbnail
    if thumbs:
        thumb = thumbs[0]
    else:
        thumb = None
    exporter.export(fn, data, thumb)
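A round-trip sketch combining save_acq with a reader such as open_acq from Example #1 (hypothetical file names; assumes both helpers are defined together):

data, thumbs = open_acq("input.h5")
save_acq("copy.ome.tiff", data, thumbs)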
Example #41
    def testMissing(self):
        """
        Check it's at least possible to open one DataArray, when the other parts
        are missing.
        """
        # create a simple greyscale image
        size = (512, 256)
        white = (12, 52)  # non symmetric position
        dtype = numpy.uint16
        ldata = []
        self.no_of_images = 2
        metadata = [{
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                     model.MD_EXP_TIME: 0.2,  # s
                    },
                    {
                     model.MD_EXP_TIME: 1.2,  # s
                    },
                   ]

        # Add metadata
        for i in range(self.no_of_images):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), metadata[i])
            a[white[::-1]] = 124 + i
            ldata.append(a)

        # export
        stiff.export(FILENAME, ldata)

        # Oops, the first file is gone => it should still be possible to open
        # the other files
        os.remove(FILENAME)
        tokens = FILENAME.split(".0.", 1)
        for i in range(1, self.no_of_images):
            fname = tokens[0] + "." + str(i) + "." + tokens[1]

            fmt_mng = dataio.find_fittest_converter(fname, mode=os.O_RDONLY)
            self.assertEqual(fmt_mng.FORMAT, "TIFF",
                   "For '%s', expected format TIFF but got %s" % (fname, fmt_mng.FORMAT))
            rdata = fmt_mng.read_data(fname)
            # Assert that at least one DA is there
            # In practice, currently, we expect precisely 1
            self.assertGreaterEqual(len(rdata), 1)

            # Check the correct metadata is present
            self.assertAlmostEqual(rdata[0].metadata[model.MD_EXP_TIME],
                                   ldata[i].metadata[model.MD_EXP_TIME])

            rthumbnail = fmt_mng.read_thumbnail(fname)
            # No thumbnail handling for now, so assert that it is empty
            self.assertEqual(rthumbnail, [])
Example #42
0
def save_data(das, filename):
    """
    Saves a series of spectra
    das (list of DataArray): data to save
    filename (str)
    """
    exporter = dataio.find_fittest_converter(filename)

    if os.path.exists(filename):
        # mostly to warn if multiple ypos/xpos are rounded to the same value
        logging.warning("Overwriting file '%s'.", filename)
    else:
        logging.info("Saving file '%s", filename)

    exporter.export(filename, das)
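# Hedged caller-side sketch (not from the original source): build one file name
# per stage position; the overwrite warning above is what catches two positions
# that get rounded to the same name.
def fn_for_position(xpos, ypos):
    # xpos/ypos in m, encoded in µm with 0.1 µm resolution (hypothetical scheme)
    return "spectrum_x%+09.1f_y%+09.1f.h5" % (xpos * 1e6, ypos * 1e6)

# e.g. fn_for_position(12.34e-6, -5.6e-6) -> "spectrum_x+000012.3_y-000005.6.h5"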
Example #43
0
def save_data(das, filename):
    """
    Saves a series of spectra
    das (list of DataArray): data to save
    filename (str)
    """
    exporter = dataio.find_fittest_converter(filename)

    if os.path.exists(filename):
        # mostly to warn if multiple ypos/xpos are rounded to the same value
        logging.warning("Overwriting file '%s'.", filename)
    else:
        logging.info("Saving file '%s", filename)

    exporter.export(filename, das)
Example #44
0
    def save_data(self, data, fn):
        """
        Saves the data into a file
        data (model.DataArray or list of model.DataArray): the data to save
        fn (unicode): filename of the file to save
        """
        exporter = dataio.find_fittest_converter(fn)

        if os.path.exists(fn):
            # mostly to warn if multiple ypos/xpos are rounded to the same value
            logging.warning("Overwriting file '%s'.", fn)
        else:
            logging.info("Saving file '%s'", fn)

        exporter.export(unicode(fn), data)
Example #45
0
def acquire_timelapse(num, filename):

    ccd = None
    # find components by their role
    for c in model.getComponents():
        if c.role == "ccd":
            ccd = c

    images = []
    for i in range(num):
        logging.info("Acquiring image %d", i + 1)
        images.append(ccd.data.get())
    
    # save the file
    exporter = dataio.find_fittest_converter(filename)
    exporter.export(filename, images)
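# Usage sketch (illustration only): acquire 10 images back to back and let the
# file extension decide the exporter. Assumes a running back-end exposing a
# component with role "ccd", as in the function above.
if __name__ == "__main__":
    acquire_timelapse(10, "timelapse.ome.tiff")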
Example #46
0
def acquire_timelapse(num, filename):

    ccd = None
    # find components by their role
    for c in model.getComponents():
        if c.role == "ccd":
            ccd = c

    images = []
    for i in range(num):
        logging.info("Acquiring image %d", i + 1)
        images.append(ccd.data.get())

    # save the file
    exporter = dataio.find_fittest_converter(filename)
    exporter.export(filename, images)
Example #47
0
    def testExportOpener(self):
        # create a simple greyscale image
        size = (512, 256)
        white = (12, 52)  # non symmetric position
        dtype = numpy.uint16
        ldata = []
        self.no_of_images = 2
        metadata = [
            {
                model.MD_IN_WL: (500e-9, 520e-9),  # m
            },
            {
                model.MD_EXP_TIME: 1.2,  # s
            },
        ]

        # Add wavelength metadata just to group them
        for i in range(self.no_of_images):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), metadata[i])
            a[white[::-1]] = 124
            ldata.append(a)

        # export
        stiff.export(FILENAME, ldata)

        tokens = FILENAME.split(".0.", 1)

        # Iterate through the files generated. Opening any of them should
        # return _all_ the DAs
        for file_index in range(self.no_of_images):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]

            fmt_mng = dataio.find_fittest_converter(fname, mode=os.O_RDONLY)
            self.assertEqual(
                fmt_mng.FORMAT, "TIFF",
                "For '%s', expected format TIFF but got %s" %
                (fname, fmt_mng.FORMAT))
            rdata = fmt_mng.read_data(fname)
            # Assert all the DAs are there
            self.assertEqual(len(rdata), len(ldata))
            for da in rdata:
                self.assertEqual(da[white[::-1]], 124)

            rthumbnail = fmt_mng.read_thumbnail(fname)
            # No thumbnail handling for now, so assert that it is empty
            self.assertEqual(rthumbnail, [])
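        # Reading note (inferred from this test, so treat it as an assumption):
        # stiff.export() writes one file per group of compatible DataArrays,
        # named <base>.0.ome.tiff, <base>.1.ome.tiff, ...; opening any one of
        # them through the TIFF converter is expected to return all the DAs,
        # which is why the names are rebuilt around the ".0." token above.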
Example #48
0
    def test_find_fittest_converter_read(self):
        # input args -> format name
        test_io = [(("coucou.h5",), "HDF5"),
                   (("coucou.le monde.hdf5",), "HDF5"),
                   (("some/fancy/../path/file.tiff",), "TIFF"),
                   (("some/fancy/../.hdf5/h5.ome.tiff",), "TIFF"),
                   (("a/b/d.tiff",), "TIFF"),
                   (("a/b/d.ome.tiff",), "TIFF"),
                   (("a/b/d.h5",), "HDF5"),
                   (("a/b/d.b",), "TIFF"),  # fallback to tiff
                   (("d.hdf5",), "HDF5"),
                   (("a/b/d.0.ome.tiff",), "TIFF"),  # Serialised TIFF must be opened by TIFF
                   ]
        for args, fmt_exp in test_io:
            fmt_mng = find_fittest_converter(*args, mode=os.O_RDONLY)
            self.assertEqual(fmt_mng.FORMAT, fmt_exp,
                             "For '%s', expected format %s but got %s" %
                             (args[0], fmt_exp, fmt_mng.FORMAT))
Example #49
0
    def save_data(self, data, fn):
        """
        Saves the data into a file
        data (model.DataArray or list of model.DataArray): the data to save
        fn (unicode): filename of the file to save
        """
        exporter = dataio.find_fittest_converter(fn)

        # TODO: put the first data in a StaticStream to get a thumbnail

        if os.path.exists(fn):
            # mostly to warn if multiple ypos/xpos are rounded to the same value
            logging.warning("Overwriting file '%s'.", fn)
        else:
            logging.info("Saving file '%s'", fn)

        exporter.export(fn, data)
Example #50
0
    def testExportOpener(self):
        # create a simple greyscale image
        size = (512, 256)
        white = (12, 52)  # non symmetric position
        dtype = numpy.uint16
        ldata = []
        self.no_of_images = 2
        metadata = [{
                     model.MD_IN_WL: (500e-9, 520e-9),  # m
                    },
                    {
                     model.MD_EXP_TIME: 1.2,  # s
                    },
                   ]

        # Add wavelength metadata just to group them
        for i in range(self.no_of_images):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), metadata[i])
            a[white[::-1]] = 124
            ldata.append(a)

        # export
        stiff.export(FILENAME, ldata)

        tokens = FILENAME.split(".0.", 1)

        # Iterate through the files generated. Opening any of them should
        # return _all_ the DAs
        for file_index in range(self.no_of_images):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]

            fmt_mng = dataio.find_fittest_converter(fname, mode=os.O_RDONLY)
            self.assertEqual(fmt_mng.FORMAT, "TIFF",
                   "For '%s', expected format TIFF but got %s" % (fname, fmt_mng.FORMAT))
            rdata = fmt_mng.read_data(fname)
            # Assert all the DAs are there
            self.assertEqual(len(rdata), len(ldata))
            for da in rdata:
                self.assertEqual(da[white[::-1]], 124)

            rthumbnail = fmt_mng.read_thumbnail(fname)
            # No thumbnail handling for now, so assert that it is empty
            self.assertEqual(rthumbnail, [])
Example #51
0
    def acquire(self, dlg):
        nb = self.numberOfAcquisitions.value
        p = self.period.value
        sacqt = self._stream.estimateAcquisitionTime()
        intp = max(0, p - sacqt)
        if p < sacqt:
            logging.warning(
                "Acquisition will take %g s, but period between acquisition must be only %g s",
                sacqt, p)

        exporter = dataio.find_fittest_converter(self.filename.value)

        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        das = []
        for i in range(nb):
            left = nb - i
            dur = sacqt * left + intp * (left - 1)
            startt = time.time()
            f.set_progress(end=startt + dur)
            d, e = acq.acquire([self._stream]).result()
            das.extend(d)
            if f.cancelled():
                return

            # Wait for the requested period, except for the last iteration
            if left > 1:
                sleept = (startt + p) - time.time()
                if sleept > 0:
                    time.sleep(sleept)
                else:
                    logging.info(
                        "Immediately starting next acquisition, %g s late",
                        -sleept)

        exporter.export(self.filename.value, das)
        f.set_result(None)  # Indicate it's over

        # self.showAcquisition(self.filename.value)
        dlg.Destroy()
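        # Worked example of the remaining-time estimate above (illustrative numbers):
        # with nb = 3, sacqt = 2 s and p = 5 s, intp = max(0, 5 - 2) = 3 s, so before
        # the first acquisition left = 3 and dur = 2 * 3 + 3 * (3 - 1) = 12 s,
        # i.e. three acquisitions plus the two waits between them.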
Example #52
0
    def test_real_images_identity(self):
        """
        Test register wrapper function
        """
        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            num = 2
            o = 0.2
            a = "horizontalZigzag"
            [tiles, pos] = decompose_image(img, o, num, a, False)

            upd_tiles = register(tiles, method=REGISTER_IDENTITY)

            for i in range(len(upd_tiles)):
                calculatedPosition = upd_tiles[i].metadata[model.MD_POS]
                self.assertAlmostEqual(calculatedPosition[0], pos[i][0], places=1)
                self.assertAlmostEqual(calculatedPosition[1], pos[i][1], places=1)
Example #53
0
    def acquire(self, dlg):
        nb = self.numberOfAcquisitions.value
        p = self.period.value
        sacqt = self._stream.estimateAcquisitionTime()
        intp = max(0, p - sacqt)
        if p < sacqt:
            logging.warning(
                "Acquisition will take %g s, but period between acquisition must be only %g s",
                sacqt, p
            )

        exporter = dataio.find_fittest_converter(self.filename.value)

        f = model.ProgressiveFuture()
        f.task_canceller = lambda l: True  # To allow cancelling while it's running
        f.set_running_or_notify_cancel()  # Indicate the work is starting now
        dlg.showProgress(f)

        das = []
        for i in range(nb):
            left = nb - i
            dur = sacqt * left + intp * (left - 1)
            startt = time.time()
            f.set_progress(end=startt + dur)
            d, e = acq.acquire([self._stream]).result()
            das.extend(d)
            if f.cancelled():
                return

            # Wait for the requested period, except for the last iteration
            if left > 1:
                sleept = (startt + p) - time.time()
                if sleept > 0:
                    time.sleep(sleept)
                else:
                    logging.info("Immediately starting next acquisition, %g s late", -sleept)

        exporter.export(self.filename.value, das)
        f.set_result(None)  # Indicate it's over

        # self.showAcquisition(self.filename.value)
        dlg.Destroy()
Example #54
0
    def test_no_seam(self):
        """
        Test on decomposed image
        """

        for img in IMGS:
            conv = find_fittest_converter(img)
            data = conv.read_data(img)[0]
            img = ensure2DImage(data)
            numTiles = [2, 3, 4]
            overlap = [0.2, 0.3, 0.4]

            for n in numTiles:
                for o in overlap:
                    [tiles, _] = decompose_image(
                        img, o, n, "horizontalZigzag", False)

                    w = weave(tiles, WEAVER_MEAN)
                    sz = len(w)
                    numpy.testing.assert_allclose(w, img[:sz, :sz], rtol=1)
Example #55
0
    def testExportOpener(self):
        # create a simple greyscale image
        size = (512, 256)
        white = (12, 52)  # non symmetric position
        dtype = numpy.uint16
        ldata = []
        num = 2
        metadata = {
                    model.MD_IN_WL: (500e-9, 520e-9),  # m
                    }
        for i in range(num):
            a = model.DataArray(numpy.zeros(size[::-1], dtype), metadata)
            a[white[::-1]] = 124
            ldata.append(a)

        # export
        stiff.export(FILENAME, ldata)

        tokens = FILENAME.split(".0.", 1)
        self.no_of_images = 1
        # Iterate through the files generated
        for file_index in range(self.no_of_images):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]
            
            fmt_mng = dataio.find_fittest_converter(fname, mode=os.O_RDONLY)
            self.assertEqual(fmt_mng.FORMAT, "TIFF",
                   "For '%s', expected format TIFF but got %s" % (fname, fmt_mng.FORMAT))
            rdata = fmt_mng.read_data(fname)
            # Assert all the DAs are there
            self.assertEqual(len(rdata), len(ldata))
            
            rthumbnail = fmt_mng.read_thumbnail(fname)
            # No thumbnail handling for now, so assert that it is empty
            self.assertEqual(rthumbnail, [])
        
        self.no_of_images = 1
        # Remove the files generated
        for file_index in range(self.no_of_images):
            fname = tokens[0] + "." + str(file_index) + "." + tokens[1]
            os.remove(fname)
Example #56
0
    def __init__(self, fn, number):
        self.number = number

        # get the components
        self.light = model.getComponent(role="light")
        self.ccd = model.getComponent(role="ccd")

        # prepare the data export
        self.exporter = dataio.find_fittest_converter(fn)
        
        # Make the name "fn" -> "~/Pictures + fn + fn-XXXX.ext"
        path, base = os.path.split(fn)
        bn, ext = os.path.splitext(base)
        tmpl = os.path.join(path, bn, bn + "-%05d" + ext)
        if path.startswith("/"):
            # if fn starts with / => don't add ~/Pictures
            self.fntmpl = tmpl
        else:
            self.fntmpl = os.path.join(get_picture_folder(), tmpl)


        self._n = 0
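    # Hedged sketch (not part of the original class) of how the pieces prepared
    # above might be used; the method name and the export call below are
    # assumptions for illustration only:
    #
    #   def snapshot(self):
    #       data = self.ccd.data.get()
    #       fname = self.fntmpl % self._n  # e.g. ".../basename/basename-00000.tiff"
    #       self.exporter.export(fname, data)
    #       self._n += 1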