Beispiel #1
0
    def test_chronogram_raw(self):
        """Check raw chronogram export and CSV saving for several selection widths."""
        filename = "test-spec-spot.csv"

        self.spec_stream.selectionWidth.value = 1
        proj = SinglePointTemporalProjection(self.spec_stream)

        for width in (1, 3):
            self.spec_stream.selectionWidth.value = width
            exported_data = img.chronogram_to_export_data(proj, True)
            # Raw export contains exactly one value per time point
            self.assertEqual(exported_data.shape[0], self.spec_data.shape[1])

            # Save into a CSV file; os.stat() also verifies the file was created
            exporter = dataio.get_converter("CSV")
            exporter.export(filename, exported_data)
            st = os.stat(filename)
            self.assertGreater(st.st_size, 10)

        # best-effort clean-up
        try:
            os.remove(filename)
        except Exception:
            pass
Beispiel #2
0
    def test_line_raw(self):
        """Check raw line-spectrum export and CSV saving for several selection widths."""
        filename = "test-spec-line.csv"

        self.spec_stream.selectionWidth.value = 1
        proj = LineSpectrumProjection(self.spec_stream)

        for width in (1, 4):
            self.spec_stream.selectionWidth.value = width
            exported_data = img.line_to_export_data(proj, True)
            self.assertEqual(exported_data.shape[1], self.spec_data.shape[0])
            self.assertGreater(exported_data.shape[0], 64)  # at least 65-1 px
            self.assertEqual(exported_data.metadata[model.MD_DIMS], "XC")

            # Save into a CSV file; os.stat() also verifies the file was created
            exporter = dataio.get_converter("CSV")
            exporter.export(filename, exported_data)
            st = os.stat(filename)
            self.assertGreater(st.st_size, 100)

        # best-effort clean-up
        try:
            os.remove(filename)
        except Exception:
            pass
Beispiel #3
0
    def test_chronogram_raw(self):
        """Export the chronogram as raw data and write it to a CSV file."""
        filename = "test-spec-spot.csv"

        def export_and_check():
            # Export raw data only: one value per time point of the spectrum
            exported = img.chronogram_to_export_data(proj, True)
            self.assertEqual(exported.shape[0], self.spec_data.shape[1])
            # os.stat() doubles as a check that the CSV file was created
            dataio.get_converter("CSV").export(filename, exported)
            self.assertGreater(os.stat(filename).st_size, 10)

        self.spec_stream.selectionWidth.value = 1
        proj = SinglePointTemporalProjection(self.spec_stream)
        export_and_check()

        self.spec_stream.selectionWidth.value = 3
        export_and_check()

        # clean up
        try:
            os.remove(filename)
        except Exception:
            pass
Beispiel #4
0
    def test_line_raw(self):
        """Export a line-spectrum projection as raw data and save it as CSV."""
        filename = "test-spec-line.csv"

        def export_and_check():
            exported = img.line_to_export_data(proj, True)
            self.assertEqual(exported.shape[1], self.spec_data.shape[0])
            self.assertGreater(exported.shape[0], 64)  # at least 65-1 px
            self.assertEqual(exported.metadata[model.MD_DIMS], "XC")

            # Write the CSV file; os.stat() also checks the file exists
            dataio.get_converter("CSV").export(filename, exported)
            self.assertGreater(os.stat(filename).st_size, 100)

        self.spec_stream.selectionWidth.value = 1
        proj = LineSpectrumProjection(self.spec_stream)
        export_and_check()

        self.spec_stream.selectionWidth.value = 4
        export_and_check()

        # clean up
        try:
            os.remove(filename)
        except Exception:
            pass
Beispiel #5
0
    def get_export_formats(self, export_type):
        """
        Find the available file formats for the given export_type
        export_type (string): spatial, AR, spectrum or spectrum-line
        return (dict string -> (string, list of strings)):
             name of each format -> nice name of the format, list of extensions.
        """
        pr_formats, pp_formats = EXPORTERS[export_type]

        export_formats = OrderedDict()
        # Check dynamically which converters are actually available:
        # print-ready formats first, then post-processing formats.
        for prefix, formats in ((PR_PREFIX, pr_formats), (PP_PREFIX, pp_formats)):
            for format_data in formats:
                exporter = get_converter(format_data)
                nice_name = prefix + " " + exporter.FORMAT
                export_formats[exporter.FORMAT] = (nice_name, exporter.EXTENSIONS)

        if not export_formats:
            logging.error("No file converter found!")

        return export_formats
Beispiel #6
0
    def _get_export_info(self):
        """
        Return str, str, str: full filename, exporter name, export type
          Full filename is None if cancelled by user
        """
        # Default exporter: first raw (post-processing) or first print-ready
        # format of the list, depending on the configuration.
        view = self._data_model.focussedView.value
        export_type = self.get_export_type(view)
        pr_formats, pp_formats = EXPORTERS[export_type]
        fmt = pp_formats[0] if self._conf.export_raw else pr_formats[0]
        default_exporter = get_converter(fmt)
        extension = default_exporter.EXTENSIONS[0]

        batch_export = False

        # Suggested name = current file name + stream/view name + default extension
        fi = self._data_model.acq_fileinfo.value
        if fi is not None and fi.file_name:
            # Drop the extension of the acquisition file
            basename, _ = udataio.splitext(os.path.basename(fi.file_name))

            # Use the stream name if there is just one stream, otherwise the view name
            streams = view.getStreams()
            if len(streams) == 1:
                basename += " " + streams[0].name.value
            else:
                # TODO: remove numbers from the view name?
                basename += " " + view.name.value

            # AR views of polarization/polarimetry typically contain 6 or 24
            # images, so they get a special batch export.
            if export_type == 'AR' and streams:
                s0 = streams[0]
                if (hasattr(s0, "polarimetry")
                        or (hasattr(s0, "polarization")
                            and len(s0.polarization.choices) > 1)):
                    batch_export = True
        else:
            basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())

        filepath = os.path.join(self._conf.last_export_path,
                                basename + extension)

        # filepath will be None if cancelled by user
        return self.ShowExportFileDialog(filepath, default_exporter, batch_export)
Beispiel #7
0
    def export_viewport(self, filepath, export_format, export_type):
        """ Export the image from the focused view to the filesystem.

        :param filepath: (str) full path to the destination file
        :param export_format: (str) the format name
        :param export_type: (str) spatial, AR, spectrum or spectrum-line
        """
        try:
            exporter = get_converter(export_format)
            # "Raw" iff the format belongs to the post-processing list
            raw = export_format in EXPORTERS[export_type][1]
            # Remember the user's choices for next time
            self._conf.export_raw = raw
            self._conf.last_export_path = os.path.dirname(filepath)

            # Compute the image and record everything to the file
            exported_data = self.export(export_type, raw)
            exporter.export(filepath, exported_data)

            popup.show_message(self._main_frame,
                               "Exported in %s" % (filepath,),
                               timeout=3)
            logging.info("Exported a %s view into file '%s'.", export_type, filepath)
        except LookupError as ex:
            # Typically: no stream visible in the view => nothing to export
            logging.info("Export of a %s view as %s seems to contain no data.",
                         export_type, export_format, exc_info=True)
            err_dlg = wx.MessageDialog(self._main_frame,
                                       "Failed to export: %s\n"
                                       "Please make sure that at least one stream is visible in the current view." % (ex,),
                                       "No data to export",
                                       wx.OK | wx.ICON_WARNING)
            err_dlg.ShowModal()
            err_dlg.Destroy()
        except Exception:
            logging.exception("Failed to export a %s view as %s", export_type, export_format)
Beispiel #8
0
def open_acquisition(filename, fmt=None):
    """
    Opens the data according to the type of file, and returns the opened data.
    If it's a pyramidal image, do not fetch the whole data from the image. If the image
    is not pyramidal, it reads the entire image and returns it
    filename (string): Name of the file where the image is
    fmt (string): The format of the file
    return (list of DataArrays or DataArrayShadows): The opened acquisition source
    """
    # Pick the converter: explicit format, or guess the fittest one
    if fmt:
        converter = dataio.get_converter(fmt)
    else:
        converter = dataio.find_fittest_converter(filename, mode=os.O_RDONLY)

    data = []
    try:
        if hasattr(converter, "open_data"):
            # Lazy (pyramidal-capable) access: only open, don't read everything
            data = converter.open_data(filename).content
        else:
            # No lazy support => read the full content in memory
            data = converter.read_data(filename)
    except Exception:
        logging.exception("Failed to open file '%s' with format %s", filename, fmt)

    return data
Beispiel #9
0
    def export_viewport(self, filepath, export_format, export_type):
        """ Export the image from the focused view to the filesystem.

        :param filepath: (str) full path to the destination file
        :param export_format: (str) the format name
        :param export_type: (str) spatial, AR or spectrum
        """
        try:
            converter = get_converter(export_format)
            # The format is "raw" iff it is listed as a post-processing format
            is_raw = export_format in EXPORTERS[export_type][1]
            exported = self.export(export_type, is_raw)
            # Record everything to a file
            converter.export(filepath, exported)

            popup.show_message(self._main_frame,
                               "Exported in %s" % (filepath,),
                               timeout=3)

            logging.info("Exported a %s view into file '%s'.", export_type, filepath)
        except LookupError as ex:
            # Typically: no visible stream in the view => nothing to export
            logging.info("Export of a %s view as %s seems to contain no data.",
                         export_type, export_format, exc_info=True)
            warn = wx.MessageDialog(self._main_frame,
                                    "Failed to export: %s\n"
                                    "Please make sure that at least one stream is visible in the current view." % (ex,),
                                    "No data to export",
                                    wx.OK | wx.ICON_WARNING)
            warn.ShowModal()
            warn.Destroy()
        except Exception:
            logging.exception("Failed to export a %s view as %s", export_type, export_format)
Beispiel #10
0
    def save_data(self, data, **kwargs):
        """
        Saves the data into a file.
        :param data: (model.DataArray or list of model.DataArray) The data to save.
        :param kwargs: (dict (str->value)) Values to substitute in the file name.
          Must contain "idx" (int), which is incremented for every DataArray saved.
        """
        # export to single tiff files
        exporter = dataio.get_converter(FMT)

        for d in data[:2]:  # only care about the sem ones, the optical images are already saved
            # Pick the file "type" based on the acquisition description
            desc = d.metadata.get(model.MD_DESCRIPTION)
            if desc == "Anchor region":
                kwargs["type"] = "drift"
            elif desc == "Secondary electrons concurrent":
                kwargs["type"] = "concurrent"
            else:
                kwargs["type"] = "survey"

            kwargs["xpos"] = 0
            kwargs["ypos"] = 0
            fn = FN_FMT % kwargs

            # The data is normally ordered: survey, concurrent, drift
            # => first 2 files are the ones we care about
            if kwargs["idx"] < 2:
                self.fns.append(fn)

            if os.path.exists(fn):
                # mostly to warn if multiple ypos/xpos are rounded to the same value
                logging.warning("Overwriting file '%s'.", fn)
            else:
                # FIX: the message previously had an unbalanced quote ("Saving file '%s")
                logging.info("Saving file '%s'.", fn)

            exporter.export(fn, d)
            kwargs["idx"] += 1
Beispiel #11
0
    def _get_export_info(self):
        """
        Return str, str, str: full filename, exporter name, export type
          Full filename is None if cancelled by user
        """
        # Default exporter: first raw (post-processing) or first print-ready
        # format, depending on the configuration.
        view = self._data_model.focussedView.value
        export_type = self.get_export_type(view)
        pr_formats, pp_formats = EXPORTERS[export_type]
        fmt = pp_formats[0] if self._conf.export_raw else pr_formats[0]
        default_exporter = get_converter(fmt)
        extension = default_exporter.EXTENSIONS[0]

        # Suggested name = current file name + stream/view name + default extension
        fi = self._data_model.acq_fileinfo.value
        if fi is not None and fi.file_name:
            basename = os.path.basename(fi.file_name)
            # Strip the longest known acquisition extension (handles double
            # extensions such as ".ome.tiff"); otherwise fall back to splitext().
            formats_to_ext = dataio.get_available_formats()
            all_exts = sum(formats_to_ext.values(), [])
            matching = [ext for ext in all_exts if basename.endswith(ext)]
            if matching:
                basename = basename[:-len(max(matching, key=len))]
            else:
                basename, _ = os.path.splitext(basename)

            # Use the stream name if there is just one stream, otherwise the view name
            streams = view.getStreams()
            if len(streams) == 1:
                basename += " " + streams[0].name.value
            else:
                # TODO: remove numbers from the view name?
                basename += " " + view.name.value
        else:
            basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())

        filepath = os.path.join(self._conf.last_export_path, basename + extension)

        # filepath will be None if cancelled by user
        return self.ShowExportFileDialog(filepath, default_exporter)
Beispiel #12
0
    def test_load_full(self):
        """
        Check the whole sequence: saving calibration data to file, loading it
        back from file, finding it.
        """
        # Background data
        dbckg = numpy.array([1, 2, 2, 3, 4, 5, 4, 6, 9], dtype=numpy.uint16)
        dbckg.shape += (1, 1, 1, 1)
        wl_calib = 400e-9 + numpy.array(range(dbckg.shape[0])) * 10e-9
        bckg = model.DataArray(dbckg, metadata={model.MD_WL_LIST: wl_calib})

        # Give one DA, the correct one, so expect to get it back

        # Compensation data
        # FIX: numpy.float was deprecated in NumPy 1.20 and removed in 1.24;
        # numpy.float64 is the equivalent explicit dtype.
        dcalib = numpy.array([1, 1.3, 2, 3.5, 4, 5, 0.1, 6, 9.1], dtype=numpy.float64)
        dcalib.shape = (dcalib.shape[0], 1, 1, 1, 1)
        wl_calib = 400e-9 + numpy.array(range(dcalib.shape[0])) * 10e-9
        calib = model.DataArray(dcalib, metadata={model.MD_WL_LIST: wl_calib})

        # More DataArrays, just to make it slightly harder to find the data
        data1 = model.DataArray(numpy.ones((1, 1, 1, 520, 230), dtype=numpy.uint16))
        data2 = model.DataArray(numpy.zeros((3, 1, 1, 520, 230), dtype=numpy.uint16))

        # RGB image
        thumb = model.DataArray(numpy.ones((520, 230, 3), dtype=numpy.uint8))

        full_coef = [data1, calib, data2]
        full_bckg = [data1, bckg, data2]

        for fmt in dataio.get_available_formats(os.O_WRONLY):
            exporter = dataio.get_converter(fmt)
            logging.info("Trying to export/import with %s", fmt)
            fn_coef = u"test_spec" + exporter.EXTENSIONS[0]
            exporter.export(fn_coef, full_coef, thumb)
            fn_bckg = u"test_bckg" + exporter.EXTENSIONS[0]
            exporter.export(fn_bckg, full_bckg, thumb)

            # Only formats that can also be read back are round-tripped
            if fmt in dataio.get_available_formats(os.O_RDONLY):
                data_bckg = exporter.read_data(fn_bckg)
                ibckg = calibration.get_spectrum_data(data_bckg)
                data_coef = exporter.read_data(fn_coef)
                icoef = calibration.get_spectrum_efficiency(data_coef)
                numpy.testing.assert_equal(icoef, calib)
                numpy.testing.assert_almost_equal(icoef.metadata[model.MD_WL_LIST],
                                                  calib.metadata[model.MD_WL_LIST])
                numpy.testing.assert_equal(ibckg, bckg)
                numpy.testing.assert_almost_equal(ibckg.metadata[model.MD_WL_LIST],
                                                  bckg.metadata[model.MD_WL_LIST])

            # Clean up both temporary files (best effort, but log failures)
            for fn in (fn_coef, fn_bckg):
                try:
                    os.remove(fn)
                except OSError:
                    logging.exception("Failed to delete the file %s", fn)
Beispiel #13
0
    def test_load_full(self):
        """
        Check the whole sequence: saving calibration data to file, loading it
        back from file, finding it.
        """
        # Background data
        dbckg = numpy.array([1, 2, 2, 3, 4, 5, 4, 6, 9], dtype=numpy.uint16)
        dbckg.shape += (1, 1, 1, 1)
        wl_calib = 400e-9 + numpy.array(range(dbckg.shape[0])) * 10e-9
        bckg = model.DataArray(dbckg, metadata={model.MD_WL_LIST: wl_calib})

        # Give one DA, the correct one, so expect to get it back

        # Compensation data
        # FIX: the numpy.float alias no longer exists (removed in NumPy 1.24)
        # => use numpy.float64, which is what it always aliased in practice.
        dcalib = numpy.array([1, 1.3, 2, 3.5, 4, 5, 0.1, 6, 9.1], dtype=numpy.float64)
        dcalib.shape = (dcalib.shape[0], 1, 1, 1, 1)
        wl_calib = 400e-9 + numpy.array(range(dcalib.shape[0])) * 10e-9
        calib = model.DataArray(dcalib, metadata={model.MD_WL_LIST: wl_calib})

        # More DataArrays, just to make it slightly harder to find the data
        data1 = model.DataArray(numpy.ones((1, 1, 1, 520, 230), dtype=numpy.uint16))
        data2 = model.DataArray(numpy.zeros((3, 1, 1, 520, 230), dtype=numpy.uint16))

        # RGB image
        thumb = model.DataArray(numpy.ones((520, 230, 3), dtype=numpy.uint8))

        full_coef = [data1, calib, data2]
        full_bckg = [data1, bckg, data2]

        for fmt in dataio.get_available_formats(os.O_WRONLY):
            exporter = dataio.get_converter(fmt)
            logging.info("Trying to export/import with %s", fmt)
            fn_coef = u"test_spec" + exporter.EXTENSIONS[0]
            exporter.export(fn_coef, full_coef, thumb)
            fn_bckg = u"test_bckg" + exporter.EXTENSIONS[0]
            exporter.export(fn_bckg, full_bckg, thumb)

            # Round-trip only through formats that can also be read back
            if fmt in dataio.get_available_formats(os.O_RDONLY):
                data_bckg = exporter.read_data(fn_bckg)
                ibckg = calibration.get_spectrum_data(data_bckg)
                data_coef = exporter.read_data(fn_coef)
                icoef = calibration.get_spectrum_efficiency(data_coef)
                numpy.testing.assert_equal(icoef, calib)
                numpy.testing.assert_almost_equal(icoef.metadata[model.MD_WL_LIST],
                                                  calib.metadata[model.MD_WL_LIST])
                numpy.testing.assert_equal(ibckg, bckg)
                numpy.testing.assert_almost_equal(ibckg.metadata[model.MD_WL_LIST],
                                                  bckg.metadata[model.MD_WL_LIST])

            # Best-effort deletion of the temporary files
            for fn in (fn_coef, fn_bckg):
                try:
                    os.remove(fn)
                except OSError:
                    logging.exception("Failed to delete the file %s", fn)
Beispiel #14
0
    def test_load_full(self):
        """
        Check the whole sequence: saving calibration data to file, loading it
        back from file, finding it.
        """
        # AR background data
        bg_raw = numpy.zeros((512, 1024), dtype=numpy.uint16)
        calib_md = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake ccd",
            model.MD_DESCRIPTION: "AR",
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
            model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
            model.MD_POS: (1.2e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_AR_POLE: (253.1, 65.1),
            model.MD_LENS_MAG: 60,  # ratio
        }
        calib = model.DataArray(bg_raw, calib_md)

        # With a single DA, the correct one, we expect to get it straight back
        found = calibration.get_ar_data([calib])
        numpy.testing.assert_equal(found, calib)

        # Extra DataArrays, to make it slightly harder to find the right one
        data1 = model.DataArray(numpy.ones((1, 1, 1, 520, 230),
                                           dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        data2 = model.DataArray(17 * numpy.ones((1, 1), dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        # RGB thumbnail
        thumb = model.DataArray(numpy.ones((520, 230, 3), dtype=numpy.uint8))

        full_data = [data1, calib, data2]

        for fmt in dataio.get_available_formats(os.O_WRONLY):
            exporter = dataio.get_converter(fmt)
            logging.info("Trying to export/import with %s", fmt)
            fname = u"test_ar" + exporter.EXTENSIONS[0]
            exporter.export(fname, full_data, thumb)

            # Round-trip only through formats which can also be read back
            if fmt in dataio.get_available_formats(os.O_RDONLY):
                read_back = exporter.read_data(fname)
                found_calib = calibration.get_ar_data(read_back)
                found_calib2d = img.ensure2DImage(found_calib)
                numpy.testing.assert_equal(found_calib2d, calib)
                numpy.testing.assert_almost_equal(
                    found_calib.metadata[model.MD_AR_POLE],
                    calib.metadata[model.MD_AR_POLE])
            try:
                os.remove(fname)
            except OSError:
                logging.exception("Failed to delete the file %s", fname)
Beispiel #15
0
    def test_load_full(self):
        """
        Check the whole sequence: saving calibration data to file, loading it
        back from file, finding it.
        """
        # AR background data
        dcalib = numpy.zeros((512, 1024), dtype=numpy.uint16)
        md = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake ccd",
            model.MD_DESCRIPTION: "AR",
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
            model.MD_PIXEL_SIZE: (1e-6, 2e-5),  # m/px
            model.MD_POS: (1.2e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_AR_POLE: (253.1, 65.1),
            model.MD_LENS_MAG: 60,  # ratio
        }
        calib = model.DataArray(dcalib, md)

        # Give one DA, the correct one, so expect to get it back
        out = calibration.get_ar_data([calib])
        numpy.testing.assert_equal(out, calib)

        # More DataArrays, just to make it slightly harder to find the data
        data1 = model.DataArray(numpy.ones((1, 1, 1, 520, 230), dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        data2 = model.DataArray(17 * numpy.ones((1, 1), dtype=numpy.uint16),
                                metadata={model.MD_POS: (1.2e-3, -30e-3)})
        # RGB image
        thumb = model.DataArray(numpy.ones((520, 230, 3), dtype=numpy.uint8))

        full_data = [data1, calib, data2]

        for fmt in dataio.get_available_formats(os.O_WRONLY):
            exporter = dataio.get_converter(fmt)
            logging.info("Trying to export/import with %s", fmt)
            fn = u"test_ar" + exporter.EXTENSIONS[0]
            exporter.export(fn, full_data, thumb)

            # Only formats readable back can be round-trip checked
            if fmt in dataio.get_available_formats(os.O_RDONLY):
                idata = exporter.read_data(fn)
                icalib = calibration.get_ar_data(idata)
                icalib2d = img.ensure2DImage(icalib)
                numpy.testing.assert_equal(icalib2d, calib)
                numpy.testing.assert_almost_equal(
                    icalib.metadata[model.MD_AR_POLE],
                    calib.metadata[model.MD_AR_POLE])
            try:
                os.remove(fn)
            except OSError:
                logging.exception("Failed to delete the file %s", fn)
Beispiel #16
0
    def test_ar_export(self):
        """Export an AR stream (with background correction) as raw CSV data."""
        filename = "test-ar.csv"

        # Metadata shared by both AR acquisitions
        base_md = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake ccd",
            model.MD_DESCRIPTION: "AR",
            model.MD_ACQ_TYPE: model.MD_AT_AR,
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
            model.MD_PIXEL_SIZE: (2e-5, 2e-5),  # m/px
            model.MD_POS: (1.2e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_AR_POLE: (253.1, 65.1),
            model.MD_LENS_MAG: 0.4,  # ratio
        }

        md0 = dict(base_md)
        data0 = model.DataArray(1500 + numpy.zeros((512, 512), dtype=numpy.uint16), md0)
        md1 = dict(base_md)
        md1[model.MD_POS] = (1.5e-3, -30e-3)
        md1[model.MD_BASELINE] = 300  # AR background should take this into account
        data1 = model.DataArray(3345 + numpy.zeros((512, 512), dtype=numpy.uint16), md1)

        # AR stream, pointing at the second acquisition position
        ars = stream.StaticARStream("test", [data0, data1])
        ars.point.value = md1[model.MD_POS]

        # Convert to an exportable table: raw data + theta/phi axes values
        exdata = img.ar_to_export_data([ars], raw=True)
        self.assertGreater(exdata.shape[0], 50)
        self.assertGreater(exdata.shape[1], 50)

        # Write the CSV file; os.stat() also checks the file was created
        dataio.get_converter("CSV").export(filename, exdata)
        self.assertGreater(os.stat(filename).st_size, 100)

        # best-effort clean up
        try:
            os.remove(filename)
        except Exception:
            pass
Beispiel #17
0
    def test_ar_export(self):
        """Raw AR export: build two AR images, project them, save as CSV."""
        filename = "test-ar.csv"

        # Create AR data
        md = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake ccd",
            model.MD_DESCRIPTION: "AR",
            model.MD_ACQ_TYPE: model.MD_AT_AR,
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
            model.MD_PIXEL_SIZE: (2e-5, 2e-5),  # m/px
            model.MD_POS: (1.2e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_AR_POLE: (253.1, 65.1),
            model.MD_LENS_MAG: 0.4,  # ratio
        }

        md0 = dict(md)
        data0 = model.DataArray(1500 + numpy.zeros((512, 512), dtype=numpy.uint16), md0)
        md1 = dict(md)
        md1[model.MD_POS] = (1.5e-3, -30e-3)
        md1[model.MD_BASELINE] = 300  # AR background should take this into account
        data1 = model.DataArray(3345 + numpy.zeros((512, 512), dtype=numpy.uint16), md1)

        # Create the AR stream and select the second position
        ar_stream = stream.StaticARStream("test", [data0, data1])
        ar_stream.point.value = md1[model.MD_POS]

        # Raw export: the shape is raw data plus the theta/phi axes values
        table = img.ar_to_export_data([ar_stream], raw=True)
        self.assertGreater(table.shape[0], 50)
        self.assertGreater(table.shape[1], 50)

        # Save into a CSV file; os.stat() also verifies the file was created
        exporter = dataio.get_converter("CSV")
        exporter.export(filename, table)
        self.assertGreater(os.stat(filename).st_size, 100)

        # clean up
        try:
            os.remove(filename)
        except Exception:
            pass
Beispiel #18
0
    def get_export_formats(self, export_type):
        """
        Find the available file formats for the given export_type
        export_type (string): spatial, AR or spectrum
        return (dict string -> list of strings): name of each format -> list of
            extensions.
        """
        pr_formats, pp_formats = EXPORTERS[export_type]

        export_formats = {}
        # Check dynamically which converters are available:
        # print-ready formats first, then post-processing formats.
        for prefix, formats in ((PR_PREFIX, pr_formats), (PP_PREFIX, pp_formats)):
            for format_data in formats:
                exporter = get_converter(format_data)
                export_formats[prefix + " " + exporter.FORMAT] = exporter.EXTENSIONS

        if not export_formats:
            logging.error("No file converter found!")

        return export_formats
Beispiel #19
0
def save_data(data, **kwargs):
    """
    Saves the data into a file
    data (model.DataArray or list of model.DataArray): the data to save
    kwargs (dict (str->value)): values to substitute in the file name
    """
    exporter = dataio.get_converter(FMT)
    fn = FN_FMT % kwargs

    if os.path.exists(fn):
        # mostly to warn if multiple ypos/xpos are rounded to the same value
        logging.warning("Overwriting file '%s'.", fn)
    else:
        # FIX: the message previously had an unbalanced quote ("Saving file '%s")
        logging.info("Saving file '%s'.", fn)

    exporter.export(fn, data)
Beispiel #20
0
def save_data(data, **kwargs):
    """
    Saves the data into a file
    data (model.DataArray or list of model.DataArray): the data to save
    kwargs (dict (str->value)): values to substitute in the file name
    """
    exporter = dataio.get_converter(FMT)
    fn = FN_FMT % kwargs

    if os.path.exists(fn):
        # mostly to warn if multiple ypos/xpos are rounded to the same value
        logging.warning("Overwriting file '%s'.", fn)
    else:
        # Fixed: the message had an unbalanced quote ("Saving file '%s")
        logging.info("Saving file '%s'.", fn)

    exporter.export(fn, data)
Beispiel #21
0
    def _export_to_file(self, acq_future):
        """
        Export the data acquired by the given future to the configured file.
        return (list of DataArray, Exception or None, str): data exported,
          acquisition error (if any), and the filename used
        """
        st = self._tab_data_model.acquisitionView.stream_tree
        thumb = acq.computeThumbnail(st, acq_future)
        data, exp = acq_future.result()

        # Compute the filename up-front: it was previously only assigned in
        # the "data" branch, so the log call and the return statement raised
        # NameError when the acquisition produced no data.
        filename = self.filename.value
        if data:
            exporter = dataio.get_converter(self.conf.last_format)
            exporter.export(filename, data, thumb)
            logging.info(u"Acquisition saved as file '%s'.", filename)
        else:
            logging.debug("Not saving into file '%s' as there is no data", filename)

        return data, exp, filename
Beispiel #22
0
    def _export_to_file(self, acq_future):
        """
        Save the result of the given acquisition future to the configured file.
        return (list of DataArray, Exception or None, str): the data exported,
          the error which interrupted the acquisition (if any), and the filename
        """
        stream_tree = self._tab_data_model.acquisitionView.stream_tree
        thumb = acq.computeThumbnail(stream_tree, acq_future)
        data, exp = acq_future.result()

        # Acquisition may have "partially" failed: some streams acquired,
        # then an error stopped the rest
        if exp:
            logging.error("Acquisition failed (after %d streams): %s",
                          len(data), exp)

        filename = self.filename.value
        converter = dataio.get_converter(self.conf.last_format)
        converter.export(filename, data, thumb)
        logging.info(u"Acquisition saved as file '%s'.", filename)
        return data, exp, filename
Beispiel #23
0
    def _export_to_file(self, acq_future):
        """
        Export the data acquired by the given future to the configured file.
        return (list of DataArray, Exception or None, str): data exported,
          acquisition error (if any), and the filename used
        """
        st = self._tab_data_model.acquisitionView.stream_tree
        thumb = acq.computeThumbnail(st, acq_future)
        data, exp = acq_future.result()

        # Compute the filename up-front: it was previously only assigned in
        # the "data" branch, so the log call and the return statement raised
        # NameError when the acquisition produced no data.
        filename = self.filename.value
        if data:
            exporter = dataio.get_converter(self.conf.last_format)
            exporter.export(filename, data, thumb)
            logging.info(u"Acquisition saved as file '%s'.", filename)
        else:
            logging.debug("Not saving into file '%s' as there is no data",
                          filename)

        return data, exp, filename
Beispiel #24
0
    def test_line_raw(self):
        filename = "test-spec-line.csv"

        raw = img.line_to_export_data(self.spectrum, True, self.unit, self.spectrum_range)
        # The exported array has the spectrum's dimensions swapped
        self.assertEqual(raw.shape[0], self.spectrum.shape[1])
        self.assertEqual(raw.shape[1], self.spectrum.shape[0])

        # Save into a CSV file
        csv_conv = dataio.get_converter("CSV")
        csv_conv.export(filename, raw)
        # os.stat also checks that the file was actually created
        self.assertGreater(os.stat(filename).st_size, 100)

        # clean up
        try:
            os.remove(filename)
        except Exception:
            pass
Beispiel #25
0
    def test_spectrum_raw(self):

        filename = "test-spec-spot.csv"

        raw = img.spectrum_to_export_data(self.spectrum, True, self.unit, self.spectrum_range)
        # Raw export contains one entry per value of the spectrum range
        self.assertEqual(raw.shape[0], len(self.spectrum_range))

        # Save into a CSV file
        csv_conv = dataio.get_converter("CSV")
        csv_conv.export(filename, raw)
        # os.stat also checks that the file was actually created
        self.assertGreater(os.stat(filename).st_size, 10)

        # clean up
        try:
            os.remove(filename)
        except Exception:
            pass
Beispiel #26
0
    def _export_to_file(self, acq_future):
        """
        Write the result of the given acquisition future to the chosen file.
        return (list of DataArray, Exception or None, str): the acquired data,
          the acquisition error (if any), and the filename used
        """
        tree = self._tab_data_model.acquisitionView.stream_tree
        thumbnail = acq.computeThumbnail(tree, acq_future)
        data, exp = acq_future.result()

        # A partial failure leaves the successfully acquired streams in data
        # and the interrupting error in exp
        if exp:
            logging.error("Acquisition failed (after %d streams): %s",
                          len(data), exp)

        filename = self.filename.value
        fmt_conv = dataio.get_converter(self.conf.last_format)
        fmt_conv.export(filename, data, thumbnail)
        logging.info(u"Acquisition saved as file '%s'.", filename)
        return data, exp, filename
Beispiel #27
0
    def _get_export_info(self):
        """
        Return str, str, str: full filename, exporter name, export type
          Full filename is None if cancelled by user
        """
        # Set default to the first of the list
        export_type = self.get_export_type(self._data_model.focussedView.value)
        formats = EXPORTERS[export_type]
        default_exporter = get_converter(formats[0][0])
        extension = default_exporter.EXTENSIONS[0]
        # TODO: default to the same path/filename as current file (but different extension)
        basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())
        filepath = os.path.join(get_picture_folder(), basename + extension)
        # filepath will be None if cancelled by user
        filepath, export_format, export_type = self.ShowExportFileDialog(filepath, default_exporter)
        # get rid of the prefix before you ask for the exporter
        # NOTE: on a cancelled dialog export_format may be None, so guard
        # before calling .split() on it (previously raised AttributeError)
        if export_format and any(prefix in export_format.split(' ') for prefix in (PR_PREFIX, PP_PREFIX)):
            export_format = export_format.split(' ', 1)[1]

        return filepath, export_format, export_type
Beispiel #28
0
    def save_data(self, data, **kwargs):
        """
        Saves the data into a file.
        :param data: (model.DataArray or list of model.DataArray) The data to save.
        :param kwargs: (dict (str->value)) The values to substitute in the file name.
        """
        # export to single tiff files
        exporter = dataio.get_converter(FMT)

        fn = FN_FMT % kwargs

        # Save first image for display in analysis tab
        if (kwargs["xpos"], kwargs["ypos"]) == (1, 1):
            self.firstOptImg = fn

        if os.path.exists(fn):
            # mostly to warn if multiple ypos/xpos are rounded to the same value
            logging.warning("Overwriting file '%s'.", fn)
        else:
            # Fixed: the message had an unbalanced quote ("Saving file '%s")
            logging.info("Saving file '%s'.", fn)

        exporter.export(fn, data)
Beispiel #29
0
    def _get_snapshot_info(self, dialog=False):
        """
        Work out where a snapshot should be saved.
        dialog (bool): if True, ask the user for the file name, otherwise
          build it automatically in the picture folder.
        return (Tab or None, str or None, converter): the current tab, the
          full file path (None if cancelled or already existing) and the
          exporter to use.
        """
        config = conf.get_acqui_conf()

        tab = self._main_data_model.tab.value
        filepath = None

        basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())
        target = basename + config.last_extension
        if dialog:
            # filepath will be None if cancelled by user
            filepath = ShowAcquisitionFileDialog(self._main_frame,
                                                 os.path.join(config.last_path, target))
        else:
            filepath = os.path.join(get_picture_folder(), target)
            if os.path.exists(filepath):
                # Never silently overwrite an existing snapshot
                logging.warning("File '%s' already exists, cancelling snapshot",
                                filepath)
                tab, filepath = None, None

        exporter = dataio.get_converter(config.last_format)

        return tab, filepath, exporter
Beispiel #30
0
    def _get_snapshot_info(self, dialog=False):
        """
        Decide where (and whether) a snapshot should be stored.
        dialog (bool): if True, the user picks the file name via a dialog,
          otherwise it is generated in the picture folder.
        return (Tab or None, str or None, converter): tab and full path are
          None when the snapshot must be cancelled.
        """
        config = conf.get_acqui_conf()

        tab, filepath, exporter = self._main_data_model.tab.value, None, None

        extension = config.last_extension
        basename = time.strftime("%Y%m%d-%H%M%S", time.localtime())
        if dialog:
            default_path = os.path.join(config.last_path, basename + extension)
            # filepath will be None if cancelled by user
            filepath = ShowAcquisitionFileDialog(self._main_frame, default_path)
        else:
            filepath = os.path.join(get_picture_folder(), basename + extension)

            if os.path.exists(filepath):
                # Refuse to overwrite: drop the snapshot instead
                msg = "File '%s' already exists, cancelling snapshot"
                logging.warning(msg, filepath)
                tab, filepath = None, None

        exporter = dataio.get_converter(config.last_format)

        return tab, filepath, exporter
Beispiel #31
0
def open_acquisition(filename, fmt=None):
    """
    Opens the data according to the type of file, and returns the opened data.
    If it's a pyramidal image, do not fetch the whole data from the image. If the image
    is not pyramidal, it reads the entire image and returns it
    filename (string): Name of the file where the image is
    fmt (string): The format of the file
    return (list of DataArrays or DataArrayShadows): The opened acquisition source
    """
    if fmt:
        converter = dataio.get_converter(fmt)
    else:
        converter = dataio.find_fittest_converter(filename, mode=os.O_RDONLY)

    data = []
    try:
        # Converters providing open_data expose the content lazily, without
        # reading the whole image into memory
        opener = getattr(converter, "open_data", None)
        if opener is not None:
            data = opener(filename).content
        else:
            data = converter.read_data(filename)
    except Exception:
        logging.exception("Failed to open file '%s' with format %s", filename, fmt)

    return data
Beispiel #32
0
    def test_big_ar_export(self):

        # Metadata of a simulated angular-resolved CCD acquisition
        md = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake ccd",
            model.MD_DESCRIPTION: "AR",
            model.MD_ACQ_TYPE: model.MD_AT_AR,
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
            model.MD_PIXEL_SIZE: (4e-5, 4e-5),  # m/px
            model.MD_POS: (1.2e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_AR_POLE: (500, 500),
        }

        fg_md = dict(md)
        fg_da = model.DataArray(numpy.zeros((1080, 1024), dtype=numpy.uint16) + 1500, fg_md)
        bg_md = dict(md)
        bg_md[model.MD_POS] = (1.5e-3, -30e-3)
        bg_md[model.MD_BASELINE] = 300  # AR background should take this into account
        bg_da = model.DataArray(numpy.zeros((1080, 1024), dtype=numpy.uint16) + 500, bg_md)

        # Create AR stream
        ars = stream.StaticARStream("test", [fg_da])
        ars.point.value = fg_md[model.MD_POS]

        # Wait (at most 90 s) for the projection to be computed
        deadline = time.time() + 90
        while ars.image.value is None:
            self.assertLess(time.time(), deadline, "Timeout during AR computation")
            time.sleep(0.1)

        # Convert to exportable RGB image
        exported = img.ar_to_export_data([ars], raw=False)
        # shape = RGBA
        self.assertGreater(exported.shape[0], 200)
        self.assertGreater(exported.shape[1], 200)
        self.assertEqual(exported.shape[2], 4)

        # The top-left corner should be white
        numpy.testing.assert_equal(exported[0, 0], [255, 255, 255, 255])
        # There should be some non-white data
        self.assertTrue(numpy.any(exported != 255))

        # Save into a PNG file
        png_conv = dataio.get_converter("PNG")
        png_conv.export(self.FILENAME_PR, exported)
        # os.stat also checks that the file was actually created
        self.assertGreater(os.stat(self.FILENAME_PR).st_size, 1000)

        # Convert to equirectangular (RAW) image
        exported = img.ar_to_export_data([ars], raw=True)
        # shape = raw data + theta/phi axes values
        self.assertGreater(exported.shape[0], 50)
        self.assertGreater(exported.shape[1], 50)

        # Save into a CSV file
        csv_conv = dataio.get_converter("CSV")
        csv_conv.export(self.FILENAME_RAW, exported)
        self.assertGreater(os.stat(self.FILENAME_RAW).st_size, 100)

        # Redo the raw export, this time with a background image set
        ars.background.value = bg_da

        exported = img.ar_to_export_data([ars], raw=True)
        # shape = raw data + theta/phi axes values
        self.assertGreater(exported.shape[0], 50)
        self.assertGreater(exported.shape[1], 50)
Beispiel #33
0
    def on_acquisition_done(self, future):
        """ Callback called when the acquisition is finished (either successfully or cancelled) """
        self._set_fan(True)  # Turn the fan back on

        # bind button back to direct closure
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_close)
        self._resume_settings()

        # re-enable estimation time updates
        view = self._tab_data_model.focussedView.value
        view.lastUpdate.subscribe(self.on_streams_changed)

        try:
            data, exp = future.result(1) # timeout is just for safety
        except CancelledError:
            # put back to original state:
            # re-enable the acquire button
            self.btn_secom_acquire.Enable()

            # hide progress bar (+ put pack estimated time)
            self.update_acquisition_time()
            self.gauge_acq.Hide()
            self.Layout()
            return
        except Exception:
            # We cannot do much: just warn the user and pretend it was cancelled
            logging.exception("Acquisition failed")
            self.btn_secom_acquire.Enable()
            self.lbl_acqestimate.SetLabel("Acquisition failed.")
            # leave the gauge, to give a hint on what went wrong.
            return

        # Handle the case acquisition failed "a bit": the streams acquired so
        # far are in data, the interrupting error is in exp
        if exp:
            logging.error("Acquisition failed (after %d streams): %s",
                          len(data), exp)

        # save result to file
        self.lbl_acqestimate.SetLabel("Saving file...")
        try:
            thumb = acq.computeThumbnail(
                            self._tab_data_model.focussedView.value.stream_tree,
                            future)
            filename = self.filename.value
            exporter = dataio.get_converter(self.conf.last_format)
            exporter.export(filename, data, thumb)
            logging.info("Acquisition saved as file '%s'.", filename)
        except Exception:
            # Saving failed: report it and leave the dialog usable
            logging.exception("Saving acquisition failed")
            self.btn_secom_acquire.Enable()
            self.lbl_acqestimate.SetLabel("Saving acquisition file failed.")
            return

        if exp:
            self.lbl_acqestimate.SetLabel("Acquisition failed (partially).")
        else:
            self.lbl_acqestimate.SetLabel("Acquisition completed.")
            # As the action is complete, rename "Cancel" to "Close"
            self.btn_cancel.SetLabel("Close")

        # Make sure the file is not overridden: the next acquisition gets a
        # fresh default name
        self.filename.value = self._get_default_filename()
        self.btn_secom_acquire.Enable()
Beispiel #34
0
    def test_big_ar_export(self):

        # Metadata of a simulated angular-resolved CCD acquisition
        md = {
            model.MD_SW_VERSION: "1.0-test",
            model.MD_HW_NAME: "fake ccd",
            model.MD_DESCRIPTION: "AR",
            model.MD_ACQ_TYPE: model.MD_AT_AR,
            model.MD_ACQ_DATE: time.time(),
            model.MD_BPP: 12,
            model.MD_BINNING: (1, 1),  # px, px
            model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6),  # m/px
            model.MD_PIXEL_SIZE: (4e-5, 4e-5),  # m/px
            model.MD_POS: (1.2e-3, -30e-3),  # m
            model.MD_EXP_TIME: 1.2,  # s
            model.MD_AR_POLE: (500, 500),
        }

        md0 = dict(md)
        data0 = model.DataArray(numpy.zeros((1080, 1024), dtype=numpy.uint16) + 1500,
                                md0)
        md1 = dict(md)
        md1[model.MD_POS] = (1.5e-3, -30e-3)
        md1[model.MD_BASELINE] = 300  # AR background should take this into account
        data1 = model.DataArray(numpy.zeros((1080, 1024), dtype=numpy.uint16) + 500,
                                md1)

        # Create AR stream
        ars = stream.StaticARStream("test", [data0])
        ars.point.value = md0[model.MD_POS]

        # Wait (at most 90 s) for the projection to be computed
        tend = time.time() + 90
        while ars.image.value is None:
            self.assertLess(time.time(), tend, "Timeout during AR computation")
            time.sleep(0.1)

        # Convert to exportable RGB image
        rgb = img.ar_to_export_data([ars], raw=False)
        # shape = RGBA
        self.assertGreater(rgb.shape[0], 200)
        self.assertGreater(rgb.shape[1], 200)
        self.assertEqual(rgb.shape[2], 4)

        # The top-left corner should be white
        numpy.testing.assert_equal(rgb[0, 0], [255, 255, 255, 255])
        # There should be some non-white data
        self.assertTrue(numpy.any(rgb != 255))

        # Save into a PNG file
        png_conv = dataio.get_converter("PNG")
        png_conv.export(self.FILENAME_PR, rgb)
        # os.stat also checks that the file was actually created
        self.assertGreater(os.stat(self.FILENAME_PR).st_size, 1000)

        # Convert to equirectangular (RAW) image
        raw = img.ar_to_export_data([ars], raw=True)
        # shape = raw data + theta/phi axes values
        self.assertGreater(raw.shape[0], 50)
        self.assertGreater(raw.shape[1], 50)

        # Save into a CSV file
        csv_conv = dataio.get_converter("CSV")
        csv_conv.export(self.FILENAME_RAW, raw)
        self.assertGreater(os.stat(self.FILENAME_RAW).st_size, 100)

        # Redo the raw export, this time with a background image set
        ars.background.value = data1

        raw = img.ar_to_export_data([ars], raw=True)
        # shape = raw data + theta/phi axes values
        self.assertGreater(raw.shape[0], 50)
        self.assertGreater(raw.shape[1], 50)
Beispiel #35
0
    def on_acquisition_done(self, future):
        """ Callback called when the acquisition is finished (either successfully or cancelled) """
        self._set_fan(True)  # Turn the fan back on

        # bind button back to direct closure
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_close)
        self._resume_settings()

        # re-enable estimation time updates
        self._view.lastUpdate.subscribe(self.on_streams_changed)

        try:
            data, exp = future.result(1) # timeout is just for safety
        except CancelledError:
            # put back to original state:
            # re-enable the acquire button
            self.btn_secom_acquire.Enable()

            # hide progress bar (+ put pack estimated time)
            self.update_acquisition_time()
            self.gauge_acq.Hide()
            self.Layout()
            return
        except Exception:
            # We cannot do much: just warn the user and pretend it was cancelled
            logging.exception("Acquisition failed")
            self.btn_secom_acquire.Enable()
            self.lbl_acqestimate.SetLabel("Acquisition failed.")
            self.lbl_acqestimate.Parent.Layout()
            # leave the gauge, to give a hint on what went wrong.
            return

        # Handle the case acquisition failed "a bit": the streams acquired so
        # far are in data, the interrupting error is in exp
        if exp:
            logging.error("Acquisition failed (after %d streams): %s",
                          len(data), exp)

        # save result to file
        self.lbl_acqestimate.SetLabel("Saving file...")
        self.lbl_acqestimate.Parent.Layout()
        try:
            thumb = acq.computeThumbnail(self._view.stream_tree, future)
            filename = self.filename.value
            exporter = dataio.get_converter(self.conf.last_format)
            exporter.export(filename, data, thumb)
            logging.info("Acquisition saved as file '%s'.", filename)
            # Allow to see the acquisition
            self.btn_secom_acquire.SetLabel("VIEW")
            self.last_saved_file = filename
        except Exception:
            # Saving failed: report it and leave the dialog usable
            logging.exception("Saving acquisition failed")
            self.btn_secom_acquire.Enable()
            self.lbl_acqestimate.SetLabel("Saving acquisition file failed.")
            self.lbl_acqestimate.Parent.Layout()
            return

        if exp:
            self.lbl_acqestimate.SetLabel("Acquisition failed (partially).")
        else:
            self.lbl_acqestimate.SetLabel("Acquisition completed.")
            # As the action is complete, rename "Cancel" to "Close"
            self.btn_cancel.SetLabel("Close")
        self.lbl_acqestimate.Parent.Layout()

        # Make sure the file is not overridden
        # NOTE(review): unlike other variants of this handler, the filename is
        # not regenerated here — presumably so the VIEW button can still reach
        # the saved file; confirm a second acquisition cannot overwrite it.
        self.btn_secom_acquire.Enable()
Beispiel #36
0
 def test_get_converter(self):
     """
     Check every available format has a converter with at least one extension.
     """
     fmts = get_available_formats()
     for fmt in fmts:
         fmt_mng = get_converter(fmt)
         # Compare the number of extensions, not the sequence itself:
         # comparing a list/tuple with an int is a TypeError on Python 3
         self.assertGreaterEqual(len(fmt_mng.EXTENSIONS), 1)
Beispiel #37
0
    def on_acquisition_done(self, future):
        """ Callback called when the acquisition is finished (either successfully or cancelled) """
        if self._main_data_model.opm:
            # Acquisition over: put the optical path manager back in fast mode
            self._main_data_model.opm.setAcqQuality(path.ACQ_QUALITY_FAST)

        # bind button back to direct closure
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.on_close)
        self._resume_settings()

        self.acquiring = False

        # re-enable estimation time updates
        self._view.lastUpdate.subscribe(self.on_streams_changed)

        self.acq_future = None  # To avoid holding the ref in memory
        self._acq_future_connector = None

        try:
            data, exp = future.result(1)  # timeout is just for safety
            # Acquisition (at least partly) succeeded: bump the file counter
            self.conf.fn_count = update_counter(self.conf.fn_count)
        except CancelledError:
            # put back to original state:
            # re-enable the acquire button
            self.btn_secom_acquire.Enable()

            # hide progress bar (+ put pack estimated time)
            self.update_acquisition_time()
            self.gauge_acq.Hide()
            self.Layout()
            return
        except Exception:
            # We cannot do much: just warn the user and pretend it was cancelled
            logging.exception("Acquisition failed")
            self.btn_secom_acquire.Enable()
            self.lbl_acqestimate.SetLabel("Acquisition failed.")
            self.lbl_acqestimate.Parent.Layout()
            # leave the gauge, to give a hint on what went wrong.
            return

        # Handle the case acquisition failed "a bit": the streams acquired so
        # far are in data, the interrupting error is in exp
        if exp:
            logging.warning("Acquisition failed (after %d streams): %s",
                            len(data), exp)

        # save result to file
        self.lbl_acqestimate.SetLabel("Saving file...")
        self.lbl_acqestimate.Parent.Layout()
        try:
            thumb = acqmng.computeThumbnail(self._view.stream_tree, future)
            filename = self.filename.value
            exporter = dataio.get_converter(self.conf.last_format)
            exporter.export(filename, data, thumb)
            logging.info("Acquisition saved as file '%s'.", filename)
            # Allow to see the acquisition
            self.btn_secom_acquire.SetLabel("VIEW")
            self.last_saved_file = filename
        except Exception:
            # Saving failed: report it and leave the dialog usable
            logging.exception("Saving acquisition failed")
            self.btn_secom_acquire.Enable()
            self.lbl_acqestimate.SetLabel("Saving acquisition file failed.")
            self.lbl_acqestimate.Parent.Layout()
            return

        if exp:
            self.lbl_acqestimate.SetLabel("Acquisition failed (partially).")
        else:
            self.lbl_acqestimate.SetLabel("Acquisition completed.")
            # As the action is complete, rename "Cancel" to "Close"
            self.btn_cancel.SetLabel("Close")
        self.lbl_acqestimate.Parent.Layout()

        # Make sure the file is not overridden
        # NOTE(review): the filename is not regenerated here — presumably so
        # the VIEW button can still reach the saved file; confirm a second
        # acquisition cannot overwrite it.
        self.btn_secom_acquire.Enable()
Beispiel #38
0
    def export_viewport(self, filepath, export_format, export_type):
        """ Export the image from the focused view to the filesystem.

        :param filepath: (str) full path to the destination file
        :param export_format: (str) the format name
        :param export_type: (str) spatial, AR, spectrum or spectrum-line
        """
        try:
            exporter = get_converter(export_format)
            raw = export_format in EXPORTERS[export_type][1]
            self._conf.export_raw = raw
            self._conf.last_export_path = os.path.dirname(filepath)

            exported_data = self.export(export_type, raw)

            # batch export
            # TODO: for now we do not create a new folder where all files are saved

            if isinstance(exported_data, dict):
                # get the file names
                filename_dict = {}
                n_exist = 0
                dir_name, base = os.path.split(filepath)
                base_name, file_extension = splitext(base)
                for key in exported_data.keys():
                    # use the filename defined by the user and add the MD_POL_MODE to the filename
                    filename = os.path.join(
                        dir_name, base_name + "_" + key + file_extension)

                    # detect we'd overwrite an existing file => show our own warning
                    if os.path.exists(filename):
                        n_exist += 1

                    filename_dict[key] = filename

                if n_exist:
                    dlg = wx.MessageDialog(
                        self._main_frame,
                        "Some files (%d/%d) already exists.\n"
                        "Do you want to replace them?" %
                        (n_exist, len(filename_dict)), "Files already exist",
                        wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
                    ret = dlg.ShowModal()
                    dlg.Destroy()
                    if ret == wx.ID_NO:
                        return

                # record everything to a file
                for key, data in exported_data.items():
                    exporter.export(filename_dict[key], data)

                # TODO need to be adapted for batch export as now redundant
                popup.show_message(
                    self._main_frame,
                    "Images exported",
                    "Stored as %s" % (os.path.join(
                        dir_name, base_name + "_" + "xxx" + file_extension), ),
                    timeout=3)

                logging.info(
                    "Exported a %s view into files with name of type '%s'.",
                    export_type,
                    os.path.join(dir_name,
                                 base_name + "_" + "mode" + file_extension))

            else:  # single file export
                exporter.export(filepath, exported_data)

                popup.show_message(self._main_frame,
                                   "Image exported",
                                   "Stored in %s" % (filepath, ),
                                   timeout=3)

                logging.info("Exported a %s view into file '%s'.", export_type,
                             filepath)
        except LookupError as ex:
            logging.info("Export of a %s view as %s seems to contain no data.",
                         export_type,
                         export_format,
                         exc_info=True)
            dlg = wx.MessageDialog(
                self._main_frame, "Failed to export: %s\n"
                "Please make sure that at least one stream is visible in the current view."
                % (ex, ), "No data to export", wx.OK | wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
        except Exception:
            logging.exception("Failed to export a %s view as %s", export_type,
                              export_format)