Example #1
 def test_time_zone_in_extended_data_to_data_element_to_data_item_conversion(
         self):
     # test the whole path, redundant?
     data = numpy.ones((8, 6), int)
     metadata = {
         "description": {
             "time_zone": {
                 "tz": "+0300",
                 "dst": "+60"
             }
         },
         "hardware_source": {
             "one": 1,
             "two": "b"
         }
     }
     timestamp = datetime.datetime(2013, 11, 18, 14, 5, 4, 1)
     xdata = DataAndMetadata.new_data_and_metadata(data,
                                                   metadata=metadata,
                                                   timestamp=timestamp)
     data_element = ImportExportManager.create_data_element_from_extended_data(
         xdata)
     data_item = ImportExportManager.create_data_item_from_data_element(
         data_element)
     self.assertEqual(data_item.metadata["description"]["time_zone"]["tz"],
                      "+0300")
     self.assertEqual(data_item.metadata["description"]["time_zone"]["dst"],
                      "+60")
     self.assertEqual("2013-11-18 14:05:04.000001", str(data_item.created))
Example #2
 def test_data_item_to_data_element_and_back_keeps_large_format_flag(self):
     data_item = DataItem.DataItem(numpy.zeros((4, 4, 4)), large_format=True)
     with contextlib.closing(data_item):
         data_element = ImportExportManager.create_data_element_from_data_item(data_item, include_data=True)
         self.assertTrue(data_element.get("large_format"))
         with contextlib.closing(ImportExportManager.create_data_item_from_data_element(data_element)) as data_item:
             self.assertTrue(data_item.large_format)
Example #3
 def test_importing_rgb_does_not_set_large_format(self):
     data_item = DataItem.DataItem(numpy.zeros((8, 8, 4), dtype=float))
     with contextlib.closing(data_item):
         data_item_rgb = DataItem.DataItem(numpy.zeros((8, 8, 4), dtype=numpy.uint8))
         with contextlib.closing(data_item_rgb):
             data_element = ImportExportManager.create_data_element_from_data_item(data_item, include_data=True)
             data_element_rgb = ImportExportManager.create_data_element_from_data_item(data_item_rgb, include_data=True)
             data_element.pop("large_format")
             data_element_rgb.pop("large_format")
             with contextlib.closing(ImportExportManager.create_data_item_from_data_element(data_element)) as data_item:
                 with contextlib.closing(ImportExportManager.create_data_item_from_data_element(data_element_rgb)) as data_item_rgb:
                     self.assertTrue(data_item.large_format)
                     self.assertFalse(data_item_rgb.large_format)
Example #4
 def test_data_item_to_data_element_includes_time_zone(self):
     # created/modified are utc; timezone is specified in metadata/description/time_zone
     data_item = DataItem.DataItem(numpy.zeros((16, 16)))
     data_item.created = datetime.datetime(2013, 6, 18, 14, 5, 4,
                                           0)  # always utc
     data_item.timezone = "Europe/Athens"
     data_item.timezone_offset = "+0300"
     data_item._set_modified(datetime.datetime(2013, 6, 18, 14, 5, 4,
                                               0))  # always utc
     data_item.metadata = {
         "description": {
             "time_zone": {
                 "tz": "+0300",
                 "dst": "+60"
             }
         }
     }
     data_element = ImportExportManager.create_data_element_from_data_item(
         data_item, include_data=False)
     self.assertEqual(
         data_element["datetime_modified"], {
             "dst": "+60",
             "local_datetime": "2013-06-18T17:05:04",
             'tz': "+0300",
             'timezone': "Europe/Athens"
         })
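A note on the datetime arithmetic above: "local_datetime" is the UTC timestamp shifted by the "tz" offset, so 14:05:04 UTC with "+0300" becomes 17:05:04 local. A minimal stdlib-only sketch of that conversion (parsing "+HHMM" by hand is our assumption, not the library's code):

 import datetime

 utc = datetime.datetime(2013, 6, 18, 14, 5, 4)
 tz = "+0300"  # offset string as stored in datetime_modified["tz"]
 sign = 1 if tz[0] == "+" else -1
 offset = sign * datetime.timedelta(hours=int(tz[1:3]), minutes=int(tz[3:5]))
 local = utc + offset
 assert local.isoformat() == "2013-06-18T17:05:04"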
Example #5
 def test_build_table_with_two_display_layers_of_same_calibrated_1d_data(
         self):
     with TestContext.create_memory_context() as test_context:
         calibration = Calibration.Calibration(1.0, 2.0, "nm")
         document_model = test_context.create_document_model()
         data_item = DataItem.DataItem(numpy.array([1.1, 1.2, 1.3, 1.4]))
         data_item.set_dimensional_calibration(0, calibration)
         data_item.set_intensity_calibration(
             Calibration.Calibration(0, 1, "e"))
         data_item2 = DataItem.DataItem(numpy.array([2.1, 2.2, 2.3, 2.4]))
         data_item2.set_dimensional_calibration(0, calibration)
         data_item2.set_intensity_calibration(
             Calibration.Calibration(0, 1, "e"))
         document_model.append_data_item(data_item)
         document_model.append_data_item(data_item2)
         display_item = document_model.get_display_item_for_data_item(
             data_item)
         display_item.append_display_data_channel_for_data_item(data_item2)
         display_item._set_display_layer_property(0, "label", "W")
         display_item._set_display_layer_property(1, "label", "T")
         headers, data = ImportExportManager.build_table(display_item)
         self.assertEqual(3, len(headers))
         self.assertEqual(3, len(data))
         self.assertEqual("X (nm)", headers[0])
         self.assertEqual("W (e)", headers[1])
         self.assertEqual("T (e)", headers[2])
         self.assertTrue(
             numpy.array_equal(
                 calibration.convert_to_calibrated_value(
                     numpy.arange(0, data_item.data.shape[0])), data[0]))
         self.assertTrue(numpy.array_equal(data_item.data, data[1]))
         self.assertTrue(numpy.array_equal(data_item2.data, data[2]))
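The calibrated X column above comes from Calibration.convert_to_calibrated_value. A minimal sketch, assuming the usual linear calibration in nion.data where calibrated(x) = offset + scale * x:

 import numpy
 from nion.data import Calibration

 calibration = Calibration.Calibration(1.0, 2.0, "nm")
 pixels = numpy.arange(0, 4)
 # Calibration(offset=1.0, scale=2.0) maps [0, 1, 2, 3] to [1.0, 3.0, 5.0, 7.0]
 assert numpy.array_equal(calibration.convert_to_calibrated_value(pixels), 1.0 + 2.0 * pixels)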
Example #6
 def do_export(self, display_items):
     directory = self.directory
     writer = self.writer
     if directory:
         for index, display_item in enumerate(display_items):
             data_item = display_item.data_item
             try:
                 components = list()
                 if self.options.get("prefix", False):
                     components.append(str(self.prefix_edit_widget.text))
                 if self.options.get("title", False):
                     title = unicodedata.normalize('NFKC', data_item.title)
                     title = re.sub(r'[^\w\s-]', '', title, flags=re.U).strip()
                     title = re.sub(r'[-\s]+', '-', title, flags=re.U)
                     components.append(title)
                 if self.options.get("date", False):
                     components.append(data_item.created_local.isoformat().replace(':', ''))
                 if self.options.get("dimensions", False):
                     components.append(
                         "x".join([str(shape_n) for shape_n in data_item.dimensional_shape]))
                 if self.options.get("sequence", False):
                     components.append(str(index))
                 filename = "_".join(components)
                 extension = writer.extensions[0]
                 path = os.path.join(directory, "{0}.{1}".format(filename, extension))
                 ImportExportManager.ImportExportManager().write_display_item_with_writer(self.ui, writer, display_item, path)
             except Exception as e:
                 logging.debug("Could not export image %s / %s", str(data_item), str(e))
                 traceback.print_exc()
                 traceback.print_stack()
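The title component above is a small slugify step: NFKC-normalize, drop anything that is not a word character, whitespace, or dash, then collapse runs of whitespace and dashes into single dashes. A standalone sketch of the same pattern (the function name is ours, not the exporter's):

 import re
 import unicodedata

 def slugify_title(title):
     # normalize unicode, strip unsafe characters, collapse whitespace/dashes
     title = unicodedata.normalize('NFKC', title)
     title = re.sub(r'[^\w\s-]', '', title, flags=re.U).strip()
     return re.sub(r'[-\s]+', '-', title, flags=re.U)

 assert slugify_title("Spectrum #1: raw data") == "Spectrum-1-raw-data"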
Example #7
 def exec_grab():
     # this will execute in a thread; the enclosing async routine will continue when it finishes
     try:
         start_time = time.time()
         max_wait_time = max(
             hardware_source.get_current_frame_time() * 1.5, 3)
         while hardware_source.is_playing:
             if time.time() - start_time > max_wait_time:
                 success_ref[0] = False
                 return
             time.sleep(0.01)
         data_element_groups = hardware_source.get_buffer_data(
             -frame_count, frame_count)
         for data_element_group in data_element_groups:
             if self.cancel_event.is_set():
                 success_ref[0] = False
                 break
             xdata_group = list()
             for data_element in data_element_group:
                 xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
                     data_element)
                 xdata_group.append(xdata)
             xdata_group_list.append(xdata_group)
         self.progress_model.value = 100
     except Exception as e:
         import traceback
         traceback.print_exc()
         success_ref[0] = False
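The loop at the top of exec_grab is a bounded polling pattern: sleep in short increments until the hardware source stops playing, giving up after 1.5 frame times (but at least 3 seconds). A generic sketch of that pattern (names here are hypothetical, not the hardware source API):

 import time

 def wait_until_stopped(is_playing, frame_time):
     # poll with a short sleep, bounded by 1.5 frame times or 3 s, whichever is larger
     start_time = time.time()
     max_wait_time = max(frame_time * 1.5, 3)
     while is_playing():
         if time.time() - start_time > max_wait_time:
             return False  # timed out
         time.sleep(0.01)
     return True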
Example #8
        def acquire_stack_and_sum(number_frames, energy_offset_per_frame,
                                  document_controller, final_layout_fn):
            # grab the document model and workspace for convenience
            with document_controller.create_task_context_manager(
                    _("Multiple Shift EELS Acquire"), "table") as task:
                # acquire the stack. it will be added to the document by queueing to the main thread at the end of this method.
                stack_data_item = acquire_series(number_frames,
                                                 energy_offset_per_frame, task)
                stack_data_item.title = _("Spectrum Stack")

                # align and sum the stack
                data_element = dict()
                summed_image, shifts = align_stack(stack_data_item.data, task)
                # add the summed image to Swift
                data_element["data"] = summed_image
                data_element["title"] = "Aligned and summed spectra"
                # strip off the first dimension that we sum over
                for dimensional_calibration in stack_data_item.dimensional_calibrations[1:]:
                    data_element.setdefault("spatial_calibrations", list()).append({
                        "origin": dimensional_calibration.offset,  # TODO: fix me
                        "scale": dimensional_calibration.scale,
                        "units": dimensional_calibration.units,
                    })
                # set the energy dispersive calibration so that the ZLP is at zero eV
                zlp_position_pixels = numpy.sum(summed_image, axis=0).argmax()
                zlp_position_calibrated_units = -zlp_position_pixels * data_element["spatial_calibrations"][1]["scale"]
                data_element["spatial_calibrations"][1]["offset"] = zlp_position_calibrated_units
                sum_data_item = ImportExportManager.create_data_item_from_data_element(data_element)

                dispersive_sum = numpy.sum(summed_image, axis=1)
                differential = numpy.diff(dispersive_sum)
                top = numpy.argmax(differential)
                bottom = numpy.argmin(differential)
                _midpoint = numpy.mean([bottom, top]) / dispersive_sum.shape[0]
                _integration_width = float(numpy.abs(bottom - top)) / dispersive_sum.shape[0]  # * data_element["spatial_calibrations"][0]["scale"]

                document_controller.queue_task(final_layout_fn)
                document_controller.queue_task(
                    functools.partial(show_in_panel, stack_data_item,
                                      document_controller,
                                      "multiple_shift_eels_stack"))
                document_controller.queue_task(
                    functools.partial(
                        show_in_panel, sum_data_item, document_controller,
                        "multiple_shift_eels_aligned_summed_stack"))
                document_controller.queue_task(
                    functools.partial(add_line_profile, sum_data_item,
                                      document_controller,
                                      "multiple_shift_eels_spectrum",
                                      _midpoint, _integration_width))
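The ZLP recalibration above uses the linear calibration form calibrated(x) = offset + scale * x: choosing offset = -zlp_pixel * scale puts the zero-loss peak exactly at 0 eV. A two-line check with hypothetical numbers:

 scale = 0.25  # hypothetical dispersion, eV per pixel
 zlp_position_pixels = 120
 offset = -zlp_position_pixels * scale
 assert offset + scale * zlp_position_pixels == 0.0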
Example #9
 def test_data_element_date_gets_set_as_data_item_created_date(self):
     data_element = dict()
     data_element["version"] = 1
     data_element["data"] = numpy.zeros((16, 16), dtype=numpy.double)
     data_element["datetime_modified"] = {
         'tz': '+0300',
         'dst': '+60',
         'local_datetime': '2015-06-10T19:31:52.780511'
     }
     data_item = ImportExportManager.create_data_item_from_data_element(
         data_element)
     self.assertIsNotNone(data_item.created)
     self.assertEqual(data_item.timezone_offset, "+0300")
     local_offset_seconds = int(
         round((datetime.datetime.now() -
                datetime.datetime.utcnow()).total_seconds()))
     # check both matches for DST
     match1 = datetime.datetime(year=2015,
                                month=6,
                                day=10,
                                hour=19 - 3,
                                minute=31,
                                second=52,
                                microsecond=780511) + datetime.timedelta(
                                    seconds=local_offset_seconds)
     match2 = datetime.datetime(year=2015,
                                month=6,
                                day=10,
                                hour=19 - 3,
                                minute=31,
                                second=52,
                                microsecond=780511) + datetime.timedelta(
                                    seconds=local_offset_seconds + 3600)
     self.assertTrue(data_item.created_local == match1
                     or data_item.created_local == match2)
Example #10
 def test_csv1_exporter_handles_multi_layer_display_item_with_different_calibration(self):
     with TestContext.create_memory_context() as test_context:
         document_model = test_context.create_document_model()
         data_item = DataItem.DataItem(numpy.full((50, ), 0, dtype=numpy.uint32))
         data_item.dimensional_calibrations = [Calibration.Calibration(offset=0, scale=1, units="eV")]
         data_item.intensity_calibration = Calibration.Calibration(offset=10, scale=2)
         data_item2 = DataItem.DataItem(numpy.full((100, ), 1, dtype=numpy.uint32))
         data_item2.dimensional_calibrations = [Calibration.Calibration(offset=-10, scale=2, units="eV")]
         data_item2.intensity_calibration = Calibration.Calibration(offset=5, scale=2)
         document_model.append_data_item(data_item)
         document_model.append_data_item(data_item2, False)
         display_item = document_model.get_display_item_for_data_item(data_item)
         display_item.display_type = "line_plot"
         display_item.append_display_data_channel_for_data_item(data_item2)
         current_working_directory = os.getcwd()
         file_path = os.path.join(current_working_directory, "__file.csv")
         handler = ImportExportManager.CSV1ImportExportHandler("csv1-io-handler", "CSV 1D", ["csv"])
         handler.write_display_item(None, display_item, file_path, "csv")
         self.assertTrue(os.path.exists(file_path))
         try:
             saved_data = numpy.genfromtxt(file_path, delimiter=", ")
             self.assertSequenceEqual(saved_data.shape, (max(data_item.xdata.data_shape[0], data_item2.xdata.data_shape[0]), 4))
             self.assertTrue(numpy.allclose(saved_data[:50, 0], numpy.linspace(0, 49, 50)))
             self.assertTrue(numpy.allclose(saved_data[:50, 1], 10))
             self.assertTrue(numpy.allclose(saved_data[:, 2], numpy.linspace(-10, 188, 100)))
             self.assertTrue(numpy.allclose(saved_data[:, 3], 7))
         finally:
             os.remove(file_path)
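The expected CSV columns follow from the linear calibrations: for data_item2, Calibration(offset=-10, scale=2) gives x_i = -10 + 2 * i for i = 0..99, hence the linspace(-10, 188, 100) check, and the intensity Calibration(offset=5, scale=2) maps the raw value 1 to 5 + 2 * 1 = 7. A quick sketch of the x-axis arithmetic:

 import numpy

 x = -10 + 2 * numpy.arange(100)
 assert x[0] == -10 and x[-1] == 188
 assert numpy.allclose(x, numpy.linspace(-10, 188, 100))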
Example #11
 def create_result_data_item(
         self, data_dict: typing.Mapping[str, typing.Any]) -> None:
     reset_color_cycle()
     display_item = None
     sorted_indices = numpy.argsort(
         [parms['start_ev'] for parms in data_dict['parameter_list']])
     display_layer_index = 0
     for i in sorted_indices:
         index = data_dict['parameter_list'][i]['index']
         xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
             data_dict['data_element_list'][i])
         start_ev = data_dict['parameter_list'][i]['start_ev']
         end_ev = data_dict['parameter_list'][i]['end_ev']
         number_frames = data_dict['parameter_list'][i]['frames']
         exposure_ms = data_dict['parameter_list'][i]['exposure_ms']
         summed = ' (summed)' if not data_dict['data_element_list'][i].get('is_sequence', False) and number_frames > 1 else ''
         data_item = None
         if (i == sorted_indices[0] and xdata.datum_dimension_count == 1
                 and data_dict['settings_list'][i]['use_multi_eels_calibration']):
             data_item = self.document_controller.library.create_data_item_from_data_and_metadata(
                 xdata, title='MultiAcquire (stacked)')
             display_item = self.__api.library._document_model.get_display_item_for_data_item(
                 data_item._data_item)
             display_layer_index += 1  # display item has one display layer already
         #else:
         units_str = ' eV' if data_dict['settings_list'][i]['use_multi_eels_calibration'] else ''
         new_data_item = self.document_controller.library.create_data_item_from_data_and_metadata(
             xdata,
             title='MultiAcquire #{:d}, {:g}-{:g}{}, {:g}x{:g} ms{}'.format(
                 index + 1, start_ev, end_ev, units_str, number_frames,
                 exposure_ms, summed))
         data_item_metadata = new_data_item.metadata
         metadata = dict(data_item_metadata) if data_item_metadata else dict()
         metadata['MultiAcquire.parameters'] = data_dict['parameter_list'][i]
         metadata['MultiAcquire.settings'] = data_dict['settings_list'][i]
         new_data_item.set_metadata(metadata)
         if display_item:
             display_item.append_display_data_channel_for_data_item(
                 data_item._data_item if data_item else new_data_item._data_item)
             start_ev = data_dict['parameter_list'][i]['start_ev']
             end_ev = data_dict['parameter_list'][i]['end_ev']
             display_layer_label = '#{:d}: {:g}-{:g}{}, {:g}x{:g} ms{}'.format(
                 index + 1, start_ev, end_ev, units_str, number_frames,
                 exposure_ms, summed)
             display_item._set_display_layer_properties(
                 display_layer_index,
                 label=display_layer_label,
                 stroke_color=get_next_color(),
                 fill_color=None)
             display_layer_index += 1
     if display_item:
         display_item.set_display_property('legend_position', 'top-right')
         display_item.title = 'MultiAcquire (stacked)'
         show_display_item(self.document_controller, display_item)
Example #12
 def test_data_element_with_uuid_assigns_uuid_to_data_item(self):
     data_element = dict()
     data_element["version"] = 1
     data_element["data"] = numpy.zeros((16, 16), dtype=numpy.double)
     data_element_uuid = uuid.uuid4()
     data_element["uuid"] = str(data_element_uuid)
     with contextlib.closing(ImportExportManager.create_data_item_from_data_element(data_element)) as data_item:
         self.assertEqual(data_item.uuid, data_element_uuid)
Example #13
 def test_convert_data_element_records_time_zone_in_data_item_metadata(self):
     data_element = dict()
     data_element["version"] = 1
     data_element["data"] = numpy.zeros((16, 16), dtype=numpy.double)
     data_element["datetime_modified"] = Utility.get_current_datetime_item()
     with contextlib.closing(ImportExportManager.create_data_item_from_data_element(data_element)) as data_item:
         self.assertIsNotNone(data_item.created)
         self.assertEqual(data_item.timezone_offset, data_element["datetime_modified"]["tz"])
Example #14
 def test_data_element_to_extended_data_includes_time_zone(self):
     data_element = dict()
     data_element["version"] = 1
     data_element["data"] = numpy.zeros((16, 16), dtype=numpy.double)
     data_element["datetime_modified"] = {'tz': '+0300', 'dst': '+60', 'local_datetime': '2015-06-10T19:31:52.780511'}
     xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
     self.assertEqual(xdata.timezone_offset, "+0300")
     self.assertEqual(str(xdata.timestamp), "2015-06-10 16:31:52.780511")
Example #15
 def test_data_element_to_data_item_includes_time_zone(self):
     data_element = dict()
     data_element["version"] = 1
     data_element["data"] = numpy.zeros((16, 16), dtype=numpy.double)
     data_element["datetime_modified"] = {'tz': '+0300', 'dst': '+60', 'local_datetime': '2015-06-10T19:31:52.780511'}
     with contextlib.closing(ImportExportManager.create_data_item_from_data_element(data_element)) as data_item:
         self.assertEqual(data_item.timezone_offset, "+0300")
         self.assertEqual(str(data_item.created), "2015-06-10 16:31:52.780511")
Example #16
 def test_extended_data_to_data_element_includes_time_zone(self):
     # extended data timestamp is utc; timezone is specified in metadata/description/time_zone
     data = numpy.ones((8, 6), int)
     metadata = {"description": {"time_zone": {"tz": "+0300", "dst": "+60"}}}
     timestamp = datetime.datetime(2013, 11, 18, 14, 5, 4, 0)
     xdata = DataAndMetadata.new_data_and_metadata(data, metadata=metadata, timestamp=timestamp)
     data_element = ImportExportManager.create_data_element_from_extended_data(xdata)
     self.assertEqual(data_element["datetime_modified"], {"dst": "+60", "local_datetime": "2013-11-18T17:05:04", 'tz': "+0300"})
Example #17
 def test_convert_data_element_sets_timezone_and_timezone_offset_if_present(self):
     data_element = dict()
     data_element["version"] = 1
     data_element["data"] = numpy.zeros((16, 16), dtype=numpy.double)
     data_element["datetime_modified"] = {'tz': '+0300', 'dst': '+60', 'local_datetime': '2015-06-10T19:31:52.780511', 'timezone': 'Europe/Athens'}
     with contextlib.closing(ImportExportManager.create_data_item_from_data_element(data_element)) as data_item:
         self.assertIsNotNone(data_item.created)
         self.assertEqual(data_item.timezone, "Europe/Athens")
         self.assertEqual(data_item.timezone_offset, "+0300")
Example #18
 def test_data_item_with_numpy_bool_to_data_element_produces_json_compatible_dict(
         self):
     data_item = DataItem.DataItem(numpy.zeros((16, 16)))
     data_item.large_format = numpy.prod((2, 3, 4), dtype=numpy.int64) > 10  # produces a numpy.bool_
     with contextlib.closing(data_item):
         data_element = ImportExportManager.create_data_element_from_data_item(
             data_item, include_data=False)
         json.dumps(data_element)
Example #19
 def create_result_data_item(self, data_dict):
     display_layers = []
     reset_color_cycle()
     display_item = None
     sorted_indices = numpy.argsort(
         [parms['start_ev'] for parms in data_dict['parameter_list']])
     for i in sorted_indices:
         index = data_dict['parameter_list'][i]['index']
         xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
             data_dict['data_element_list'][i])
         start_ev = data_dict['parameter_list'][i]['start_ev']
         end_ev = data_dict['parameter_list'][i]['end_ev']
         number_frames = data_dict['parameter_list'][i]['frames']
         exposure_ms = data_dict['parameter_list'][i]['exposure_ms']
         summed = ' (summed)' if not data_dict['data_element_list'][i].get('is_sequence', False) and number_frames > 1 else ''
         data_item = None
         if i == sorted_indices[0] and xdata.datum_dimension_count == 1:
             data_item = self.document_controller.library.create_data_item_from_data_and_metadata(
                 xdata, title='MultiAcquire (stacked)')
             display_item = self.__api.library._document_model.get_display_item_for_data_item(
                 data_item._data_item)
             #new_data_item = data_item
         #else:
         new_data_item = self.document_controller.library.create_data_item_from_data_and_metadata(
             xdata,
             title='MultiAcquire #{:d}, {:g}-{:g} eV, {:g}x{:g} ms{}'.format(
                 index + 1, start_ev, end_ev, number_frames, exposure_ms, summed))
         metadata = new_data_item.metadata
         metadata['MultiAcquire.parameters'] = data_dict['parameter_list'][i]
         metadata['MultiAcquire.settings'] = data_dict['settings_list'][i]
         new_data_item.set_metadata(metadata)
         if display_item:
             display_item.append_display_data_channel_for_data_item(
                 data_item._data_item if data_item else new_data_item._data_item)
             start_ev = data_dict['parameter_list'][i]['start_ev']
             end_ev = data_dict['parameter_list'][i]['end_ev']
             display_layers.append({
                 'label': '#{:d}: {:g}-{:g} eV, {:g}x{:g} ms{}'.format(
                     index + 1, start_ev, end_ev, number_frames, exposure_ms, summed),
                 'data_index': len(display_layers),
                 'stroke_color': get_next_color(),
                 'fill_color': None,
             })
     if display_item:
         display_item.display_layers = display_layers
         display_item.set_display_property('legend_position', 'top-right')
         display_item.title = 'MultiAcquire (stacked)'
         show_display_item(self.document_controller, display_item)
Example #20
 def test_data_element_to_extended_data_conversion(self):
     data = numpy.ones((8, 6), int)
     intensity_calibration = Calibration.Calibration(offset=1, scale=1.1, units="one")
     dimensional_calibrations = [Calibration.Calibration(offset=2, scale=2.1, units="two"), Calibration.Calibration(offset=3, scale=2.2, units="two")]
     metadata = {"hardware_source": {"one": 1, "two": "b"}}
     timestamp = datetime.datetime.now()
     data_descriptor = DataAndMetadata.DataDescriptor(is_sequence=False, collection_dimension_count=1, datum_dimension_count=1)
     xdata = DataAndMetadata.new_data_and_metadata(data, intensity_calibration=intensity_calibration, dimensional_calibrations=dimensional_calibrations, metadata=metadata, timestamp=timestamp, data_descriptor=data_descriptor)
     data_element = ImportExportManager.create_data_element_from_extended_data(xdata)
     new_xdata = ImportExportManager.convert_data_element_to_data_and_metadata(data_element)
     self.assertTrue(numpy.array_equal(data, new_xdata.data))
     self.assertNotEqual(id(new_xdata.intensity_calibration), id(intensity_calibration))
     self.assertEqual(new_xdata.intensity_calibration, intensity_calibration)
     self.assertNotEqual(id(new_xdata.dimensional_calibrations[0]), id(dimensional_calibrations[0]))
     self.assertEqual(new_xdata.dimensional_calibrations, dimensional_calibrations)
     self.assertNotEqual(id(new_xdata.metadata), id(metadata))
     self.assertEqual(new_xdata.metadata, metadata)
     self.assertNotEqual(id(new_xdata.data_descriptor), id(data_descriptor))
     self.assertEqual(new_xdata.data_descriptor, data_descriptor)
Example #21
 def test_data_item_to_data_element_produces_json_compatible_dict(self):
     data_item = DataItem.DataItem(numpy.zeros((16, 16)))
     data_item.created = datetime.datetime(2013, 6, 18, 14, 5, 4, 0)  # always utc
     data_item.timezone = "Europe/Athens"
     data_item.timezone_offset = "+0300"
     data_item.source_file_path = "/path/to/source/file"
     data_item._set_modified(datetime.datetime(2013, 6, 18, 14, 5, 4, 0))  # always utc
     data_item.metadata = {"description": {"time_zone": {"tz": "+0300", "dst": "+60"}}}
     data_element = ImportExportManager.create_data_element_from_data_item(data_item, include_data=False)
     json.dumps(data_element)
Example #22
 def test_sub_area_size_change(self):
     document_model = DocumentModel.DocumentModel()
     with contextlib.closing(document_model):
         data_element = dict()
         data_element["version"] = 1
         data_element["data"] = numpy.zeros((16, 16), dtype=numpy.double)
         data_item = ImportExportManager.create_data_item_from_data_element(data_element)
         document_model.append_data_item(data_item)
         self.assertEqual(data_item.dimensional_shape, (16, 16))
         self.assertEqual(data_item.data_dtype, numpy.double)
         data_element["data"] = numpy.zeros((8, 8), dtype=numpy.double)
         data_element["sub_area"] = ((0,0), (4, 8))
         ImportExportManager.update_data_item_from_data_element(data_item, data_element)
         self.assertEqual(data_item.dimensional_shape, (8, 8))
         self.assertEqual(data_item.data_dtype, numpy.double)
         data_element["data"] = numpy.zeros((8, 8), dtype=numpy.float)
         data_element["sub_area"] = ((0,0), (4, 8))
         ImportExportManager.update_data_item_from_data_element(data_item, data_element)
         self.assertEqual(data_item.dimensional_shape, (8, 8))
         self.assertEqual(data_item.data_dtype, float)
Example #23
 def test_sub_area_size_change(self):
     with TestContext.create_memory_context() as test_context:
         document_model = test_context.create_document_model()
         data_element = dict()
         data_element["version"] = 1
         data_element["data"] = numpy.zeros((16, 16), dtype=numpy.double)
         data_item = ImportExportManager.create_data_item_from_data_element(data_element)
         document_model.append_data_item(data_item)
         self.assertEqual(data_item.dimensional_shape, (16, 16))
         self.assertEqual(data_item.data_dtype, numpy.double)
         data_element["data"] = numpy.zeros((8, 8), dtype=numpy.double)
         data_element["sub_area"] = ((0,0), (4, 8))
         ImportExportManager.update_data_item_from_data_element(data_item, data_element)
         self.assertEqual(data_item.dimensional_shape, (8, 8))
         self.assertEqual(data_item.data_dtype, numpy.double)
         data_element["data"] = numpy.zeros((8, 8), dtype=float)
         data_element["sub_area"] = ((0,0), (4, 8))
         ImportExportManager.update_data_item_from_data_element(data_item, data_element)
         self.assertEqual(data_item.dimensional_shape, (8, 8))
         self.assertEqual(data_item.data_dtype, float)
Example #24
 def test_creating_data_element_with_sequence_and_implicit_datum_size_data_makes_correct_data_item(self):
     data_element = dict()
     data_element["version"] = 1
     data_element["data"] = numpy.zeros((4, 16, 16), dtype=numpy.double)
     data_element["is_sequence"] = True
     with contextlib.closing(ImportExportManager.create_data_item_from_data_element(data_element)) as data_item:
         self.assertEqual(data_item.is_sequence, True)
         self.assertEqual(data_item.collection_dimension_count, 0)
         self.assertEqual(data_item.datum_dimension_count, 2)
         self.assertEqual(data_item.xdata.is_sequence, True)
         self.assertEqual(data_item.xdata.collection_dimension_count, 0)
         self.assertEqual(data_item.xdata.datum_dimension_count, 2)
Example #25
 def test_importing_large_numpy_file_sets_large_format_flag(self):
     current_working_directory = os.getcwd()
     file_path_npy = os.path.join(current_working_directory, "__file.npy")
     numpy.save(file_path_npy, numpy.zeros((4, 4, 4)))
     handler = ImportExportManager.NumPyImportExportHandler(
         "numpy-io-handler", "npy", ["npy"])
     try:
         data_items = handler.read_data_items(None, "npy", file_path_npy)
         self.assertEqual(len(data_items), 1)
         data_item = data_items[0]
         self.assertTrue(data_item.large_format)
     finally:
         os.remove(file_path_npy)
Example #26
 def test_creating_data_element_with_sequence_data_makes_correct_data_item(self):
     data_element = dict()
     data_element["version"] = 1
     data_element["data"] = numpy.zeros((4, 16, 16), dtype=numpy.double)
     data_element["is_sequence"] = True
     data_element["collection_dimension_count"] = 0
     data_element["datum_dimension_count"] = 2
     data_item = ImportExportManager.create_data_item_from_data_element(data_element)
     self.assertEqual(data_item.is_sequence, True)
     self.assertEqual(data_item.collection_dimension_count, 0)
     self.assertEqual(data_item.datum_dimension_count, 2)
     self.assertEqual(data_item.xdata.is_sequence, True)
     self.assertEqual(data_item.xdata.collection_dimension_count, 0)
     self.assertEqual(data_item.xdata.datum_dimension_count, 2)
Example #27
 def grab_sequence(
     self, count: int, **kwargs
 ) -> typing.Optional[typing.List[DataAndMetadata.DataAndMetadata]]:
     self.start_playing()
     frames = self.acquire_sequence(count)
     if frames is not None:
         xdatas = list()
         for data_element in frames:
             data_element["is_sequence"] = True
             data_element["collection_dimension_count"] = 0
             data_element["datum_dimension_count"] = len(
                 data_element["data"].shape) - 1
             xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
                 data_element)
             xdatas.append(xdata)
         return xdatas
     return None
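The three keys grab_sequence sets on each data_element are the flattened form of a data descriptor: when building extended data directly (as in Examples #20 and #24), the same shape interpretation can be expressed with the DataDescriptor constructor. A minimal sketch using the nion.data API shown elsewhere in these examples:

 import numpy
 from nion.data import DataAndMetadata

 data = numpy.zeros((4, 16, 16))
 # equivalent to is_sequence=True, collection_dimension_count=0, datum_dimension_count=2
 descriptor = DataAndMetadata.DataDescriptor(is_sequence=True, collection_dimension_count=0, datum_dimension_count=2)
 xdata = DataAndMetadata.new_data_and_metadata(data, data_descriptor=descriptor)
 assert xdata.is_sequence and xdata.datum_dimension_count == 2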
Example #28
 def test_ndata_write_to_then_read_from_temp_file(self):
     document_model = DocumentModel.DocumentModel()
     with contextlib.closing(document_model):
         current_working_directory = os.getcwd()
         file_path = os.path.join(current_working_directory, "__file.ndata")
         handler = ImportExportManager.NDataImportExportHandler("ndata1-io-handler", "ndata", ["ndata"])
         data_item = DataItem.DataItem(numpy.zeros((16, 16), dtype=numpy.double))
         document_model.append_data_item(data_item)
         display_item = document_model.get_display_item_for_data_item(data_item)
         handler.write_display_item(None, display_item, file_path, "ndata")
         self.assertTrue(os.path.exists(file_path))
         try:
             data_items = handler.read_data_items(None, "ndata", file_path)
             self.assertEqual(len(data_items), 1)
             data_item = data_items[0]
         finally:
             os.remove(file_path)
Example #29
 def test_npy_write_to_then_read_from_temp_file(self):
     document_model = DocumentModel.DocumentModel()
     with contextlib.closing(document_model):
         current_working_directory = os.getcwd()
         file_path_npy = os.path.join(current_working_directory, "__file.npy")
         file_path_json = os.path.join(current_working_directory, "__file.json")
         numpy.save(file_path_npy, numpy.zeros((16, 16)))
         with open(file_path_json, "w") as f:
             json.dump({"version": 1}, f)
         handler = ImportExportManager.NumPyImportExportHandler("numpy-io-handler", "npy", ["npy"])
         try:
             data_items = handler.read_data_items(None, "npy", file_path_npy)
             self.assertEqual(len(data_items), 1)
             data_item = data_items[0]
         finally:
             os.remove(file_path_npy)
             os.remove(file_path_json)
Example #30
 def grab_partial(
         self,
         *,
         update_period: float = 1.0) -> typing.Tuple[bool, bool, int]:
     # updates the full readout data, returns a tuple of is complete, is canceled, and
     # the number of valid rows.
     start = self.__start
     frame_parameters = self.__camera_frame_parameters
     exposure = frame_parameters.exposure_ms / 1000.0
     n = min(max(int(update_period / exposure), 1), self.__count - start)
     is_complete = start + n == self.__count
     # print(f"{start=} {n=} {self.__count=} {is_complete=}")
     data_element = self.__camera_device._acquire_sequence(n)
     if data_element and not self.__aborted:
         xdata = ImportExportManager.convert_data_element_to_data_and_metadata(
             data_element)
         dimensional_calibrations = tuple(
             Calibration.Calibration() for _ in range(len(self.__collection_shape))
         ) + tuple(xdata.dimensional_calibrations[1:])
         assert self.__xdata
         self.__xdata._set_intensity_calibration(
             xdata.intensity_calibration)
         self.__xdata._set_dimensional_calibrations(
             dimensional_calibrations)
         if len(self.__collection_shape) > 1:
             row_size = self.__collection_shape[-1]
             start_row = start // row_size
             rows = n // row_size
             metadata = dict(xdata.metadata)
             metadata.setdefault("hardware_source",
                                 dict())["valid_rows"] = start_row + rows
             self.__xdata._set_metadata(metadata)
         else:
             row_size = 1
         # convert from a sequence to a collection.
         assert self.__data is not None
         self.__data.reshape(
             (self.__data.shape[0] * row_size,) +
             self.__data.shape[len(self.__collection_shape):])[start:start + n, ...] = xdata._data_ex
         self.__start = start + n
         return is_complete, False, start + n
     self.__start = 0
     return True, True, 0
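The reshape-and-assign near the end treats the (rows, cols, ...) collection buffer as a flat (rows * cols, ...) sequence so that n incoming frames land in slots start..start+n; this works in place because reshape on a contiguous array returns a view. A small numpy-only sketch of the same idea:

 import numpy

 collection_shape = (4, 5)
 frame_shape = (8, 8)
 buffer = numpy.zeros(collection_shape + frame_shape)
 frames = numpy.ones((3,) + frame_shape)  # n == 3 frames from the camera
 start = 7
 flat = buffer.reshape((collection_shape[0] * collection_shape[1],) + frame_shape)
 flat[start:start + 3, ...] = frames  # writes through to buffer (flat is a view)
 assert buffer.sum() == 3 * 8 * 8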