def test_read_chunk_fail_bad_header(self):
    with ArtificialND2(self.test_file) as artificial:
        fh = artificial.file_handle
        chunk_location = artificial.locations['image_attributes'][0]

        with self.assertRaises(ValueError) as context:
            read_chunk(fh, chunk_location + 1)

        self.assertEqual(str(context.exception), "The ND2 file seems to be corrupted.")
def roi_metadata(self):
    """Contains information about the defined ROIs: shape, position and
    type (reference/background/stimulation).

    Returns:
        dict: ROI metadata dictionary

    """
    return read_metadata(read_chunk(self._fh, self._label_map.roi_metadata), 1)
def image_attributes(self):
    """Image attributes

    Returns:
        dict: containing the image attributes

    """
    return read_metadata(read_chunk(self._fh, self._label_map.image_attributes), 1)
def image_calibration(self):
    """The number of pixels per micron.

    Returns:
        dict: pixels per micron

    """
    return read_metadata(read_chunk(self._fh, self._label_map.image_calibration), 1)
def app_info(self):
    """NIS Elements application info

    Returns:
        dict: (version) information of the NIS Elements application

    """
    return xmltodict.parse(read_chunk(self._fh, self._label_map.app_info))
def lut_data(self):
    """LUT information

    Returns:
        dict: LUT information

    """
    return xmltodict.parse(read_chunk(self._fh, self._label_map.lut_data))
def custom_data(self):
    """Custom user data

    Returns:
        dict: custom user data

    """
    return xmltodict.parse(read_chunk(self._fh, self._label_map.custom_data))
def grabber_settings(self):
    """Grabber settings

    Returns:
        dict: acquisition settings

    """
    return xmltodict.parse(read_chunk(self._fh, self._label_map.grabber_settings))
def image_metadata_sequence(self):
    """Image metadata of the sequence

    Returns:
        dict: containing the metadata

    """
    return read_metadata(read_chunk(self._fh, self._label_map.image_metadata_sequence), 1)
def image_text_info(self):
    """Textual image information

    Returns:
        dict: containing the textual image info

    """
    return read_metadata(read_chunk(self._fh, self._label_map.image_text_info), 1)
def image_metadata(self):
    """Image metadata

    Returns:
        dict: extra image metadata, or None if the chunk is not present in the file

    """
    if self._label_map.image_metadata:
        return read_metadata(read_chunk(self._fh, self._label_map.image_metadata), 1)
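# The accessors above all follow the same two-step pattern: look up the chunk offset in the label
# map, read the raw chunk with read_chunk(), then decode it with read_metadata() (binary metadata
# chunks) or xmltodict.parse() (XML chunks). A minimal illustrative helper showing that pattern --
# this helper itself is an assumption and not part of the module -- could look like this:
def _read_labelled_chunk(fh, chunk_offset, xml=False):
    """Hypothetical wrapper around the chunk-decoding pattern used by the accessors above."""
    raw = read_chunk(fh, chunk_offset)
    return xmltodict.parse(raw) if xml else read_metadata(raw, 1)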
def _get_raw_image_data(self, image_group_number, channel_offset, height, width):
    """Reads the raw bytes and the timestamp of an image.

    Args:
        image_group_number: the image group number (see _calculate_image_group_number)
        channel_offset: the number of the color channel
        height: the height of the image
        width: the width of the image

    Returns:
        tuple: (timestamp, image_data); image_data is a nan-filled array for gap frames

    """
    chunk = self._label_map.get_image_data_location(image_group_number)
    data = read_chunk(self._fh, chunk)

    # All images in the same image group share the same timestamp! So if you have complicated image data,
    # your timestamps may not be entirely accurate. Practically speaking though, they'll only be off by a few
    # seconds unless you're doing something super weird.
    timestamp = struct.unpack("d", data[:8])[0]
    image_group_data = array.array("H", data)
    image_data_start = 4 + channel_offset

    # The images for the various channels are interleaved within the same array. For example, the second image
    # of a four image group will be composed of bytes 2, 6, 10, etc. If you understand why someone would design
    # a data structure that way, please send the author of this library a message.
    number_of_true_channels = int(len(image_group_data[4:]) / (height * width))
    try:
        image_data = np.reshape(image_group_data[image_data_start::number_of_true_channels], (height, width))
    except ValueError:
        # Fall back to a width derived from the actual amount of data when the reported width does not fit.
        channel_slice = image_group_data[image_data_start::number_of_true_channels]
        image_data = np.reshape(channel_slice, (height, int(round(len(channel_slice) / height))))

    # Skip images that are all zeros! This is important, since NIS Elements creates blank "gap" images if you
    # don't have the same number of images each cycle. We discovered this because we only took GFP images every
    # other cycle to reduce phototoxicity, but NIS Elements still allocated memory as if we were going to take
    # them every cycle.
    if np.any(image_data):
        return timestamp, image_data

    # If a blank "gap" image is encountered, generate an array of corresponding height and width to avoid
    # errors with ND2 files with missing frames. The array is filled with nan to reflect that data is missing.
    else:
        empty_frame = np.full((height, width), np.nan)
        warnings.warn('ND2 file contains gap frames which are represented by np.nan-filled arrays; '
                      'to convert to zeros use e.g. np.nan_to_num(array)')
        return timestamp, empty_frame
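# A minimal, self-contained sketch (not part of the parser) of the channel de-interleaving described
# in the comment above: with N channels, pixel k of channel c sits at index c + k * N in the flat
# array, so a strided slice [c::N] recovers a single channel. All names below are illustrative.
import numpy as np

n_channels, demo_height, demo_width = 2, 2, 3
# Build a fake interleaved buffer: channel 0 holds 0..5, channel 1 holds 100..105.
channels = [np.arange(demo_height * demo_width, dtype=np.uint16) + 100 * c for c in range(n_channels)]
interleaved = np.stack(channels, axis=1).ravel()            # c0[0], c1[0], c0[1], c1[1], ...
channel_1 = interleaved[1::n_channels].reshape(demo_height, demo_width)
assert np.array_equal(channel_1, channels[1].reshape(demo_height, demo_width))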
def _parse_events(self):
    """Extract events"""
    # List of event names manually extracted from an ND2 file that contains all manually
    # insertable events from NIS-Elements software (4.60.00 (Build 1171) Patch 02)
    event_names = {
        1: 'Autofocus',
        7: 'Command Executed',
        9: 'Experiment Paused',
        10: 'Experiment Resumed',
        11: 'Experiment Stopped by User',
        13: 'Next Phase Moved by User',
        14: 'Experiment Paused for Refocusing',
        16: 'External Stimulation',
        33: 'User 1',
        34: 'User 2',
        35: 'User 3',
        36: 'User 4',
        37: 'User 5',
        38: 'User 6',
        39: 'User 7',
        40: 'User 8',
        44: 'No Acquisition Phase Start',
        45: 'No Acquisition Phase End',
        46: 'Hardware Error',
        47: 'N-STORM',
        48: 'Incubation Info',
        49: 'Incubation Error'
    }

    self._metadata_parsed['events'] = []

    events = read_metadata(read_chunk(self._fh, self._label_map.image_events), 1)

    if events is None or six.b('RLxExperimentRecord') not in events:
        return

    events = events[six.b('RLxExperimentRecord')][six.b('pEvents')]
    if len(events) == 0:
        return

    for event in events[six.b('')]:
        event_info = {
            'index': event[six.b('I')],
            'time': event[six.b('T')],
            'type': event[six.b('M')],
        }
        if event_info['type'] in event_names:
            event_info['name'] = event_names[event_info['type']]

        self._metadata_parsed['events'].append(event_info)
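# Illustrative sketch (all values assumed, not read from a real file): the loop above turns each raw
# event record into a plain dict and attaches a human-readable 'name' only when the numeric 'type'
# appears in the event_names table.
example_event = {'index': 1, 'time': 2731.5, 'type': 7}
example_event['name'] = {7: 'Command Executed'}.get(example_event['type'], 'Unknown')
assert example_event == {'index': 1, 'time': 2731.5, 'type': 7, 'name': 'Command Executed'}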
def _get_raw_image_data(self, image_group_number, channel_offset, height, width, memmap=False):
    """Reads the raw bytes and the timestamp of an image.

    Args:
        image_group_number: the image group number (see _calculate_image_group_number)
        channel_offset: the number of the color channel
        height: the height of the image
        width: the width of the image
        memmap: if True, return the pixel data as a memory-mapped view instead of an in-memory copy

    Returns:
        tuple: (timestamp, image_data)

    """
    chunk = self._label_map.get_image_data_location(image_group_number)
    data = read_chunk(self._fh, chunk, memmap=memmap)

    # All images in the same image group share the same timestamp! So if you have complicated image data,
    # your timestamps may not be entirely accurate. Practically speaking though, they'll only be off by a few
    # seconds unless you're doing something super weird.
    timestamp = struct.unpack("d", data[:8])[0]

    if memmap:
        image_group_data = data[8:].view(np.uint16)
    else:
        image_group_data = array.array("H", data[8:])

    # The images for the various channels are interleaved within the same array. For example, the second image
    # of a four image group will be composed of bytes 2, 6, 10, etc. If you understand why someone would design
    # a data structure that way, please send the author of this library a message.
    number_of_true_channels = int(len(image_group_data) / (height * width))
    image_data = np.reshape(image_group_data[channel_offset::number_of_true_channels], (height, width))

    # Skip images that are all zeros! This is important, since NIS Elements creates blank "gap" images if you
    # don't have the same number of images each cycle. We discovered this because we only took GFP images every
    # other cycle to reduce phototoxicity, but NIS Elements still allocated memory as if we were going to take
    # them every cycle.
    return timestamp, image_data
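# A small illustrative comparison (not part of the parser) of the two read paths above: with
# memmap=True the uint16 pixels are a zero-copy view over the file-backed buffer, while the default
# path copies the bytes into an in-process array.array("H", ...). Names below are illustrative.
import array
import numpy as np

raw_bytes = np.arange(12, dtype=np.uint16).tobytes()
copied = array.array("H", raw_bytes)                          # independent copy of the pixel data
zero_copy_view = np.frombuffer(raw_bytes, dtype=np.uint16)    # view over the same bytes, no copy
assert list(copied) == zero_copy_view.tolist()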
def test_read_chunk(self):
    with ArtificialND2(self.test_file) as artificial:
        fh = artificial.file_handle
        chunk_location = artificial.locations['image_attributes'][0]
        chunk_read = read_chunk(fh, chunk_location)

        real_data = six.BytesIO(artificial.raw_text)
        real_data.seek(chunk_location)

        # The chunk metadata is always 16 bytes long
        chunk_metadata = real_data.read(16)
        header, relative_offset, data_length = struct.unpack("IIQ", chunk_metadata)
        self.assertEqual(header, 0xabeceda)

        # We start at the location of the chunk metadata, skip over the metadata, and then proceed to the
        # start of the actual data field, which is at some arbitrary place after the metadata.
        real_data.seek(chunk_location + 16 + relative_offset)
        real_chunk = real_data.read(data_length)

        self.assertEqual(real_chunk, chunk_read)
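# A minimal sketch of the on-disk chunk layout exercised by the tests above: a 16-byte header packed
# as "IIQ" -- the 0xabeceda magic word, a relative offset to the payload, and the payload length --
# followed (after that relative offset) by the payload itself. The zero relative offset below is an
# assumption chosen only to keep the example short.
import struct

payload = b'example payload'
relative_offset = 0  # assumed: payload starts immediately after the 16-byte header
fake_chunk = struct.pack("IIQ", 0xabeceda, relative_offset, len(payload)) + payload

magic, rel, length = struct.unpack("IIQ", fake_chunk[:16])
assert magic == 0xabeceda
assert fake_chunk[16 + rel:16 + rel + length] == payload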