def process_experiment(_experiment, _overwrite=False):
    _arguments = [
        (_experiment, int(_series.split('_')[1]), _overwrite)
        for _series in paths.image_files(paths.serieses(_experiment))
    ]
    _p = Pool(CPUS_TO_USE)
    _p.starmap(process_series, _arguments)
    _p.close()

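# Usage sketch (assumed entry point): 'SN41' is the experiment used by the visualization script
# further down. The __main__ guard matters because this function spawns a multiprocessing Pool,
# so it must not execute when worker processes import the module.
if __name__ == '__main__':
    process_experiment('SN41', _overwrite=False)
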
def process_series(_experiment, _series_id, _overwrite=False):
    _series_image_path = paths.serieses(_experiment, _series_id)
    _image_properties = load.image_properties(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)
    _cells_coordinates = load.cell_coordinates_tracked_series_file_data(_experiment, _series_id)
    _series_image_by_time_frames = [
        np.array([_z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]])
        for _time_frame in range(_series_image.shape[0])
    ]
    _tuples = load.experiment_groups_as_tuples(_experiment)
    _tuples = organize.by_experiment(_tuples)[_experiment]
    _tuples = filtering.by_real_pairs(_tuples)
    _tuples = filtering.by_real_fake_pairs(_tuples, _real_fake_pairs=False)
    _tuples = filtering.by_series_id(_tuples, _series_id)
    for _tuple in _tuples:
        _experiment, _series_id, _group = _tuple
        _cell_1_id, _cell_2_id = [int(_value) for _value in _group.split('_')[1:]]
        process_group(
            _experiment=_experiment,
            _series_id=_series_id,
            _cells_coordinates=_cells_coordinates,
            _cell_1_id=_cell_1_id,
            _cell_2_id=_cell_2_id,
            _series_image_by_time_frames=_series_image_by_time_frames,
            _resolutions=_image_properties['resolutions'],
            _image_properties=_image_properties,
            _overwrite=_overwrite
        )

def process_fake_following(_experiment, _series_id, _cell_1_id, _cell_2_id, _x_change, _y_change, _z_change=0,
                           _overwrite=False):
    _series_image_path = paths.serieses(_experiment, _series_id)
    _image_properties = load.image_properties(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)
    _cells_coordinates = load.cell_coordinates_tracked_series_file_data(_experiment, _series_id)
    _series_image_by_time_frames = [
        np.array([_z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]])
        for _time_frame in range(_series_image.shape[0])
    ]
    process_group(
        _experiment=_experiment,
        _series_id=_series_id,
        _cells_coordinates=_cells_coordinates,
        _cell_1_id=_cell_1_id,
        _cell_2_id=_cell_2_id,
        _series_image_by_time_frames=_series_image_by_time_frames,
        _resolutions=_image_properties['resolutions'],
        _real_cells=False,
        _x_change=_x_change,
        _y_change=_y_change,
        _z_change=_z_change,
        _overwrite=_overwrite
    )

def process_fake_static(_experiment, _series_id, _cell_1_id, _cell_2_id, _x1, _y1, _z1, _x2, _y2, _z2,
                        _overwrite=False):
    _series_image_path = paths.serieses(_experiment, _series_id)
    _image_properties = load.image_properties(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)
    _cells_coordinates = [
        [(_x1, _y1, _z1) for _time_frame in range(_series_image.shape[0])],
        [(_x2, _y2, _z2) for _time_frame in range(_series_image.shape[0])]
    ]
    _series_image_by_time_frames = [
        np.array([_z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]])
        for _time_frame in range(_series_image.shape[0])
    ]
    process_group(
        _experiment=_experiment,
        _series_id=_series_id,
        _cells_coordinates=_cells_coordinates,
        _cell_1_id=0,
        _cell_2_id=1,
        _series_image_by_time_frames=_series_image_by_time_frames,
        _resolutions=_image_properties['resolutions'],
        _real_cells=False,
        _fake_cell_1_id=_cell_1_id,
        _fake_cell_2_id=_cell_2_id,
        _overwrite=_overwrite
    )

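# Usage sketches (illustrative values only, not part of the pipeline): the series, cell ids and
# coordinates below mirror the 'static_0_1' group used by the visualization script further down;
# the x/y shift in the "following" call is a hypothetical offset.
#   process_fake_following('SN41', 3, _cell_1_id=0, _cell_2_id=1, _x_change=50, _y_change=0)
#   process_fake_static('SN41', 3, _cell_1_id=0, _cell_2_id=1,
#                       _x1=23, _y1=226, _z1=10, _x2=228, _y2=226, _z2=10)
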
def process_series(_experiment, _series_id, _overwrite=False):
    _experiment_path = paths.image_properties(_experiment)
    _properties_path = os.path.join(_experiment_path, 'series_' + str(_series_id) + '.json')
    if not _overwrite and os.path.isfile(_properties_path):
        return
    print('Creating image properties for:', _experiment, 'Series ' + str(_series_id), sep='\t')
    _series_image = load.series_image(_experiment, _series_id)
    _path = paths.serieses(_experiment, _series_id)
    with Image.open(_path) as _img:
        _meta_dict = {TAGS[_key]: _img.tag[_key] for _key in _img.tag}
    _properties = {
        'experiment': _experiment,
        'series': int(_series_id),
        'dimensions': {
            'width': _meta_dict['ImageWidth'][0],
            'height': _meta_dict['ImageLength'][0]
        },
        'resolutions': {
            'x': _meta_dict['XResolution'][0][1] / _meta_dict['XResolution'][0][0],
            'y': _meta_dict['YResolution'][0][1] / _meta_dict['YResolution'][0][0],
            'z': float(str(_meta_dict['ImageDescription'][0].split('spacing=')[1]).split()[0])
        },
        'slices': int(str(_meta_dict['ImageDescription'][0].split('slices=')[1]).split()[0]),
        'frames': int(str(_meta_dict['ImageDescription'][0].split('frames=')[1]).split()[0]),
        'frames_interval': float(str(_meta_dict['ImageDescription'][0].split('finterval=')[1]).split()[0]),
        'time_frames': [{
            'mean': np.mean(_time_frame_image),
            'std': np.std(_time_frame_image)
        } for _time_frame_image in _series_image]
        # TODO: add location position X, Y & Z
    }
    save.image_properties(_experiment, _series_id, _properties)

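# Worked example (illustrative metadata string, not read from a real file): the ImageDescription
# tag written by ImageJ is a newline-separated key=value block, which is why splitting on
# 'spacing=' / 'slices=' / 'frames=' / 'finterval=' and taking the first whitespace-delimited
# token recovers each value.
if __name__ == '__main__':
    _example_description = 'ImageJ=1.52p\nimages=1200\nslices=30\nframes=40\nfinterval=15.0\nspacing=2.0\nunit=micron'
    print(float(_example_description.split('spacing=')[1].split()[0]))    # 2.0
    print(int(_example_description.split('slices=')[1].split()[0]))       # 30
    print(int(_example_description.split('frames=')[1].split()[0]))       # 40
    print(float(_example_description.split('finterval=')[1].split()[0]))  # 15.0
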
def series_image(_experiment, _series_id, _fiber_channel=True):
    _series_image_path = paths.serieses(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)
    if _fiber_channel:
        return np.array([
            np.array([_z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]])
            for _time_frame in range(_series_image.shape[0])
        ])
    else:
        return _series_image

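# Usage sketch: judging from the indexing above, the raw stack read by tifffile is ordered
# (time frame, z slice, channel, y, x), so with _fiber_channel=True the channel axis is dropped
# and the result is (time frame, z slice, y, x). 'SN41' / series 3 come from the visualization
# script further down.
if __name__ == '__main__':
    _fiber_image = series_image('SN41', 3)
    print(_fiber_image.shape)  # e.g. (frames, slices, height, width)
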
def main():
    _experiment = 'SN41'
    _series_id = 3
    _group = 'static_0_1'
    _x1, _y1, _z1 = 23, 226, 10
    _x2, _y2, _z2 = 228, 226, 10
    _time_frame = 34
    _series_image_path = paths.serieses(_experiment, _series_id)
    _image_properties = load.image_properties(_experiment, _series_id)
    _series_image = tifffile.imread(_series_image_path)
    _series_image_by_time_frames = [
        np.array([_z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]])
        for _time_frame in range(_series_image.shape[0])
    ]
    plt.imshow(_series_image_by_time_frames[_time_frame][30])
    plt.show()

def process_experiment(_experiment, _overwrite=False):
    _arguments = []
    for _series in paths.image_files(paths.serieses(_experiment)):
        _series_id = int(_series.split('_')[1])
        _image_properties = load.image_properties(_experiment, _series_id)
        _cells_coordinates = load.cell_coordinates_tracked_series_file_data(_experiment, _series_id)
        for _cell_id in range(len(_cells_coordinates)):
            for _degrees_xy, _degrees_z in product(DEGREES_XY, DEGREES_Z):
                _arguments.append({
                    'experiment': _experiment,
                    'series_id': _series_id,
                    'cell_id': _cell_id,
                    'degrees_xy': _degrees_xy,
                    'degrees_z': _degrees_z,
                    'cell_coordinates': _cells_coordinates,
                    'cell_type': 'real',
                    'resolutions': _image_properties['resolutions'],
                    'overwrite': _overwrite
                })
    with Pool(CPUS_TO_USE) as _p:
        for _ in tqdm(_p.imap_unordered(process_group, _arguments), total=len(_arguments), desc='Creating'):
            pass
        _p.close()
        _p.join()

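# Fan-out sketch (assumed grids): DEGREES_XY and DEGREES_Z are module-level constants defined
# elsewhere in the repository; with hypothetical values DEGREES_XY = [0, 45, 90, 135] and
# DEGREES_Z = [0], a series with 3 tracked cells yields 3 * 4 * 1 = 12 process_group tasks,
# each described by one argument dictionary in _arguments.
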
def process_group(_arguments):
    _time_frames_data = []
    _time_frames_amount = \
        len([_value for _value in _arguments['cell_coordinates'][_arguments['cell_id']] if _value is not None])

    # smooth coordinates
    _cells_coordinates_cell_smoothed = compute.smooth_coordinates_in_time(
        [_value for _value in _arguments['cell_coordinates'][_arguments['cell_id']] if _value is not None],
        _n=SMOOTH_AMOUNT)

    if _arguments['cell_type'] == 'real':
        _group = 'cell_' + str(_arguments['cell_id']) + '_' + str(_arguments['degrees_xy']) + '_' + \
                 str(_arguments['degrees_z'])
    elif _arguments['cell_type'] == 'fake':
        _group = 'fake_' + str(_arguments['cell_id']) + '_' + str(_arguments['degrees_xy']) + '_' + \
                 str(_arguments['degrees_z'])
    elif _arguments['cell_type'] == 'static':
        _group = 'static_' + str(_arguments['cell_id']) + '_' + str(_arguments['degrees_xy']) + '_' + \
                 str(_arguments['degrees_z'])
    else:
        raise Exception('No such cell type')

    # check if needed (missing time-point / properties file)
    if not _arguments['overwrite']:
        _missing = False
        for _time_frame in range(_time_frames_amount):
            _time_frame_pickle_path = \
                paths.structured(_arguments['experiment'], _arguments['series_id'], _group, _time_frame)
            if not os.path.isfile(_time_frame_pickle_path):
                _missing = True
                break
        _group_structured_path = paths.structured(_arguments['experiment'], _arguments['series_id'], _group)
        _properties_json_path = os.path.join(_group_structured_path, 'properties.json')
        if not os.path.isfile(_properties_json_path):
            _missing = True
        if not _missing:
            return

    # load image if needed
    if 'series_image_by_time_frames' not in _arguments:
        _series_image_path = paths.serieses(_arguments['experiment'], _arguments['series_id'])
        _series_image = tifffile.imread(_series_image_path)
        _arguments['series_image_by_time_frames'] = [
            np.array([_z[IMAGE_FIBER_CHANNEL_INDEX] for _z in _series_image[_time_frame]])
            for _time_frame in range(_series_image.shape[0])
        ]

    # running for each time point
    for _time_frame in range(_time_frames_amount):
        _time_frame_image = _arguments['series_image_by_time_frames'][_time_frame]
        _cell_coordinates = [_value for _value in _cells_coordinates_cell_smoothed[_time_frame]]

        # update coordinates if needed
        if 'x_change' in _arguments:
            _cell_coordinates[0] += _arguments['x_change']
        if 'y_change' in _arguments:
            _cell_coordinates[1] += _arguments['y_change']
        if 'z_change' in _arguments:
            _cell_coordinates[2] += _arguments['z_change']

        # compute padding xy
        _padding_x, _padding_y = \
            compute.axes_padding(_2d_image_shape=_time_frame_image[0].shape, _angle=_arguments['degrees_xy'])
        _cell_coordinates[0] += _padding_x
        _cell_coordinates[1] += _padding_y

        # rotate image and change axes
        _time_frame_image_rotated = np.array([rotate(_z, _arguments['degrees_xy']) for _z in _time_frame_image])
        _time_frame_image_swapped = np.swapaxes(_time_frame_image_rotated, 0, 1)

        if SHOW_PLOTS:
            plt.imshow(_time_frame_image_rotated[int(round(_cell_coordinates[2]))])
            plt.show()

        # update coordinates
        _image_center = compute.image_center_coordinates(_image_shape=reversed(_time_frame_image_rotated[0].shape))
        _cell_coordinates = compute.rotate_point_around_another_point(
            _point=_cell_coordinates,
            _angle_in_radians=math.radians(_arguments['degrees_xy']),
            _around_point=_image_center)

        # y is now z, z is now y
        _cell_coordinates[1], _cell_coordinates[2] = _cell_coordinates[2], _cell_coordinates[1]

        if SHOW_PLOTS:
            plt.imshow(_time_frame_image_swapped[int(round(_cell_coordinates[2]))])
            plt.show()

        # swap resolutions
        _new_resolutions = {
            'x': _arguments['resolutions']['x'],
            'y': _arguments['resolutions']['z'],
            'z': _arguments['resolutions']['y']
        }

        # second rotate, compute padding z
        _padding_x, _padding_y = \
            compute.axes_padding(_2d_image_shape=_time_frame_image_swapped[0].shape, _angle=_arguments['degrees_z'])
        _cell_coordinates[0] += _padding_x
        _cell_coordinates[1] += _padding_y

        # rotate image
        _time_frame_image_swapped_rotated = \
            np.array([rotate(_z, _arguments['degrees_z']) for _z in _time_frame_image_swapped])

        # update coordinates
        _image_center = compute.image_center_coordinates(
            _image_shape=reversed(_time_frame_image_swapped_rotated[0].shape))
        _cell_coordinates = compute.rotate_point_around_another_point(
            _point=_cell_coordinates,
            _angle_in_radians=math.radians(_arguments['degrees_z']),
            _around_point=_image_center)

        if SHOW_PLOTS:
            if _time_frame == 0 or _time_frame == 50 or _time_frame == 150:
                plt.imshow(_time_frame_image_swapped_rotated[int(round(_cell_coordinates[2]))])
                plt.show()

        # update resolutions
        _angle = abs(_arguments['degrees_z'])
        _new_resolution_x = (_angle / 90) * _new_resolutions['y'] + ((90 - _angle) / 90) * _new_resolutions['x']
        _new_resolution_y = (_angle / 90) * _new_resolutions['x'] + ((90 - _angle) / 90) * _new_resolutions['y']
        _new_resolutions['x'] = _new_resolution_x
        _new_resolutions['y'] = _new_resolution_y

        _image_z, _image_y, _image_x = _time_frame_image_swapped_rotated.shape
        if not 0 <= _cell_coordinates[0] < _image_x or not \
                0 <= _cell_coordinates[1] < _image_y or not \
                0 <= _cell_coordinates[2] < _image_z:
            break

        # add to array
        _time_frames_data.append({
            'cell': {
                'coordinates': {
                    'x': _cell_coordinates[0],
                    'y': _cell_coordinates[1],
                    'z': _cell_coordinates[2]
                }
            },
            'resolutions': _new_resolutions
        })

        # save to pickle
        _time_frame_pickle_path = \
            paths.structured(_arguments['experiment'], _arguments['series_id'], _group, _time_frame)
        save_lib.to_pickle(_time_frame_image_swapped_rotated, _time_frame_pickle_path)

    # save properties
    if _arguments['cell_type'] == 'real':
        _fake = False
        _static = False
    elif _arguments['cell_type'] == 'fake':
        _based_on_properties = \
            load.group_properties(_arguments['experiment'], _arguments['series_id'],
                                  'cell_' + _group.split('fake_')[1])
        _fake = True
        _static = False
    elif _arguments['cell_type'] == 'static':
        _fake = True
        _static = True
    else:
        raise Exception('No such cell type')

    _properties_data = {
        'experiment': _arguments['experiment'],
        'series_id': _arguments['series_id'],
        'cell_id': _arguments['cell_id'],
        'time_points': _time_frames_data,
        'fake': _fake,
        'static': _static
    }
    _group_structured_path = paths.structured(_arguments['experiment'], _arguments['series_id'], _group)
    _properties_json_path = os.path.join(_group_structured_path, 'properties.json')
    save_lib.to_json(_properties_data, _properties_json_path)

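# Worked example of the resolution update above (hypothetical values): after the axis swap the
# in-plane resolutions are the old x and z; rotating by _degrees_z blends them linearly, so at
# 0 degrees they are unchanged and at 90 degrees they are fully swapped.
if __name__ == '__main__':
    _res = {'x': 0.4, 'y': 2.0}  # hypothetical microns-per-pixel after the swap ('y' is the old z)
    for _deg in (0, 45, 90):
        _x = (_deg / 90) * _res['y'] + ((90 - _deg) / 90) * _res['x']
        _y = (_deg / 90) * _res['x'] + ((90 - _deg) / 90) * _res['y']
        print(_deg, round(_x, 2), round(_y, 2))  # 0 -> 0.4 2.0, 45 -> 1.2 1.2, 90 -> 2.0 0.4
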
def process_experiment(_experiment, _overwrite=False):
    for _series in paths.image_files(paths.serieses(_experiment)):
        process_series(_experiment, int(_series.split('_')[1]), _overwrite)