def right_id(self, value):
    """Set the ID of the 'right' view image.

    :param value: the image ID; expected to be a positive int, or ``None`` to clear
    """
    if value is not None:
        # isinstance() is checked FIRST: the original asserted `value > 0`
        # before the type check, so a non-numeric value (e.g. a str) raised
        # an uncaught TypeError from the comparison instead of being reported.
        # An explicit `if` also survives `python -O` (asserts are stripped).
        if not (isinstance(value, int) and value > 0):
            print_date("Invalid value: {}".format(value))
    # NOTE(review): the value is stored even when invalid — preserved from
    # the original, where the check only warned; confirm this is intended
    self._right_id = value
def as_omero_rois(self, orientation, image, args):
    """Convert an ROISegmentation object to a set of OMERO ROIs"""
    from ..omero.handlers import OMEROROIList
    verbose = args.verbose
    if verbose:
        print_date("Creating iterator of OMERO ROIs for %s..." % orientation, newline=False)
    rois = OMEROROIList(self, orientation, image, args)
    if verbose:
        print_date("OK", incl_date=False)
    return rois
def colour(self):
    """
    :TODO: currently only handles only RGBA colours
    """
    rgba = list(self._sff_segment.colour.value)
    # all four channels must be defined for the stored colour to be usable
    if rgba[0] is not None and rgba[1] is not None and rgba[2] is not None and rgba[3] is not None:
        # honour the configured transparency rather than the stored alpha
        rgba[3] = self._vtk_args.transparency
        return rgba
    # one or more channels undefined: fall back to a random opaque colour
    fallback = random(), random(), random(), 1
    print_date("Warning: random colour {} for segment {}".format(
        tuple([round(channel, 4) for channel in fallback]), self._sff_segment.id))
    return fallback
def export(self, fn, args, configs, *_args, **_kwargs):
    """Export ROIs as a file

    The file extension determines the file format:

    * ``.roi`` outputs an XML file according to ``roi.xsd``, which ships with ``sfftk-plus``

    * ``.json`` outputs one JSON file for each orientation slice. For example, if there are
      20 slices in the x-direction, 30 slices in the y-direction and 40 slices in the
      z-direction then there will be 90 JSON files in all

    :param fn: the output file name; the extension is important
    :param args: positional arguments to be passed on
    :param _kwargs: keyword arguments to be passed on; 'version' and 'encoding'
        override the XML declaration (defaults "1.0" and "UTF-8")
    :return: an ``os.EX_*`` exit status for ``.roi`` output, the status of
        ``_export_json`` for ``.json`` output, or ``None`` for an
        unrecognised extension
    """
    import re
    # create the path if it doesn't exist
    path = os.path.dirname(os.path.abspath(fn))
    if not os.path.exists(path):
        if args.verbose:
            print_date("Path not found: {}. It will be created".format(path))
        os.makedirs(path, mode=0o0755)
    else:
        if args.verbose:
            print_date("Path found: {}".format(path))
    if re.match(r".*\.roi$", fn, re.IGNORECASE):
        with open(fn, 'w') as f:
            self.roi_seg.set_image_ids(self.header.convert())
            # dict.get with a default replaces the redundant
            # `get(...) if ... in _kwargs else ...` construction
            version = _kwargs.get('version', "1.0")
            encoding = _kwargs.get('encoding', "UTF-8")
            f.write('<?xml version="{}" encoding="{}"?>\n'.format(version, encoding))
            self.roi_seg.export(f, 0)
        exit_status = os.EX_OK
        # fix: exit_status was previously computed but never returned, so the
        # caller (handle_roi_create) propagated None instead of a status code
        return exit_status
    elif re.match(r".*\.json$", fn, re.IGNORECASE):
        # in the case of outputing JSON the provided filename is not the actual filename into which data will be
        # written; rather, the filename conveys: i) the filename base; ii) the output format
        # the fn_root var is constructed from the fn argument
        fn_base = os.path.basename(fn)
        fn_root = '.'.join(fn_base.split('.')[:-1]) + '-{}.json'
        return self._export_json(path, fn_root, args, configs, *_args, **_kwargs)
def read(self):
    """Read the configs, then validate the ``CONNECT_WITH`` setting.

    ``CONNECT_WITH`` must be present and be one of 'LOCAL' or 'REMOTE';
    violations are only reported via ``print_date`` (no exception is
    raised), matching the original behaviour.
    """
    super(SFFPConfigs, self).read()
    # CONNECT_WITH must be defined in sffp.conf
    connect_with_values = ['LOCAL', 'REMOTE']
    # explicit checks instead of `assert`: asserts are stripped under
    # `python -O`, which would silently skip this validation
    if 'CONNECT_WITH' not in self:
        print_date(
            "CONNECT_WITH not found in current configs (with value: {}".
            format(", ".join(connect_with_values)))
    elif self['CONNECT_WITH'] not in connect_with_values:
        # CONNECT_WITH can only have specified values
        print_date(
            "Invalid value for var CONNECT_WITH: {}; must be one of: {}".
            format(self['CONNECT_WITH'], ", ".join(connect_with_values)))
def export(self, fn, args, configs):
    """Write each segment's meshes as decimated binary ``.vtp`` files plus a JSON manifest.

    One ``.vtp`` file is written per mesh (named ``<fn>_<segment_id>_m<index>.vtp``)
    into ``args.output_path``, and a ``<fn>_vtp_segments.json`` manifest records,
    for every segment, its id, colour and the mesh file names.
    """
    manifest = dict()
    manifest['segments'] = list()
    cx, cy, cz = self.center_point
    for segment in self.segments:
        entry = dict()
        entry['id'] = segment.id
        entry['colour'] = [float(channel) for channel in segment.colour]
        entry['meshes'] = list()
        for mesh_index, mesh in enumerate(segment.meshes):
            if args.center:
                # center: shift the mesh so the segmentation centre sits at the origin
                polydata = mesh.translate((-cx, -cy, -cz))
            else:
                polydata = mesh.vtk_obj
            # decimate: drop ~90% of triangles while keeping topology intact
            decimator = vtk.vtkDecimatePro()
            decimator.SetInputData(polydata)
            decimator.SetTargetReduction(0.9)
            decimator.PreserveTopologyOn()
            decimator.Update()
            vtp_fn = os.path.join(args.output_path,
                                  '{}_{}_m{}.vtp'.format(fn, segment.id, mesh_index))
            writer = vtk.vtkXMLPolyDataWriter()
            writer.SetFileName(vtp_fn)
            print_date("Exporting segment to {}".format(vtp_fn))
            writer.SetInputData(decimator.GetOutput())
            writer.SetDataModeToBinary()
            writer.SetHeaderTypeToUInt64()
            writer.SetCompressorTypeToZLib()
            writer.Write()
            entry['meshes'].append(os.path.basename(vtp_fn))
        manifest['segments'].append(entry)
    manifest['segment_count'] = len(self.segments)
    manifest_fn = os.path.join(args.output_path, '{}_vtp_segments.json'.format(fn))
    with open(manifest_fn, 'w') as f:
        json.dump(manifest, f, indent=4, sort_keys=True)
    if args.verbose:
        print_date("Exported metadata to {}".format(manifest_fn))
def reset_ids(self, args, configs, *args_, **kwargs_):
    """Reset the top/front/right image IDs.

    When ``-I/--image-name-root`` is given the IDs are looked up in the
    postgres image DB named in ``configs``; when ``--top-front-right`` is
    given they are taken directly from it; otherwise (or on failure) all
    three IDs are cleared to ``None``.
    """
    try:
        if args.image_name_root is not None:
            cw = configs['CONNECT_WITH']  # either LOCAL or REMOTE
            # server settings
            # fix: the format string previously hard-coded '******' for user
            # and password (a redaction artefact) leaving only 4 placeholders
            # for 5 arguments, so host received the user and port received
            # the password; all five fields are interpolated correctly now
            conn_str = "dbname='{}' user='{}' password='{}' host='{}' port='{}'".format(
                configs['IMAGE_DB_{}_NAME'.format(cw)],
                configs['IMAGE_DB_{}_USER'.format(cw)],
                configs['IMAGE_DB_{}_PASS'.format(cw)],
                configs['IMAGE_DB_{}_HOST'.format(cw)],
                configs['IMAGE_DB_{}_PORT'.format(cw)],
            )
            conn = psycopg2.connect(conn_str)
            try:
                cur = conn.cursor()
                self.top_id = get_image_id(cur, args.image_name_root, 'top',
                                           quick_pick=args.quick_pick)
                self.front_id = get_image_id(cur, args.image_name_root, 'front',
                                             quick_pick=args.quick_pick)
                self.right_id = get_image_id(cur, args.image_name_root, 'right',
                                             quick_pick=args.quick_pick)
            finally:
                # fix: the connection was previously never closed (leak)
                conn.close()
            # sanity check: views must not share an ID
            assert self.top_id != self.front_id and self.right_id != self.front_id
        elif args.top_front_right is not None:
            self.top_id, self.front_id, self.right_id = args.top_front_right
        else:
            print_date(
                "Neither -I/--image-name-root nor --top-front-right arguments not set. Image IDs will be excluded."
            )
            self.top_id = None
            self.front_id = None
            self.right_id = None
    except AssertionError:
        print_date(
            "Invalid image IDs or image IDs not found. Did you use -I/--image-name-root option?"
        )
        self.top_id = None
        self.front_id = None
        self.right_id = None
def handle_roi_del(args, configs):
    """
    Handle `delroi` subcommand

    :param args: parsed arguments
    :type args: ``argparse.Namespace``
    :param configs: configurations object
    :type configs: :py:class:`sfftk.core.configs.Configs`
    :return int status: status
    """
    from .omero.handlers import OMEROConnection
    with OMEROConnection(args, configs) as connection:
        if args.roi_id:
            # delete a single ROI by its ID
            if args.verbose:
                print_date("Deleting ROI %s" % args.roi_id)
            connection.deleteRoi(args.roi_id)
        elif args.image_id:
            # delete every ROI attached to the given image
            rois = connection.rois(args.image_id)
            roi_count = len(rois)
            for roi in rois:
                roi_id = roi.getId().getValue()
                if args.verbose:
                    print_static("Deleting ROI %s" % roi_id)
                connection.deleteRoi(roi_id)
            print_static("\n", incl_date=False)
            print_date("Deleted {} ROIs".format(roi_count))
        else:
            print_date(
                "Please specify an ROI ID. Search using 'sffp list --rois [--image-id <image_id>]'"
            )
    return os.EX_OK
def handle_view(args, configs):
    """
    Handle `view` subcommand

    :param args: parsed arguments
    :type args: ``argparse.Namespace``
    :param configs: configurations object
    :type configs: :py:class:`sfftk.core.configs.Configs`
    :return int status: status
    """
    if not args.visualise:
        # plain (non-3D) view: delegate to sfftk
        return sff.handle_view(args, configs)
    # visualise: render the segmentation in 3D with VTK
    if not re.match(r'.*\.(sff|hff|json|xml|h5|hdf5)$', args.from_file, re.IGNORECASE):
        print_date("Unsupported file type: {}".format(args.from_file))
        return os.EX_DATAERR
    seg = schema.SFFSegmentation.from_file(args.from_file)
    from .formats.vtkmesh import VTKSegmentation
    VTKSegmentation(seg, args, configs).render()
    return os.EX_OK
def handle_export(args, configs):
    """
    Handle `export` subcommand

    :param args: parsed arguments
    :type args: ``argparse.Namespace``
    :param configs: configurations object
    :type configs: :py:class:`sfftk.core.configs.Configs`
    :return int status: status
    """
    if args.verbose:
        print_date("Converting segments in {} to VTP files".format(
            args.sff_file))
    from . import schema
    if re.match(r'.*\.(sff|hff|json|xml|h5|hdf5)$', args.sff_file,
                re.IGNORECASE):
        sff_seg = schema.SFFPSegmentation.from_file(args.sff_file)
    else:
        print_date("Unsupported file type: {}".format(args.sff_file))
        # fix: previously returned a bare 1; use the same os.EX_* status the
        # sibling handlers (e.g. handle_view) return for bad input
        return os.EX_DATAERR
    vtk_seg = sff_seg.as_vtk(args, configs)
    # output file name root: the input file name minus its extension
    out_fn = os.path.basename(".".join(args.sff_file.split('.')[:-1]))
    vtk_seg.export(out_fn, args, configs)
    return os.EX_OK
def render(self):
    """Render to display

    Builds a VTK render window containing all segment meshes, optional cube
    axes (``--cube-axes``) and an orientation-marker widget, then starts the
    interactive event loop (blocks until the window is closed).
    """
    # define the renderer
    ren = vtk.vtkOpenGLRenderer()
    ren.SetBackground(*self._vtk_args.background_colour)
    # populate the renderer with the meshes
    for segment in self.segments:
        ren = segment.render(ren)
    # render window
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren)
    if self._vtk_args.full_screen:
        renWin.FullScreenOn()
    # render window interactor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
    # fly-mode constants, from vtkCubeAxesActor.h:
    # define VTK_FLY_OUTER_EDGES 0
    # define VTK_FLY_CLOSEST_TRIAD 1
    # define VTK_FLY_FURTHEST_TRIAD 2
    # define VTK_FLY_STATIC_TRIAD 3
    # define VTK_FLY_STATIC_EDGES 4
    if self._vtk_args.cube_axes is not None:
        cubeAxesActor = vtk.vtkCubeAxesActor()
        cubeAxesActor.SetBounds(ren.ComputeVisiblePropBounds())
        cubeAxesActor.SetCamera(ren.GetActiveCamera())
        # fix: honour the user-selected fly mode; a hard-coded
        # SetFlyModeToStaticEdges() call previously overrode this value
        # immediately after it was set
        cubeAxesActor.SetFlyMode(self._vtk_args.cube_axes)
        cubeAxesActor.GetTitleTextProperty(0).SetColor(1.0, 1.0, 1.0)
        cubeAxesActor.GetTitleTextProperty(1).SetColor(1.0, 1.0, 1.0)
        cubeAxesActor.GetTitleTextProperty(2).SetColor(1.0, 1.0, 1.0)
        cubeAxesActor.XAxisMinorTickVisibilityOff()
        cubeAxesActor.YAxisMinorTickVisibilityOff()
        cubeAxesActor.ZAxisMinorTickVisibilityOff()
        ren.AddActor(cubeAxesActor)
    # axes: display axes by default
    if not self._vtk_args.no_orientation_axes:
        axesActor = vtk.vtkAxesActor()
        axesWidget = vtk.vtkOrientationMarkerWidget()
        axesWidget.SetOrientationMarker(axesActor)
        axesWidget.SetViewport(0, 0, 0.1, 0.1)
        axesWidget.SetInteractor(iren)
        axesWidget.SetEnabled(1)
        ren.ResetCamera()
    # hello...
    print_date("Initialising...")
    iren.Initialize()
    print_date("Launching VTK viewer...")
    iren.Start()
    print_date("3D view completed.")
def parse_args(_args, use_shlex=False):
    """
    Parse and check command-line arguments

    Subcommand handlers defined in __main__.py (e.g. handle_conver(...)) should not have to check arguments for consistency

    :param str _args: command string
    :return: parsed arguments
    :rtype: `argparse.Namespace`
    :return: config dict-like object
    :rtype: ``sfftk.core.configs.Configs``
    """
    # use shlex
    if use_shlex:
        try:
            assert isinstance(_args, str)
        except AssertionError:
            return os.EX_USAGE, None
        import shlex
        _args = shlex.split(_args)
    # sfftk subcommands are delegated to the sfftk parser wholesale
    if len(_args) > 0:
        if _args[0] in ['convert', 'notes', 'prep', 'config']:
            return parser.parse_args(_args)
    """
    :TODO: handle credentials in configs here instead of sffplus.py
    """
    # if we have no subcommands then show the available tools
    if len(_args) == 0:
        parser.Parser.print_help()
        return os.EX_OK, None
    # if we only have a subcommand then show that subcommand's help
    elif len(_args) == 1:
        if _args[0] == '-V' or _args[0] == '--version':
            from .. import SFFTKPLUS_VERSION
            print_date("sfftk-plus version: {}".format(SFFTKPLUS_VERSION))
            return os.EX_OK, None
        elif _args[0] in parser.Parser._actions[2].choices.keys():
            # NOTE(review): exec-based dispatch on the subcommand name;
            # a dict of parser objects would be safer — TODO confirm
            try:
                exec('parser.{}_parser.print_help()'.format(_args[0]))
            except AttributeError:
                exec('{}_parser.print_help()'.format(_args[0]))
            return os.EX_OK, None
    elif len(_args) == 2:
        # subcommand + sub-subcommand: show the sub-subcommand's help
        if _args[0] == 'roi':
            if _args[1] == 'create':
                if _args[1] in _dict_iter_keys(
                        parser.Parser._actions[2].choices['roi']._actions[1].choices):
                    exec('{}_roi_parser.print_help()'.format(_args[1]))
                    return os.EX_OK, None
            elif _args[1] == 'attach':
                if _args[1] in _dict_iter_keys(
                        parser.Parser._actions[2].choices['roi']._actions[1].choices):
                    exec('{}_roi_parser.print_help()'.format(_args[1]))
                    return os.EX_OK, None
            elif _args[0] == 'del':
                # NOTE(review): this tests _args[0] inside the `_args[0] == 'roi'`
                # branch, so it can never be 'del' here (dead code) — looks like
                # it was meant to be _args[1]; confirm against the parser layout
                if _args[1] in _dict_iter_keys(
                        parser.Parser._actions[2].choices['roi']._actions[1].choices):
                    exec('{}_roi_parser.print_help()'.format(_args[1]))
                    return os.EX_OK, None
        elif _args[0] == 'notes':
            if _args[1] in _dict_iter_keys(
                    parser.Parser._actions[2].choices['notes']._actions[1].choices):
                exec('parser.{}_notes_parser.print_help()'.format(_args[1]))
                return os.EX_OK, None
        elif _args[0] == 'prep':
            if _args[1] in _dict_iter_keys(
                    parser.Parser._actions[2].choices['prep']._actions[1].choices):
                exec('parser.{}_prep_parser.print_help()'.format(_args[1]))
                return os.EX_OK, None
        elif _args[0] == 'config':
            if _args[1] in _dict_iter_keys(
                    parser.Parser._actions[2].choices['config']._actions[1].choices):
                exec('parser.{}_config_parser.print_help()'.format(_args[1]))
                return os.EX_OK, None
    # parse args
    args = parser.Parser.parse_args(_args)
    from sfftk.core.configs import get_config_file_path, load_configs, Configs
    config_file_path = get_config_file_path(args,
                                            user_conf_fn='sff.conf',
                                            user_folder='~/.sfftk',
                                            config_class=Configs)
    configs = load_configs(config_file_path, config_class=Configs)
    # config
    if args.subcommand == 'config':
        if args.verbose:
            print_date("Reading configs from {}...".format(config_file_path))
        # handle config-specific argument modifications here
        if args.config_subcommand == 'del':
            if args.name not in configs:
                print_date("Missing config with name '{}'. Aborting...".format(
                    args.name))
                return os.EX_USAGE, configs
            # if force pass
            if not args.force:
                default_choice = 'n'
                # get user choice
                user_choice = input(
                    "Are you sure you want to delete config '{}' [y/N]? ".format(
                        args.name)).lower()
                if user_choice == '':
                    choice = default_choice
                elif user_choice == 'n' or user_choice == 'N':
                    choice = 'n'
                elif user_choice == 'y' or user_choice == 'Y':
                    choice = 'y'
                else:
                    # NOTE(review): the '{}' placeholder is never filled in
                    # (no .format(user_choice)) — TODO confirm intended message
                    print_date("Invalid choice: '{}'")
                    return os.EX_DATAERR, configs
                # act on user choice
                if choice == 'n':
                    print_date(
                        "You have opted to cancel deletion of '{}'".format(
                            args.name))
                    return os.EX_OK, configs
                elif choice == 'y':
                    pass
        elif args.config_subcommand == 'set':
            if args.name in configs:
                # if force pass
                if not args.force:
                    default_choice = 'n'
                    # get user choice
                    user_choice = input(
                        "Are you sure you want to overwrite config '{}={}' [y/N]? "
                        .format(args.name, configs[args.name])).lower()
                    if user_choice == '':
                        choice = default_choice
                    elif user_choice == 'n' or user_choice == 'N':
                        choice = 'n'
                    elif user_choice == 'y' or user_choice == 'Y':
                        choice = 'y'
                    else:
                        # NOTE(review): placeholder never filled (see 'del' above)
                        print_date("Invalid choice: '{}'")
                        return os.EX_DATAERR, configs
                    # act on user choice
                    if choice == 'n':
                        print_date(
                            "You have opted to cancel overwriting of '{}'".
                            format(args.name))
                        return os.EX_OK, configs
                    elif choice == 'y':
                        pass
    elif args.subcommand == 'list':
        # enforce local if specified
        if args.local:
            configs['CONNECT_WITH'] = 'LOCAL'
        elif args.remote:
            configs['CONNECT_WITH'] = 'REMOTE'
    # roi
    elif args.subcommand == 'roi':
        # create
        if args.roi_subcommand == 'create':
            # make the output file name and check if it exists
            if args.output is None:
                ofn = '.'.join(args.sff_file.split('.')[:-1]) + '.{}'.format(
                    args.format)
                if os.path.exists(ofn) and not args.overwrite:
                    print_date("Output file exists. Use --overwrite to replace it.")
                    return os.EX_USAGE, configs
                else:
                    print_date("Using output file {}".format(ofn))
                    args.output = ofn
            # ensure valid primary_descriptor
            if args.primary_descriptor:
                try:
                    assert args.primary_descriptor in [
                        'three_d_volume', 'mesh_list', 'shape_primitive_list'
                    ]
                except AssertionError:
                    print_date('Invalid value for primary_descriptor: %s' %
                               args.primary_descriptor)
                    return os.EX_DATAERR, configs
            # ensure valid transparencey
            if args.transparency:
                try:
                    assert 0 <= args.transparency <= 1
                except AssertionError:
                    print_date(
                        "Invalid value for transparency: {}; should be between 0 and 1 (inclusive)"
                        .format(args.transparency))
                    return os.EX_DATAERR, configs
            # ensure mask value is an integer (or long)
            if args.mask_value:
                try:
                    assert isinstance(
                        args.mask_value, int)  # or isinstance(args.mask_value, long)
                except AssertionError:
                    print_date("Non-integer for mask value")
                    return os.EX_DATAERR, configs
            # quick pick values are 1-based (not 0-based)
            if args.quick_pick is not None:
                if args.quick_pick <= 0:
                    print_date(
                        "Invalid value for --quick-pick. Should be 1-based value of item in list e.g. the value of "
                        "'a' in ['a', 'b'] is 1 (one).")
                    return os.EX_DATAERR, configs
                else:
                    args.quick_pick -= 1  # make it a 0-based index for internal use
            # if we don't have --top-front-right set
            # then we can choose a default for -I/--image-name-root
            if not args.top_front_right:
                if args.image_name_root is None:
                    image_name_root = os.path.basename('.'.join(
                        args.sff_file.split('.')[:-1]))
                    args.image_name_root = image_name_root
                    print_date("Setting image name root to {}".format(
                        image_name_root))
    # attach
    elif args.subcommand == 'attach':
        # enforce local if specified
        if args.local:
            configs['CONNECT_WITH'] = 'LOCAL'
        elif args.remote:
            configs['CONNECT_WITH'] = 'REMOTE'
    # del
    elif args.subcommand == 'del':
        # enforce local if specified
        if args.local:
            configs['CONNECT_WITH'] = 'LOCAL'
        elif args.remote:
            configs['CONNECT_WITH'] = 'REMOTE'
        # ensure that we have either an image or ROI ID
        if not args.image_id and not args.roi_id:
            raise ValueError('Missing either image (-i) or ROI (-r) ID')
        # ensure that both image and ROI ID are not set simultaneously
        if args.image_id and args.roi_id:
            raise ValueError(
                'Only set one of image (-i) or ROI (-r) ID; not both')
    # tests
    elif args.subcommand == 'tests':
        # normalise tool list
        # if 'all' is specified together with others then it should simply be 'all'
        if 'all' in args.tool:
            args.tool = ['all']
        for tool in args.tool:
            try:
                assert tool in parser.tool_list
            except AssertionError:
                print_date(
                    "Unknown tool: {}; Available tools for test: {}".format(
                        tool, ", ".join(parser.tool_list)))
                return os.EX_USAGE, configs
        if args.verbosity:
            try:
                assert args.verbosity in range(4)
            except:
                # NOTE(review): `verbosity_range` is not defined anywhere in
                # this scope — this error path would raise NameError; confirm
                print_date("Verbosity should be in {}-{}: {} given".format(
                    verbosity_range[0], verbosity_range[-1], args.verbosity))
                return os.EX_USAGE, configs
        # if args.verbosity:
        #     try:
        #         assert args.verbosity in range(4)
        #     except:
        #         raise ValueError("Verbosity should be in %s-%s: %s given" % (
        #             verbosity_range[0], verbosity_range[-1], args.verbosity))
    # view3d
    elif args.subcommand == 'view':
        if args.primary_descriptor:
            try:
                assert args.primary_descriptor in [
                    'three_d_volume', 'mesh_list', 'shape_primitive_list'
                ]
            except:
                raise ValueError('Invalid value for primary_descriptor: %s' %
                                 args.primary_descriptor)
        # ensure valid transparency
        assert 0 <= args.transparency <= 1
        # ensure valid background colours
        assert 0 <= args.background_colour[0] <= 1
        assert 0 <= args.background_colour[1] <= 1
        assert 0 <= args.background_colour[2] <= 1
        # view contours
        # don't specify -A and -X, -Y, and/or -Z
        if args.all_contours or args.x_contours or args.y_contours or args.z_contours:
            assert (
                args.all_contours and not
                (args.x_contours or args.y_contours or args.z_contours)
            ) or (not args.all_contours and
                  (args.x_contours or args.y_contours or args.z_contours))
        # cube axes validity
        if args.cube_axes is not None:
            assert (0 <= args.cube_axes <= 4)
    return args, configs
def get_image_id(cursor, image_name_root, view, ext='map', quick_pick=None):
    """Obtain the image IDs for top, front and right images by EMDB accession code

    :param cursor: cursor to postgres connection
    :type cursor: psycopg2.Cursor
    :param str image_name_root: accession code in lowercase e.g. 'emd_1080'
    :param str view: the view of the image; either 'top', 'front' or 'right'
    :param str ext: extension of image file e.g. 'map'
    :param quick_pick: optional index used to pick one ID when several match
    :return int image_id: the image ID or 0 for fail (no or multiple image IDs found)
    """
    # explicit checks instead of `assert`: asserts are stripped under
    # `python -O`, which would silently disable all of this validation
    if not isinstance(cursor, psycopg2.extensions.cursor):
        print_date("Not psycopg2.extensions.cursor object: {}".format(cursor))
        sys.exit(1)
    views = ['top', 'front', 'right']
    if view not in views:
        print_date("Invalid view: {}; should be one of: {}".format(
            view, ", ".join(views)))
        sys.exit(1)
    exts = ['map', 'mrc', 'rec']  # supported file extensions
    if ext not in exts:
        print_date("Invalid extension: {}; should be one of {}".format(
            ext, ", ".join(exts)))
        sys.exit(1)
    # NOTE(review): image_name_root/view are interpolated straight into the
    # SQL; prefer a parameterised query if these can come from untrusted input
    query_string = "select id from image where image.name like '{}-{}.%'".format(
        image_name_root, view)
    cursor.execute(query_string)
    rows = cursor.fetchall()
    if rows:
        if len(rows) == 1:
            return rows[0][0]
        print_date("Multiple image IDs for {}-{}: {}".format(
            image_name_root, view, rows))
        if quick_pick is not None:
            print_date("Quick picking an ID from index {}".format(quick_pick))
            return rows[quick_pick][0]
        return os.EX_OK  # 0: ambiguous, nothing chosen
    print_date("No image IDs found for view '{}'".format(view))
    return os.EX_OK  # 0: not found
def _export_rois_json(self,
                      path,
                      fn_root,
                      orientation,
                      args,
                      configs,
                      fill_alpha=1.0,
                      stroke_alpha=1.0,
                      font_size=2.0,
                      stroke_colour=(1, 1, 0),
                      stroke_width=0.22):
    """Export ROIs for this orientation as JSON (instead of as XML)

    :param fn_root: the output file name root; the image ID (or orientation) and the slice value will be included
    :param orientation: character specifying the orientation; either 'x', 'y', or 'z'
    :param fill_alpha: the alpha value for the shape fill; default 1.0
    :param stroke_alpha: the alpha value for the shape stroke; default 1.0
    :param font_size: the font size for text
    :param stroke_colour: the colour of the shape stroke; default (0, 1, 0) (green)
    :param stroke_width: the width of the shape stroke ; default 0.22
    :return exit_status: the exit status (see Python's ``os`` module for details)
    """
    # NOTE(review): the docstring says the stroke_colour default is green
    # (0, 1, 0) but the signature default is (1, 1, 0) (yellow) — confirm
    try:
        assert orientation in ORIENTATIONS
    except AssertionError as a:
        print_date(
            "Invalid value for 'orientation'; should be in [{}]".format(
                ', '.join(ORIENTATIONS)))
        print(str(a))
        return os.EX_DATAERR
    # ensure valid shape params
    try:
        assert 0 <= fill_alpha <= 1
    except AssertionError as a:
        print_date(
            "Invalid value for 'fill_alpha' ({}); should be in [0-1]".
            format(fill_alpha))
        print(str(a))
        return os.EX_DATAERR
    try:
        assert 0 <= stroke_alpha <= 1
    except AssertionError as a:
        print_date(
            "Invalid value for 'stroke_alpha' ({}); should be in [0-1]".
            format(stroke_alpha))
        print(str(a))
        return os.EX_DATAERR
    try:
        assert 0 <= font_size <= 30  # arbitrary bounds
    except AssertionError as a:
        print_date(
            "Invalid value for 'font_size' ({}); should be in [0-30]".
            format(font_size))
        print(str(a))
        return os.EX_DATAERR
    try:
        assert 3 <= len(stroke_colour) <= 4
        # accept RGB (assume opaque) or RGBA
        if len(stroke_colour) == 3:
            r, g, b = stroke_colour
            a = 1
        elif len(stroke_colour) == 4:
            r, g, b, a = stroke_colour
        assert 0 <= r <= 1 and 0 <= g <= 1 and 0 <= b <= 1 and 0 <= a <= 1
    except AssertionError as a:
        # NOTE(review): `a` here shadows the alpha channel bound above
        print_date("Invalid value for 'stroke_colour' ({})".format(
            str(stroke_colour)))
        print(str(a))
        return os.EX_DATAERR
    try:
        assert 0 <= stroke_width <= 10
    except AssertionError as a:
        # NOTE(review): the message is missing its '{}' placeholder, so the
        # .format(stroke_width) argument is silently dropped — confirm/fix
        print_date(
            "Invalid value for 'stroke_width' (); should be in [0-10]".
            format(stroke_width))
        print(str(a))
        return os.EX_DATAERR
    # stroke colour
    stroke_colour_ = rgba_to_hex(stroke_colour)
    import json
    # x contours
    grouped_contours = dict()
    # we want to group contours by slice value (o)
    # each json file will have all the contours for all the segments batched together
    # we need to store the colour for each segment and the contours for each segment
    for segment in self.segments:
        for contour in getattr(segment.contours,
                               '{}_contours'.format(orientation)):
            # get the value for this orientation slice level
            if orientation == 'x':
                o = int(contour[0][0])
            elif orientation == 'y':
                o = int(contour[0][1])
            elif orientation == 'z':
                o = int(contour[0][2])
            if o not in grouped_contours:
                grouped_contours[o] = dict()
            if segment.id not in grouped_contours[o]:
                grouped_contours[o][segment.id] = dict()
                grouped_contours[o][segment.id]['colour'] = segment.colour
                grouped_contours[o][segment.id]['contours'] = [contour]
            else:
                grouped_contours[o][segment.id]['contours'] += [contour]
    # image size
    sizeX, sizeY, sizeZ = self.header.get_image_size(args, configs)
    max_size = max([sizeX, sizeY, sizeZ])
    # NOTE(review): this overwrites the `stroke_width` PARAMETER — the value
    # passed by the caller (and validated above) is discarded; confirm
    stroke_width = round(
        0.24 / 100 * max_size, 3
    )  # where 0.12 was obtained from having 0.22 in an image with 191px
    # y-shifts: since the image is vertically centered we need to adjust y coords for front (x) and right (y) points
    # NOTE(review): these three y-shift values are computed but never used below
    top_y_shift = 0.5 * (sizeX - sizeY)
    front_y_shift = 0.5 * (sizeY - sizeZ)
    right_y_shift = 0.5 * (sizeX - sizeZ)
    # we write a json file for each slice
    for o, segment in grouped_contours.items():
        shapes = list()
        for segment_id, segment_contours in segment.items():
            colour = segment_contours['colour']
            for contour in segment_contours['contours']:
                # build an SVG-like path string: 'M' for the first point,
                # 'L' for subsequent points, 'z' to close the path
                point_str = ''
                for point_id, point in contour.items():
                    if point_id == 0:
                        if orientation == 'x':
                            point_str += 'M {:.2f} {:.2f} '.format(
                                point[1], sizeZ - point[2])
                        elif orientation == 'y':
                            point_str += 'M {:.2f} {:.2f} '.format(
                                *self._rotate(point[0], point[2], -90, (
                                    sizeX / 2, sizeY / 2)))
                        elif orientation == 'z':
                            point_str += 'M {:.2f} {:.2f} '.format(
                                *self._rotate(point[1], point[0], -90, (
                                    sizeY / 2, sizeX / 2)))
                    else:
                        if orientation == 'x':
                            point_str += 'L {:.2f} {:.2f} '.format(
                                point[1], sizeZ - point[2])
                        elif orientation == 'y':
                            point_str += 'L {:.2f} {:.2f} '.format(
                                *self._rotate(point[0], point[2], -90, (
                                    sizeX / 2, sizeY / 2)))
                        elif orientation == 'z':
                            point_str += 'L {:.2f} {:.2f} '.format(
                                *self._rotate(point[1], point[0], -90, (
                                    sizeY / 2, sizeX / 2)))
                # if the last contour point is the same as the first the it is closed
                # if contour[point_id] == contour[0]:
                point_str += 'z'
                shapes.append({
                    "fontStyle": "Bold",
                    "fillAlpha": fill_alpha,
                    "strokeAlpha": stroke_alpha,
                    "id": None,
                    "points": point_str,
                    "fontSize": font_size,
                    "theZ": o,
                    "strokeColor": stroke_colour_,
                    "theT": 0,
                    "type": "Polygon",
                    "textValue": str(segment_id),
                    "strokeWidth": stroke_width,
                    "fillColor": rgba_to_hex(
                        colour
                    ),  # if contour[point_id] == contour[0] else None,
                })
        # write the shapes for this slice; the output dir is the image ID for
        # this orientation when known, otherwise the orientation letter
        if orientation == 'x':
            if self.roi_seg.image_ids.front is not None:
                odir = str(self.roi_seg.image_ids.front)
                ofn = fn_root.format(o)
            else:
                odir = orientation
                ofn = fn_root.format(o)
        elif orientation == 'y':
            if self.roi_seg.image_ids.right is not None:
                odir = str(self.roi_seg.image_ids.right)
                ofn = fn_root.format(o)
            else:
                odir = orientation
                ofn = fn_root.format(o)
        elif orientation == 'z':
            if self.roi_seg.image_ids.top is not None:
                odir = str(self.roi_seg.image_ids.top)
                ofn = fn_root.format(o)
            else:
                odir = orientation
                ofn = fn_root.format(o)
        # write out the JSON for this orientation and this slice
        if not os.path.exists(os.path.join(path, odir)):
            os.makedirs(os.path.join(path, odir), mode=0o0755)
        with open(os.path.join(path, odir, ofn), 'w') as f:
            json.dump([{"shapes": shapes}], f)
    return
def handle_roi_create(args, configs):
    """
    Handle `createroi` subcommand

    :param args: parsed arguments
    :type args: ``argparse.Namespace``
    :param configs: configurations object
    :type configs: :py:class:`sfftk.core.configs.Configs`
    :return int exit_status: exit status
    """
    verbose = args.verbose
    if re.match(r'.*\.(sff|hff|json|xml|h5|hdf5)$', args.sff_file, re.IGNORECASE):
        # convert an EMDB-SFF file to an ROI file
        from .schema import SFFPSegmentation
        if verbose:
            print_date("Reading in EMDB-SFF file {}".format(args.sff_file))
        sff_seg = SFFPSegmentation.from_file(args.sff_file)
        # convert segments to VTK meshes
        if verbose:
            print_date("Converting EMDB-SFF segments to VTK meshes")
        vtk_seg = sff_seg.as_vtk(args, configs)
        # slice to get contours
        if verbose:
            print_date("Slicing segmentation to get ROI contours...")
        vtk_seg.slice()
        # convert to ROI using sfftkplus.schema.roi
        if verbose:
            print_date("Converting to ROI using roi.xsd...")
        roi_seg = vtk_seg.as_roi(args, configs)
        # export to file
        if verbose:
            print_date("Writing output to {}".format(args.output))
        exit_status = roi_seg.export(args.output, args, configs)
        if verbose:
            print_date("Done")
    elif re.match(r'.*\.roi$', args.sff_file, re.IGNORECASE):
        # an existing ROI file: re-export, optionally with fresh image IDs
        from .formats import roi
        if verbose:
            print_date("Reading in ROI file {}".format(args.sff_file))
        roi_seg = roi.ROISegmentation(args.sff_file)
        if args.reset_ids:
            if verbose:
                print_date("Resetting IDs...")
            roi_seg.header.reset_ids(args, configs)
        # export to file
        if verbose:
            print_date("Writing output to {}".format(args.output))
        exit_status = roi_seg.export(args.output, args, configs)
        if verbose:
            print_date("Done")
    else:
        print_date("Unsupported file type: {}".format(args.sff_file))
        exit_status = os.EX_DATAERR
    return exit_status
def __init__(self, sff_seg, args, configs):
    """Build VTK segments from an EMDB-SFF segmentation.

    For ``three_d_volume`` primary descriptors each referenced lattice is
    decoded into a 3D numpy volume and, per segment, reduced to a mask of
    that segment's label before a :py:class:`VTKSegment` is built from it.
    For all other descriptors the segments are wrapped directly.

    :param sff_seg: the EMDB-SFF segmentation
    :param args: parsed command-line arguments (``primary_descriptor`` is
        defaulted from the segmentation when not set)
    :param configs: configurations object
    """
    self._sff_seg = sff_seg  # the EMDB-SFF segmentation
    if not args.primary_descriptor:
        args.primary_descriptor = self._sff_seg.primary_descriptor
    self._vtk_args = args
    self.configs = configs
    self._header = VTKHeader(self._sff_seg, self._vtk_args)
    self._segments = list()
    self._sliced_segments = list()
    # 3D volume segmentations
    if self._vtk_args.primary_descriptor == "three_d_volume":
        self._lattices = dict()
        # reconstitute into a dict
        if args.verbose:
            print_static("Decoding lattices...")
        for lattice in self._sff_seg.lattices:
            if args.verbose:
                print_static("Decoding lattice {}...".format(lattice.id))
            # lattice.decode()
            print_date('', incl_date=False)
            # data_array is the decoded 3D volume for this lattice
            self._lattices[lattice.id] = lattice.data_array
        # now we have the lattices decoded into 3D volumes,
        # we need to compute the surfaces for each
        for segment in self._sff_seg.segments:
            lattice_data = self._lattices[
                segment.three_d_volume.lattice_id]
            # distinct non-zero voxel values decide binary vs labelled mask
            voxel_values = set(lattice_data.flatten().tolist()).difference(
                {0})
            if len(voxel_values) == 1:
                # it's a binary mask
                if args.verbose:
                    print_date("Binary lattice")
                new_simplified_mask = lattice_data
            else:
                # lattice_data = lattice.data
                if args.verbose:
                    print_static(
                        "Non-binary lattice: segment label #{}".format(
                            int(segment.three_d_volume.value)))
                # new mask
                # NOTE(review): this allocation and zero-fill are dead — the
                # variable is immediately rebound to the expression below
                new_simplified_mask = numpy.ndarray(lattice_data.shape,
                                                    dtype=numpy.dtype(int))
                # new_simplified_mask = lattice
                new_simplified_mask[:, :, :] = 0
                # only the parts for this segment: keep voxels matching this
                # segment's label value, zero everywhere else
                new_simplified_mask = (lattice_data == int(
                    segment.three_d_volume.value)) * int(
                        segment.three_d_volume.value)
            # skip segments whose mask selected nothing
            if new_simplified_mask.sum() == 0:
                print_date('', incl_date=False)
                print_date('No data found for segment {}'.format(
                    segment.id))
                continue
            self._segments.append(
                VTKSegment(segment,
                           self._vtk_args,
                           transforms=self._sff_seg.transform_list,
                           lattice=new_simplified_mask))
            print_date('', incl_date=False)
    else:
        # non-volume descriptors: wrap each segment directly
        self._segments = list(
            map(
                lambda s: VTKSegment(s,
                                     self._vtk_args,
                                     transforms=self._sff_seg.transform_list),
                self._sff_seg.segment_list))
def handle_roi_attach(args, configs):
    """
    Handle `attachroi` subcommand

    :param args: parsed arguments
    :type args: ``argparse.Namespace``
    :param configs: configurations object
    :type configs: :py:class:`sfftk.core.configs.Configs`
    :return int status: status
    """
    from .formats.roi import ROISegmentation
    from .omero.handlers import OMEROConnection
    if re.match(r'.*\.roi$', args.roi_file, re.IGNORECASE):
        if args.verbose:
            print_date("Reading ROIs from %s..." % args.roi_file)
        roi_seg = ROISegmentation(args.roi_file)
    else:
        # fix: message previously read "Unkown"; also return os.EX_DATAERR
        # instead of a bare 1, consistent with the sibling handlers
        print_date(
            "Unknown file type '%s'. Should be valid ROI file. Aborting..." %
            args.roi_file)
        return os.EX_DATAERR
    # image_ids
    image_ids = get_image_ids(roi_seg, args)
    # open the connection
    # iterate over orientations
    # get image id for this orientation
    # check image exists
    # create and load roi object
    # attach rois
    with OMEROConnection(args, configs) as connection:
        for orientation in roi_seg.oriented_segments:
            # get the image_id for this orientation
            image_id = image_ids[orientation]
            if args.verbose:
                print_date("Checking whether %s image of ID %s exists..." %
                           (orientation, image_id),
                           newline=False)
            # get the image to which we will write ROIs
            image = connection.getImage(image_id)
            if image:
                if args.verbose:
                    print_date("OK", incl_date=False)
                # convert the oriented segments to rois
                omero_rois = roi_seg.as_omero_rois(orientation, image, args)
                # load the rois to OMERO
                if args.verbose:
                    print_date("Attaching ROIs...", newline=False)
                status = connection.attachRois(omero_rois)
                if status == 0:
                    if args.verbose:
                        print_date("OK", incl_date=False)
                else:
                    if args.verbose:
                        print_date("FAIL", incl_date=False)
                    return status
            else:
                if args.verbose:
                    print_date("FAIL", incl_date=False)
                continue  # non-fatal
    return os.EX_OK