def on_show_imagej(self):
    """Show the ImageJ user interface

    This method shows the ImageJ user interface when the user presses
    the Show ImageJ button.
    """
    logger.debug("Starting ImageJ UI")
    ui_service = ij2.get_ui_service(get_context())
    if ui_service is not None and not ui_service.isVisible():
        if cpprefs.get_headless():
            # Silence the auto-updater in the headless preferences
            #
            ij2.update_never_remind()
        ui_service.createUI()
    elif ui_service is not None:
        ui = ui_service.getDefaultUI()
        J.execute_runnable_in_main_thread(
            J.run_script(
                """new java.lang.Runnable() {
                       run: function() {
                           ui.getApplicationFrame().setVisible(true);
                       }}""",
                dict(ui=ui),
            ),
            True,
        )
def do_imagej(self, ijb, workspace, when=None):
    if when == D_FIRST_IMAGE_SET:
        choice = self.prepare_group_choice.value
        command = self.prepare_group_command
        macro = self.prepare_group_macro.value
        options = self.prepare_group_options.value
        d = self.pre_command_settings_dictionary
    elif when == D_LAST_IMAGE_SET:
        choice = self.post_group_choice.value
        command = self.post_group_command
        macro = self.post_group_macro.value
        options = self.post_group_options.value
        d = self.pre_command_settings_dictionary
    else:
        choice = self.command_or_macro.value
        command = self.command
        macro = self.macro.value
        options = self.options.value
        d = self.command_settings_dictionary

    if choice == CM_COMMAND:
        if self.is_advanced(command, d):
            self.execute_advanced_command(workspace, command, d)
        else:
            ijb.execute_command(command.value, options)
    elif choice == CM_MACRO:
        macro = workspace.measurements.apply_metadata(macro)
        ijb.execute_macro(macro)
    if (choice != CM_NOTHING and
            (not cpprefs.get_headless()) and
            self.pause_before_proceeding):
        import wx
        wx.MessageBox("Please edit the image in ImageJ and hit OK to proceed",
                      "Waiting for ImageJ")
def interaction_request(self, module, *args, **kwargs):
    '''make a request for GUI interaction via a pipeline event

    module - target module for interaction request

    headless_ok - True if the interaction request can be made in a headless
                  context. An example is synchronized access to a shared
                  resource which must be coordinated among all workers.
    '''
    # See also:
    # main().interaction_handler() in analysis_worker.py
    # PipelineController.module_interaction_request() in pipelinecontroller.py
    import cellprofiler.preferences as cpprefs
    if "headless_ok" in kwargs:
        tmp = kwargs.copy()
        del tmp["headless_ok"]
        headless_ok = kwargs["headless_ok"]
        kwargs = tmp
    else:
        headless_ok = False
    if self.interaction_handler is None:
        if cpprefs.get_headless() and not headless_ok:
            raise self.NoInteractionException()
        else:
            return module.handle_interaction(*args, **kwargs)
    else:
        return self.interaction_handler(module, *args, **kwargs)
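# A minimal usage sketch (not from the CellProfiler source): a hypothetical
# module requesting GUI interaction through the pipeline. The module class, its
# handle_interaction signature, and the assumption that NoInteractionException
# is reachable from the pipeline instance are illustrative, not documented API.
class HypotheticalModule(object):
    def run(self, workspace):
        pipeline = workspace.pipeline
        try:
            # With the default headless_ok=False this raises
            # NoInteractionException in a headless analysis worker.
            answer = pipeline.interaction_request(self, "Proceed with segmentation?")
        except pipeline.NoInteractionException:
            answer = True  # fall back to a sensible default when there is no GUI
        return answer

    def handle_interaction(self, prompt):
        # Called directly when no interaction_handler is registered and the
        # session is not headless.
        import wx
        return wx.MessageBox(prompt, "Question", style=wx.YES_NO) == wx.YES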
def prepare_run(self, workspace):
    '''Invoke the image_set_list pickling mechanism and save the pipeline'''
    pipeline = workspace.pipeline
    image_set_list = workspace.image_set_list
    if pipeline.test_mode or self.from_old_matlab:
        return True
    if self.batch_mode.value:
        self.enter_batch_mode(workspace)
        return True
    else:
        path = self.save_pipeline(workspace)
        if not cpprefs.get_headless():
            import wx
            wx.MessageBox(
                "CreateBatchFiles saved pipeline to %s" % path,
                caption="CreateBatchFiles: Batch file saved",
                style=wx.OK | wx.ICON_INFORMATION)
            if self.go_to_website:
                try:
                    import webbrowser
                    import urllib
                    server_path = self.alter_path(os.path.dirname(path))
                    query = urllib.urlencode(dict(data_dir=server_path))
                    url = cpprefs.get_batchprofiler_url() + "/NewBatch.py?" + query
                    webbrowser.open_new(url)
                except:
                    import traceback
                    traceback.print_exc()
        return False
def do_imagej(self, workspace, when=None):
    if when == D_FIRST_IMAGE_SET:
        choice = self.prepare_group_choice.value
        command = self.prepare_group_command
        macro = self.prepare_group_macro.value
        d = self.pre_command_settings_dictionary
    elif when == D_LAST_IMAGE_SET:
        choice = self.post_group_choice.value
        command = self.post_group_command
        macro = self.post_group_macro.value
        d = self.pre_command_settings_dictionary
    else:
        choice = self.command_or_macro.value
        command = self.command
        macro = self.macro.value
        d = self.command_settings_dictionary

    if choice == CM_COMMAND:
        self.execute_advanced_command(workspace, command, d)
    elif choice == CM_MACRO:
        macro = workspace.measurements.apply_metadata(macro)
        script_service = ij2.get_script_service(get_context())
        factory = script_service.getByName(self.macro_language.value)
        engine = factory.getScriptEngine()
        engine.put("ImageJ", get_context())
        result = engine.evalS(macro)
    if (choice != CM_NOTHING and
            (not cpprefs.get_headless()) and
            self.pause_before_proceeding):
        import wx
        wx.MessageBox("Please edit the image in ImageJ and hit OK to proceed",
                      "Waiting for ImageJ")
def check_overwrite(self, workspace):
    """Make sure it's ok to overwrite any existing files before starting run

    workspace - workspace with all image sets already populated

    returns True if ok to proceed, False if user cancels
    """
    if self.wants_overwrite_without_warning:
        return True

    files_to_check = []
    metadata_groups = self.get_metadata_groups(workspace)
    for metadata_group in metadata_groups:
        image_number = metadata_group.image_numbers[0]
        files_to_check.append(self.make_image_file_name(workspace, image_number))

    files_to_overwrite = filter(os.path.isfile, files_to_check)
    if len(files_to_overwrite) > 0:
        if get_headless():
            logger.error(
                "ExportToACC is configured to refrain from overwriting files "
                "and the following file(s) already exist: %s" %
                ", ".join(files_to_overwrite))
            return False
        msg = "Overwrite the following file(s)?\n" + "\n".join(files_to_overwrite)
        import wx
        result = wx.MessageBox(
            msg,
            caption="ExportToACC: Overwrite existing files",
            style=wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
        if result != wx.YES:
            return False
    return True
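# Illustrative only: a caller would typically abort the run when check_overwrite
# declines. The prepare_run wiring below is an assumption about the surrounding
# module, not part of the snippet above.
def prepare_run(self, workspace):
    # Refuse to start if existing output files may not be overwritten.
    return self.check_overwrite(workspace)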
def show_warning(title, message, get_preference, set_preference):
    """Show a silenceable warning message to the user

    title - title for the dialog box

    message - message to be displayed

    get_preference - function that gets a user preference: do you want to
                     show this warning?

    set_preference - function that sets the user preference if they choose
                     not to see the warning again.

    The message is printed to the console if headless.
    """
    from cellprofiler.preferences import get_headless

    if get_headless():
        print(message)
        return

    if not get_preference():
        return

    import wx

    if wx.GetApp() is None:
        print(message)
        return

    with wx.Dialog(None, title=title) as dlg:
        dlg.Sizer = sizer = wx.BoxSizer(wx.VERTICAL)
        subsizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(subsizer, 0, wx.EXPAND | wx.ALL, 5)
        subsizer.Add(
            wx.StaticBitmap(
                dlg,
                wx.ID_ANY,
                wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_CMN_DIALOG),
            ),
            0,
            wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.RIGHT,
            5,
        )
        text = wx.StaticText(dlg, wx.ID_ANY, message)
        subsizer.Add(text, 0, wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.ALL, 5)
        dont_show = wx.CheckBox(dlg, label="Don't show this message again.")
        sizer.Add(dont_show, 0, wx.ALIGN_LEFT | wx.ALL, 5)
        buttons_sizer = wx.StdDialogButtonSizer()
        buttons_sizer.AddButton(wx.Button(dlg, wx.ID_OK))
        buttons_sizer.Realize()
        sizer.Add(buttons_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 5)
        dlg.Fit()
        dlg.ShowModal()
        if dont_show.GetValue():
            set_preference(False)
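# A minimal usage sketch (illustrative, not from the source): wiring show_warning
# to a pair of hypothetical preference accessors. Headless runs print the message;
# GUI runs show a dialog the user can permanently silence.
_show_memory_warning = [True]

def get_show_memory_warning():
    return _show_memory_warning[0]

def set_show_memory_warning(value):
    _show_memory_warning[0] = value

show_warning(
    "Low memory",
    "CellProfiler may run out of memory while processing this image set.",
    get_show_memory_warning,
    set_show_memory_warning)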
def prepare_run(self, workspace):
    '''Set up omero image providers inside the image_set_list'''
    pipeline = workspace.pipeline
    image_set_list = workspace.image_set_list
    if pipeline.in_batch_mode():
        # TODO: Rewrite the OmeroImageProvider such that it can be used in batch mode
        # e.g. omero session keys could be used to attach to existing sessions to
        # keep OmeroImageProviders from creating a new session every time an image should be loaded
        return False
    if cpp.get_headless():
        print 'OmeroLoadImages running in headless mode: image directory parameter will be used as omero object id'
        self.omero_object_id.set_value(int(cpp.get_default_image_directory()))
        print 'omero object id = %d' % self.omero_object_id.value
        print 'omero object type = %s' % self.omero_object.value

    self.create_omero_gateway()
    if self.omero_object == MS_IMAGE:
        omero_image_list = [self.omero_gateway.getImage(self.omero_object_id.value)]
    elif self.omero_object == MS_DATASET:
        # Get dataset without leaves (=images&pixels)
        dataset = self.omero_gateway.getDataset(self.omero_object_id.value, False)
        self.dataset_name = dataset.getName().getValue()
        omero_image_list = self.get_images_from_dataset(self.omero_object_id.value)
    elif self.omero_object == MS_PLATE:
        self.wells = self.get_wells_from_plate(self.omero_object_id.value)
        self.plate_name = self.wells[0].getPlate().getName().getValue()
        omero_image_list = []
        for well in self.wells:
            for wellsample in well.iterateWellSamples():
                omero_image_list.append(wellsample.getImage())

    # get names and pixels from omero images
    pixels_list = []
    for omero_image in omero_image_list:
        image_id = omero_image.getId().getValue()
        pixels_list += self.omero_gateway.getPixelsFromImage(image_id)

    # add images to image sets
    image_set_count = len(pixels_list)
    for i in range(0, image_set_count):
        image_set = image_set_list.get_image_set(i)
        pixels = pixels_list[i]
        pixels_id = pixels.getId().getValue()
        sizeZ = pixels.getSizeZ().getValue()
        sizeC = pixels.getSizeC().getValue()
        sizeT = pixels.getSizeT().getValue()
        for channel in self.channels:
            for z in range(0, sizeZ):
                for t in range(0, sizeT):
                    c = int(channel.channel_number.value)
                    self.save_image_set_info(image_set, channel.cpimage_name.value,
                                             P_OMERO, V_OMERO, self.omero_gateway,
                                             pixels_id, z, c, t)
    return True
def show_warning(title, message, get_preference, set_preference):
    """Show a silenceable warning message to the user

    title - title for the dialog box

    message - message to be displayed

    get_preference - function that gets a user preference: do you want to
                     show this warning?

    set_preference - function that sets the user preference if they choose
                     not to see the warning again.

    The message is printed to the console if headless.
    """
    from cellprofiler.preferences import get_headless
    if get_headless():
        print message
        return

    if not get_preference():
        return
    import wx
    if wx.GetApp() is None:
        print message
        return
    dlg = wx.Dialog(wx.GetApp().GetTopWindow(), title=title)
    dlg.Sizer = sizer = wx.BoxSizer(wx.VERTICAL)
    subsizer = wx.BoxSizer(wx.HORIZONTAL)
    sizer.Add(subsizer, 0, wx.EXPAND | wx.ALL, 5)
    subsizer.Add(
        wx.StaticBitmap(dlg, wx.ID_ANY,
                        wx.ArtProvider.GetBitmap(wx.ART_INFORMATION,
                                                 wx.ART_CMN_DIALOG)),
        0, wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.RIGHT, 5)
    text = wx.StaticText(dlg, wx.ID_ANY, message)
    subsizer.Add(text, 0, wx.ALIGN_LEFT | wx.ALIGN_TOP | wx.ALL, 5)
    dont_show = wx.CheckBox(dlg, label="Don't show this message again.")
    sizer.Add(dont_show, 0, wx.ALIGN_LEFT | wx.ALL, 5)
    buttons_sizer = wx.StdDialogButtonSizer()
    buttons_sizer.AddButton(wx.Button(dlg, wx.ID_OK))
    buttons_sizer.Realize()
    sizer.Add(buttons_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 5)
    dlg.Fit()
    dlg.ShowModal()
    if dont_show.Value:
        set_preference(False)
def prepare_run(self, workspace):
    '''Invoke the image_set_list pickling mechanism and save the pipeline'''
    pipeline = workspace.pipeline
    image_set_list = workspace.image_set_list
    if pipeline.test_mode or self.from_old_matlab:
        return True
    if self.batch_mode.value:
        self.enter_batch_mode(workspace)
        return True
    else:
        path = self.save_pipeline(workspace)
        if not cpprefs.get_headless():
            import wx
            wx.MessageBox("CreateBatchFiles saved pipeline to %s" % path,
                          caption="CreateBatchFiles: Batch file saved",
                          style=wx.OK | wx.ICON_INFORMATION)
        return False
def prepare_group(self, pipeline, image_set_list, grouping, image_numbers):
    '''Handle initialization per-group

    pipeline - the pipeline being run
    image_set_list - the list of image sets for the whole experiment
    grouping - a dictionary that describes the key for the grouping.
               For instance, { 'Metadata_Row':'A','Metadata_Column':'01'}
    image_numbers - a sequence of the image numbers within the group
                    (image sets can be retrieved as
                    image_set_list.get_image_set(image_numbers[i]-1))

    We use prepare_group to compute the minimum or maximum values
    among all images in the group for certain values of
    "wants_automatic_[low,high]".
    '''
    if (self.wants_automatic_high != HIGH_ALL_IMAGES and
            self.wants_automatic_low != LOW_ALL_IMAGES):
        return True

    if not pipeline.in_batch_mode() and not cpprefs.get_headless():
        import wx
        progress_dialog = wx.ProgressDialog(
            "#%d: RescaleIntensity for %s" % (self.module_num, self.image_name.value),
            "RescaleIntensity will process %d images while preparing for run" %
            (len(image_numbers)),
            len(image_numbers),
            None,
            wx.PD_APP_MODAL | wx.PD_AUTO_HIDE | wx.PD_CAN_ABORT)
    else:
        progress_dialog = None

    min_value = None
    max_value = None
    try:
        for i, image_number in enumerate(image_numbers):
            image_set = image_set_list.get_image_set(image_number - 1)
            image = image_set.get_image(self.image_name.value,
                                        must_be_grayscale=True,
                                        cache=False)
            if self.wants_automatic_high == HIGH_ALL_IMAGES:
                if image.has_mask:
                    vmax = np.max(image.pixel_data[image.mask])
                else:
                    vmax = np.max(image.pixel_data)
                max_value = vmax if max_value is None else max(max_value, vmax)

            if self.wants_automatic_low == LOW_ALL_IMAGES:
                if image.has_mask:
                    vmin = np.min(image.pixel_data[image.mask])
                else:
                    vmin = np.min(image.pixel_data)
                min_value = vmin if min_value is None else min(min_value, vmin)

            if progress_dialog is not None:
                should_continue, skip = progress_dialog.Update(i + 1)
                if not should_continue:
                    progress_dialog.EndModal(0)
                    return False
    finally:
        if progress_dialog is not None:
            progress_dialog.Destroy()
    if self.wants_automatic_high == HIGH_ALL_IMAGES:
        self.set_automatic_maximum(image_set_list, max_value)
    if self.wants_automatic_low == LOW_ALL_IMAGES:
        self.set_automatic_minimum(image_set_list, min_value)
def merge_files(destination, sources, force_headless=False):
    is_headless = force_headless or get_headless()
    if not is_headless:
        import wx
    if len(sources) == 0:
        return
    if not is_headless:
        progress = wx.ProgressDialog(
            "Writing " + destination, "Loading " + sources[0],
            maximum=len(sources) * 4 + 1,
            style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME)
    count = 0
    try:
        pipeline = cpp.Pipeline()
        has_error = [False]

        def callback(caller, event):
            if isinstance(event, cpp.LoadExceptionEvent):
                wx.MessageBox(
                    message="Could not load %s: %s" % (sources[0], event.error),
                    caption="Failed to load %s" % sources[0])
                has_error[0] = True

        pipeline.add_listener(callback)
        if has_error[0]:
            return
        # distributed processing passes a list of functions, not a
        # list of filenames.
        pipeline.load(sources[0]() if callable(sources[0]) else sources[0])
        mdest = cpmeas.Measurements()
        image_set_count = 0
        for source in sources:
            if not is_headless:
                count += 1
                keep_going, skip = progress.Update(count, "Loading " + source)
                if not keep_going:
                    return
            # distributed processing passes a list of functions
            msource = cpmeas.load_measurements(
                source() if callable(source) else source)
            source_image_set_count = 0
            for object_name in msource.get_object_names():
                if object_name in mdest.get_object_names():
                    destfeatures = mdest.get_feature_names(object_name)
                else:
                    destfeatures = []
                for feature in msource.get_feature_names(object_name):
                    src_values = msource.get_all_measurements(object_name, feature)
                    if np.isscalar(src_values):
                        # For something like "Experiment", there is a single
                        # value. Keep the first value seen among all sources.
                        #
                        if not feature in destfeatures:
                            mdest.add_all_measurements(object_name, feature,
                                                       [src_values])
                    else:
                        source_image_count = max(source_image_set_count,
                                                 len(src_values))
                        if feature in destfeatures:
                            dest_values = mdest.get_all_measurements(
                                object_name, feature)
                            if not isinstance(dest_values, list):
                                dest_values = dest_values.tolist()
                        else:
                            dest_values = [None] * image_set_count
                        if isinstance(src_values, list):
                            dest_values += src_values
                        else:
                            dest_values += src_values.tolist()
                        mdest.add_all_measurements(object_name, feature,
                                                   dest_values)
                destset = set(destfeatures)
                #
                # These are features that are in the destination, but not
                # in the source. We have to add Nones to the destination.
                #
                for feature in destset.difference(
                        msource.get_feature_names(object_name)):
                    dest_values = mdest.get_all_measurements(object_name, feature)
                    dest_values += [None] * source_image_count
            #
            # The source may not have all of the objects that are in
            # the destination. Add blanks here.
            #
            destobjects = set(mdest.get_object_names())
            for object_name in destobjects.difference(msource.get_object_names()):
                for feature in mdest.get_feature_names(object_name):
                    dest_values = mdest.get_all_measurements(object_name, feature)
                    dest_values += [None] * source_image_count
                    mdest.add_all_measurements(object_name, feature, dest_values)
            image_set_count += source_image_count
        mdest.image_set_number = image_set_count
        if not is_headless:
            keep_going, skip = progress.Update(count + 1,
                                               "Saving to " + destination)
            if not keep_going:
                return
        pipeline.save_measurements(destination, mdest)
    finally:
        if not is_headless:
            progress.Destroy()
def start_cellprofiler_jvm():
    '''Start the Java VM with arguments appropriate for CellProfiler'''
    global logger
    if hasattr(sys, 'frozen'):
        if sys.platform != 'darwin':
            root_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
            bioformats_path = os.path.join(root_path, 'bioformats')
        else:
            bioformats_path = os.path.abspath(os.path.split(__file__)[0])
            root_path = os.path.split(bioformats_path)[0]
        imagej_path = os.path.join(root_path, 'imagej', 'jars')

        def sort_fn(a, b):
            aa, bb = [(0 if x.startswith("cellprofiler-java") else 1, x)
                      for x in a, b]
            return cmp(aa, bb)

        jar_files = [jar_filename
                     for jar_filename in os.listdir(imagej_path)
                     if jar_filename.lower().endswith(".jar")]
        jar_files = sorted(jar_files, cmp=sort_fn)
    else:
        bioformats_path = os.path.abspath(os.path.split(__file__)[0])
        root_path = os.path.split(bioformats_path)[0]
        jar_files = get_cellprofiler_jars()
        imagej_path = os.path.join(root_path, 'imagej', 'jars')
    class_path = os.pathsep.join(
        [os.path.join(imagej_path, jar_file) for jar_file in jar_files])
    if os.environ.has_key("CLASSPATH"):
        class_path += os.pathsep + os.environ["CLASSPATH"]

    plugin_directory = get_ij_plugin_directory()
    logger.debug("Using %s as imagej plugin directory" % plugin_directory)
    if (plugin_directory is not None and
            os.path.isdir(plugin_directory)):
        #
        # Add the plugin directory to pick up .class files in a directory
        # hierarchy.
        #
        class_path += os.pathsep + plugin_directory
        logger.debug("Adding %s to class path" % plugin_directory)
        #
        # Add any .jar files in the directory
        #
        for jarfile in os.listdir(plugin_directory):
            jarpath = os.path.join(plugin_directory, jarfile)
            if jarfile.lower().endswith(".jar"):
                logger.debug("Adding %s to class path" % jarpath)
                class_path += os.pathsep + jarpath
            else:
                logger.debug("Skipping %s" % jarpath)
    else:
        logger.info("Plugin directory doesn't point to valid folder: " +
                    plugin_directory)

    if sys.platform.startswith("win") and not hasattr(sys, 'frozen'):
        # Have to find tools.jar
        from cellprofiler.utilities.setup import find_jdk
        jdk_path = find_jdk()
        if jdk_path is not None:
            tools_jar = os.path.join(jdk_path, "lib", "tools.jar")
            class_path += os.pathsep + tools_jar
        else:
            logger.warning("Failed to find tools.jar")

    jvm_arg = [x.groups()[0] for x in [
        re.match('--jvm-heap-size=([0-9]+[gGkKmM])', y)
        for y in sys.argv] if x is not None]
    if len(jvm_arg) > 0:
        jvm_arg = jvm_arg[0]
    else:
        jvm_arg = "512m"

    args = [r"-Djava.class.path=" + class_path,
            r"-Dloci.bioformats.loaded=true",
            #r"-verbose:class",
            #r"-verbose:jni",
            r"-Xmx%s" % jvm_arg]
    #
    # Get the log4j logger setup from a file in the bioformats directory
    # if such a file exists.
    #
    log4j_properties = os.path.join(bioformats_path, "log4j.properties")
    if os.path.exists(log4j_properties):
        log4j_properties = "file:/" + log4j_properties.replace(os.path.sep, "/")
        args += [r"-Dlog4j.configuration=" + log4j_properties]
        init_logger = False
    else:
        init_logger = True

    if get_headless():
        # We're running silently, so don't change the Java preferences
        # The following definition uses a process-scope preferences factory
        args += [
            "-Djava.util.prefs.PreferencesFactory="
            "org.cellprofiler.headlesspreferences.HeadlessPreferencesFactory"]

    run_headless = (get_headless() and
                    not os.environ.has_key("CELLPROFILER_USE_XVFB"))
    run_headless = False

    logger.debug("JVM arguments: " + " ".join(args))
    jutil.start_vm(args, run_headless)
    logger.debug("Java virtual machine started.")
    jutil.attach()
    try:
        jutil.static_call("loci/common/Location", "cacheDirectoryListings",
                          "(Z)V", True)
    except:
        logger.warning("Bioformats version does not support directory cacheing")
    #
    # Start the log4j logger to avoid error messages.
    #
    if init_logger:
        try:
            jutil.static_call("org/apache/log4j/BasicConfigurator",
                              "configure", "()V")
            log4j_logger = jutil.static_call(
                "org/apache/log4j/Logger", "getRootLogger",
                "()Lorg/apache/log4j/Logger;")
            warn_level = jutil.get_static_field(
                "org/apache/log4j/Level", "WARN", "Lorg/apache/log4j/Level;")
            jutil.call(log4j_logger, "setLevel",
                       "(Lorg/apache/log4j/Level;)V", warn_level)
            del logger
            del warn_level
        except:
            logger.error("Failed to initialize log4j\n", exc_info=True)
    if not get_headless():
        jutil.activate_awt()
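# Illustrative sketch (not from the source): how a headless launcher might set a
# larger Java heap before starting the JVM. The --jvm-heap-size switch is the one
# parsed from sys.argv by the regex in start_cellprofiler_jvm above; appending it
# to sys.argv before the call is an assumption about how a caller could drive
# that parsing, not documented behavior.
import sys

def launch_with_heap(heap="2g"):
    sys.argv.append("--jvm-heap-size=%s" % heap)
    start_cellprofiler_jvm()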
def merge_files(destination, sources, force_headless=False):
    is_headless = force_headless or get_headless()
    if not is_headless:
        import wx
    if len(sources) == 0:
        return
    if not is_headless:
        progress = wx.ProgressDialog(
            "Writing " + destination, "Loading " + sources[0],
            maximum=len(sources) * 4 + 1,
            style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME)
    count = 0
    try:
        pipeline = cpp.Pipeline()
        has_error = [False]

        def callback(caller, event):
            if isinstance(event, cpp.LoadExceptionEvent):
                wx.MessageBox(
                    message="Could not load %s: %s" % (sources[0], event.error),
                    caption="Failed to load %s" % sources[0])
                has_error[0] = True

        pipeline.add_listener(callback)
        pipeline.load(sources[0])
        if has_error[0]:
            return
        if destination.lower().endswith(".h5"):
            mdest = cpmeas.Measurements(filename=destination, multithread=False)
            h5_dest = True
        else:
            mdest = cpmeas.Measurements(multithread=False)
            h5_dest = False
        for source in sources:
            if not is_headless:
                count += 1
                keep_going, skip = progress.Update(count, "Loading " + source)
                if not keep_going:
                    return
            if h5py.is_hdf5(source):
                msource = cpmeas.Measurements(filename=source, mode="r",
                                              multithread=False)
            else:
                msource = cpmeas.load_measurements(source)
            dest_image_numbers = mdest.get_image_numbers()
            source_image_numbers = msource.get_image_numbers()
            if (len(dest_image_numbers) == 0 or
                    len(source_image_numbers) == 0):
                offset_source_image_numbers = source_image_numbers
            else:
                offset_source_image_numbers = (
                    np.max(dest_image_numbers) -
                    np.min(source_image_numbers) +
                    source_image_numbers + 1)
            for object_name in msource.get_object_names():
                if object_name in mdest.get_object_names():
                    destfeatures = mdest.get_feature_names(object_name)
                else:
                    destfeatures = []
                for feature in msource.get_feature_names(object_name):
                    if object_name == cpmeas.EXPERIMENT:
                        if not mdest.has_feature(object_name, feature):
                            src_value = msource.get_experiment_measurement(feature)
                            mdest.add_experiment_measurement(feature, src_value)
                        continue
                    src_values = msource.get_measurement(
                        object_name, feature,
                        image_set_number=source_image_numbers)
                    mdest[object_name, feature,
                          offset_source_image_numbers] = src_values
                destset = set(destfeatures)
        if not is_headless:
            keep_going, skip = progress.Update(count + 1,
                                               "Saving to " + destination)
            if not keep_going:
                return
        if not h5_dest:
            pipeline.save_measurements(destination, mdest)
    finally:
        if not is_headless:
            progress.Destroy()
def merge_files(destination, sources, force_headless=False):
    is_headless = force_headless or get_headless()
    if not is_headless:
        import wx
    if len(sources) == 0:
        return
    if not is_headless:
        progress = wx.ProgressDialog(
            "Writing " + destination, "Loading " + sources[0],
            maximum=len(sources) * 4 + 1,
            style=wx.PD_CAN_ABORT | wx.PD_APP_MODAL | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME)
    count = 0
    try:
        pipeline = cpp.Pipeline()
        has_error = [False]

        def callback(caller, event):
            if isinstance(event, cpp.LoadExceptionEvent):
                wx.MessageBox(
                    message="Could not load %s: %s" % (sources[0], event.error),
                    caption="Failed to load %s" % sources[0])
                has_error[0] = True

        pipeline.add_listener(callback)
        # distributed processing passes a list of functions, not a
        # list of filenames.
        pipeline.load(sources[0]() if callable(sources[0]) else sources[0])
        if has_error[0]:
            return
        mdest = cpmeas.Measurements()
        for source in sources:
            if not is_headless:
                count += 1
                keep_going, skip = progress.Update(count, "Loading " + source)
                if not keep_going:
                    return
            # distributed processing passes a list of functions
            msource = cpmeas.load_measurements(
                source() if callable(source) else source)
            dest_image_numbers = mdest.get_image_numbers()
            source_image_numbers = msource.get_image_numbers()
            if len(dest_image_numbers) == 0:
                offset_source_image_numbers = source_image_numbers
            else:
                offset_source_image_numbers = (
                    np.max(dest_image_numbers) + source_image_numbers)
            for object_name in msource.get_object_names():
                if object_name in mdest.get_object_names():
                    destfeatures = mdest.get_feature_names(object_name)
                else:
                    destfeatures = []
                for feature in msource.get_feature_names(object_name):
                    if object_name == cpmeas.EXPERIMENT:
                        if not mdest.has_feature(object_name, feature):
                            src_value = msource.get_experiment_measurement(feature)
                            mdest.add_experiment_measurement(feature, src_value)
                        continue
                    src_values = msource.get_measurement(
                        object_name, feature,
                        image_set_number=source_image_numbers)
                    for image_number, value in zip(
                            offset_source_image_numbers, src_values):
                        mdest.add_measurement(
                            object_name, feature, value,
                            image_set_number=image_number)
                destset = set(destfeatures)
        if not is_headless:
            keep_going, skip = progress.Update(count + 1,
                                               "Saving to " + destination)
            if not keep_going:
                return
        pipeline.save_measurements(destination, mdest)
    finally:
        if not is_headless:
            progress.Destroy()
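# A minimal usage sketch (illustrative; the file names are assumptions): merging
# per-batch measurement files into one output without a GUI by forcing the
# headless code path, which skips the wx.ProgressDialog entirely.
merge_files("AllMeasurements.mat",
            ["Batch1_Measurements.mat", "Batch2_Measurements.mat"],
            force_headless=True)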
def run(self, workspace):
    '''Run the module. Add the measurements. '''
    statistics_dict = {}
    ratio_dict = {}
    for channel in self.channels:
        provider = workspace.image_set.get_image_provider(channel.cpimage_name.value)
        assert isinstance(provider, OmeroImageProvider)

        name = provider.get_name()
        omero_image_name = provider.get_omero_image_name()
        omero_image_id = provider.get_image_id()
        pixels_id = provider.get_pixels_id()
        z = provider.get_z()
        c = provider.get_c()
        t = provider.get_t()

        header = []
        row = []
        ratio = []
        m = workspace.measurements
        measurements = ()
        if self.omero_object == MS_DATASET:
            measurements += ((M_DATASET_NAME, self.dataset_name, 3.0),
                             (M_DATASET_ID, self.omero_object_id.value, 1.0))
        elif self.omero_object == MS_PLATE:
            # CellProfiler starts counting image sets from 1
            well = self.wells[workspace.measurements.image_set_number - 1]
            well_row = well.getRow().getValue()
            well_column = well.getColumn().getValue()
            well_id = well.getId().getValue()
            measurements += ((M_PLATE_NAME, self.plate_name, 3.0),
                             (M_PLATE_ID, self.omero_object_id.value, 1.0),
                             (M_WELL_ROW, well_row, 1.0),
                             (M_WELL_COLUMN, well_column, 1.0),
                             (M_WELL_ID, well_id, 3.0))
        measurements += ((M_IMAGE_NAME, omero_image_name, 3.0),
                         (M_IMAGE_ID, omero_image_id, 1.0),
                         (M_PIXELS_ID, pixels_id, 1.0),
                         (M_Z, z, 0.5),
                         (M_C, c, 0.5),
                         (M_T, t, 0.5))

        for tag, value, r in measurements:
            m.add_image_measurement("_".join((tag, name)), value)
            header.append(tag)
            row.append(value)
            ratio.append(r)
        statistics = [header, row]
        ratio = [x / sum(ratio) for x in ratio]
        statistics_dict[channel.channel_number.value] = statistics
        ratio_dict[channel.channel_number.value] = ratio

    workspace.display_data.statistics = statistics_dict
    workspace.display_data.ratio = ratio_dict

    if cpp.get_headless():
        # headless mode
        for channel in self.channels:
            image_name, channel_number = (channel.cpimage_name.value,
                                          channel.channel_number.value)
            print "--- image name: %s\tchannel: %s" % (image_name, channel_number)
            (header, row) = workspace.display_data.statistics[channel_number]
            for i in range(0, len(header)):
                print "\t%s: %s" % (header[i], row[i])
    path = os.path.split(os.path.abspath(sys.argv[0]))[0]
    path = os.path.join(path, 'cellprofiler', 'utilities')
else:
    path = os.path.split(__file__)[0]
p = subprocess.Popen(["java", "-cp", path, "findlibjvm"],
                     stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
jvm_dir = stdout.strip()
ctypes.CDLL(os.path.join(jvm_dir, "libjvm.so"))

if jvm_dir is None:
    from cellprofiler.preferences \
        import get_report_jvm_error, set_report_jvm_error, get_headless
    from cellprofiler.preferences import set_has_reported_jvm_error

    if not get_headless():
        import wx
        app = wx.GetApp()
        if app is not None and get_report_jvm_error():
            dlg = wx.Dialog(wx.GetApp().GetTopWindow(),
                            title="Java not installed properly")
            sizer = wx.BoxSizer(wx.VERTICAL)
            dlg.SetSizer(sizer)
            text = wx.StaticText(dlg, -1,
                                 "CellProfiler can't find Java on your computer.")
            text.Font = wx.Font(int(dlg.Font.GetPointSize() * 5 / 4),
                                dlg.Font.GetFamily(), dlg.Font.GetStyle(),
                                wx.FONTWEIGHT_BOLD)
            sizer.Add(text, 0, wx.ALIGN_LEFT | wx.ALL, 5)
def start_cellprofiler_jvm():
    '''Start the Java VM with arguments appropriate for CellProfiler'''
    global logger
    if hasattr(sys, 'frozen'):
        if sys.platform != 'darwin':
            root_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
            bioformats_path = os.path.join(root_path, 'bioformats')
        else:
            bioformats_path = os.path.abspath(os.path.split(__file__)[0])
            root_path = os.path.split(bioformats_path)[0]
        imagej_path = os.path.join(root_path, 'imagej', 'jars')

        def sort_fn(a, b):
            aa, bb = [(0 if x.startswith("cellprofiler-java") else 1, x)
                      for x in a, b]
            return cmp(aa, bb)

        jar_files = [jar_filename
                     for jar_filename in os.listdir(imagej_path)
                     if jar_filename.lower().endswith(".jar")]
        jar_files = sorted(jar_files, cmp=sort_fn)
    else:
        bioformats_path = os.path.abspath(os.path.split(__file__)[0])
        root_path = os.path.split(bioformats_path)[0]
        jar_files = get_cellprofiler_jars()
        imagej_path = os.path.join(root_path, 'imagej', 'jars')
    class_path = os.pathsep.join(
        [os.path.join(imagej_path, jar_file) for jar_file in jar_files])
    if os.environ.has_key("CLASSPATH"):
        class_path += os.pathsep + os.environ["CLASSPATH"]

    if (get_ij_plugin_directory() is not None and
            os.path.isdir(get_ij_plugin_directory())):
        plugin_directory = get_ij_plugin_directory()
        #
        # Add the plugin directory to pick up .class files in a directory
        # hierarchy.
        #
        class_path += os.pathsep + plugin_directory
        #
        # Add any .jar files in the directory
        #
        class_path += os.pathsep + os.pathsep.join([
            os.path.join(plugin_directory, jarfile)
            for jarfile in os.listdir(plugin_directory)
            if jarfile.lower().endswith(".jar")])

    if sys.platform.startswith("win") and not hasattr(sys, 'frozen'):
        # Have to find tools.jar
        from cellprofiler.utilities.setup import find_jdk
        jdk_path = find_jdk()
        if jdk_path is not None:
            tools_jar = os.path.join(jdk_path, "lib", "tools.jar")
            class_path += os.pathsep + tools_jar
        else:
            logger.warning("Failed to find tools.jar")

    jvm_arg = [x.groups()[0] for x in [
        re.match('--jvm-heap-size=([0-9]+[gGkKmM])', y)
        for y in sys.argv] if x is not None]
    if len(jvm_arg) > 0:
        jvm_arg = jvm_arg[0]
    else:
        jvm_arg = "512m"

    args = [r"-Djava.class.path=" + class_path,
            r"-Dloci.bioformats.loaded=true",
            #r"-verbose:class",
            #r"-verbose:jni",
            r"-Xmx%s" % jvm_arg]
    #
    # Get the log4j logger setup from a file in the bioformats directory
    # if such a file exists.
    #
    log4j_properties = os.path.join(bioformats_path, "log4j.properties")
    if os.path.exists(log4j_properties):
        log4j_properties = "file:/" + log4j_properties.replace(os.path.sep, "/")
        args += [r"-Dlog4j.configuration=" + log4j_properties]
        init_logger = False
    else:
        init_logger = True

    if get_headless():
        # We're running silently, so don't change the Java preferences
        # The following definition uses a process-scope preferences factory
        args += [
            "-Djava.util.prefs.PreferencesFactory="
            "org.cellprofiler.headlesspreferences.HeadlessPreferencesFactory"]

    run_headless = (get_headless() and
                    not os.environ.has_key("CELLPROFILER_USE_XVFB"))
    run_headless = False

    logger.debug("JVM arguments: " + " ".join(args))
    jutil.start_vm(args, run_headless)
    logger.debug("Java virtual machine started.")
    jutil.attach()
    try:
        jutil.static_call("loci/common/Location", "cacheDirectoryListings",
                          "(Z)V", True)
    except:
        logger.warning("Bioformats version does not support directory cacheing")
    #
    # Start the log4j logger to avoid error messages.
    #
    if init_logger:
        try:
            jutil.static_call("org/apache/log4j/BasicConfigurator",
                              "configure", "()V")
            log4j_logger = jutil.static_call(
                "org/apache/log4j/Logger", "getRootLogger",
                "()Lorg/apache/log4j/Logger;")
            warn_level = jutil.get_static_field(
                "org/apache/log4j/Level", "WARN", "Lorg/apache/log4j/Level;")
            jutil.call(log4j_logger, "setLevel",
                       "(Lorg/apache/log4j/Level;)V", warn_level)
            del logger
            del warn_level
        except:
            logger.error("Failed to initialize log4j\n", exc_info=True)
    if not get_headless():
        jutil.activate_awt()
def start_cellprofiler_jvm():
    '''Start the Java VM with arguments appropriate for CellProfiler'''
    global logger
    if hasattr(sys, 'frozen'):
        if sys.platform != 'darwin':
            root_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
            bioformats_path = os.path.join(root_path, 'bioformats')
        else:
            bioformats_path = os.path.abspath(os.path.split(__file__)[0])
            root_path = os.path.split(bioformats_path)[0]
        imagej_path = os.path.join(root_path, 'imagej', 'jars')

        def sort_fn(a, b):
            aa, bb = [(0 if x.startswith("cellprofiler-java") else 1, x)
                      for x in a, b]
            return cmp(aa, bb)

        jar_files = [jar_filename
                     for jar_filename in os.listdir(imagej_path)
                     if jar_filename.lower().endswith(".jar")]
        jar_files = sorted(jar_files, cmp=sort_fn)
    else:
        bioformats_path = os.path.abspath(os.path.split(__file__)[0])
        root_path = os.path.split(bioformats_path)[0]
        jar_files = get_cellprofiler_jars()
        imagej_path = os.path.join(root_path, 'imagej', 'jars')
    class_path = os.pathsep.join(
        [os.path.join(imagej_path, jar_file) for jar_file in jar_files])
    if os.environ.has_key("CLASSPATH"):
        class_path += os.pathsep + os.environ["CLASSPATH"]

    plugin_directory = get_ij_plugin_directory()
    logger.debug("Using %s as imagej plugin directory" % plugin_directory)
    if (plugin_directory is not None and
            os.path.isdir(plugin_directory)):
        #
        # Add the plugin directory to pick up .class files in a directory
        # hierarchy.
        #
        class_path += os.pathsep + plugin_directory
        logger.debug("Adding %s to class path" % plugin_directory)
        #
        # Add any .jar files in the directory
        #
        for jarfile in os.listdir(plugin_directory):
            jarpath = os.path.join(plugin_directory, jarfile)
            if jarfile.lower().endswith(".jar"):
                logger.debug("Adding %s to class path" % jarpath)
                class_path += os.pathsep + jarpath
            else:
                logger.debug("Skipping %s" % jarpath)
    else:
        logger.info("Plugin directory doesn't point to valid folder: " +
                    plugin_directory)

    if sys.platform.startswith("win") and not hasattr(sys, 'frozen'):
        # Have to find tools.jar
        from cellprofiler.utilities.setup import find_jdk
        jdk_path = find_jdk()
        if jdk_path is not None:
            tools_jar = os.path.join(jdk_path, "lib", "tools.jar")
            class_path += os.pathsep + tools_jar
        else:
            logger.warning("Failed to find tools.jar")

    jvm_arg = jutil.get_jvm_heap_size_arg()
    if jvm_arg is None:
        jvm_arg = "512m"

    args = [r"-Djava.class.path=" + class_path,
            r"-Dloci.bioformats.loaded=true",
            #r"-verbose:class",
            #r"-verbose:jni",
            r"-Xmx%s" % jvm_arg]
    #
    # Get the log4j logger setup from a file in the bioformats directory
    # if such a file exists.
    #
    log4j_properties = os.path.join(bioformats_path, "log4j.properties")
    if os.path.exists(log4j_properties):
        log4j_properties = "file:/" + log4j_properties.replace(os.path.sep, "/")
        args += [r"-Dlog4j.configuration=" + log4j_properties]
        init_logger = False
    else:
        init_logger = True

    if plugin_directory is not None and os.path.isdir(plugin_directory):
        # For IJ1 compatibility
        args += [r"-Dplugins.dir=%s" % plugin_directory]

    # In headless mode, we have to avoid changing the Java preferences.
    #
    # Aside from that, we need to prevent ImageJ from exiting and from
    # displaying the updater dialog - at least temporarily, we do that
    # through preferences. We use the HeadlessPreferencesFactory to
    # limit the scope of the changes to this process - otherwise we'd
    # turn off updating for the machine.
    #
    # TODO: ImageJ is implementing a pluggable mechanism to control the
    #       quit process. We can also contribute a pluggable mechanism
    #       that gives us more control over the updater.
    #
    args += [
        "-Djava.util.prefs.PreferencesFactory="
        "org.cellprofiler.headlesspreferences.HeadlessPreferencesFactory"]

    run_headless = (get_headless() and
                    not os.environ.has_key("CELLPROFILER_USE_XVFB"))
    run_headless = False

    logger.debug("JVM arguments: " + " ".join(args))
    jutil.start_vm(args, run_headless)
    logger.debug("Java virtual machine started.")
    jutil.attach()
    try:
        jutil.static_call("loci/common/Location", "cacheDirectoryListings",
                          "(Z)V", True)
    except:
        logger.warning("Bioformats version does not support directory cacheing")
    #
    # Start the log4j logger to avoid error messages.
    #
    if init_logger:
        try:
            jutil.static_call("org/apache/log4j/BasicConfigurator",
                              "configure", "()V")
            log4j_logger = jutil.static_call(
                "org/apache/log4j/Logger", "getRootLogger",
                "()Lorg/apache/log4j/Logger;")
            warn_level = jutil.get_static_field(
                "org/apache/log4j/Level", "WARN", "Lorg/apache/log4j/Level;")
            jutil.call(log4j_logger, "setLevel",
                       "(Lorg/apache/log4j/Level;)V", warn_level)
            del logger
            del warn_level
        except:
            logger.error("Failed to initialize log4j\n", exc_info=True)
    if not get_headless():
        jutil.activate_awt()
if get_ij_plugin_directory() is not None:
    __args.append("-Dplugins.dir=" + get_ij_plugin_directory())
#
# Get the log4j logger setup from a file in the bioformats directory
# if such a file exists.
#
__log4j_properties = os.path.join(__path, "log4j.properties")
if os.path.exists(__log4j_properties):
    __log4j_properties = "file:/" + __log4j_properties.replace(os.path.sep, "/")
    __args += [r"-Dlog4j.configuration=" + __log4j_properties]
    __init_logger = False
else:
    __init_logger = True
if ((get_headless() and not os.environ.has_key("CELLPROFILER_USE_XVFB"))
        or sys.platform == "darwin"):
    __args += [r"-Djava.awt.headless=true"]
logger.debug("JVM arguments: " + " ".join(__args))
jutil.start_vm(__args)
logger.debug("Java virtual machine started.")
jutil.attach()
try:
    jutil.static_call("loci/common/Location", "cacheDirectoryListings",
                      "(Z)V", True)
except:
    logger.warning("Bioformats version does not support directory cacheing")
finally:
    jutil.detach()
def start_cellprofiler_jvm():
    '''Start the Java VM with arguments appropriate for CellProfiler'''
    global USE_IJ2
    global logger
    if hasattr(sys, 'frozen') and sys.platform != 'darwin':
        root_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
    else:
        root_path = os.path.abspath(os.path.split(__file__)[0])
        root_path = os.path.split(root_path)[0]
    path = os.path.join(root_path, 'bioformats')
    imagej_path = os.path.join(root_path, 'imagej')
    loci_jar = os.path.join(path, "loci_tools.jar")
    ij2_jar = os.path.join(imagej_path, "imagej-2.0-SNAPSHOT-all.jar")
    ij_jar = os.path.join(imagej_path, "ij.jar")
    imglib_jar = os.path.join(imagej_path, "imglib.jar")
    javacl_jar = os.path.join(imagej_path, "javacl-1.0-beta-4-shaded.jar")
    USE_IJ2 = get_ij_version() == IJ_2
    if os.path.exists(ij2_jar) and USE_IJ2:
        class_path = os.pathsep.join((loci_jar, ij2_jar))
        USE_IJ2 = True
    else:
        USE_IJ2 = False
        class_path = os.pathsep.join((loci_jar, ij_jar, imglib_jar, javacl_jar))
    if os.environ.has_key("CLASSPATH"):
        class_path += os.pathsep + os.environ["CLASSPATH"]
    if sys.platform.startswith("win") and not hasattr(sys, 'frozen'):
        # Have to find tools.jar
        from cellprofiler.utilities.setup import find_jdk
        jdk_path = find_jdk()
        if jdk_path is not None:
            tools_jar = os.path.join(jdk_path, "lib", "tools.jar")
            class_path += os.pathsep + tools_jar
        else:
            logger.warning("Failed to find tools.jar")
    jvm_arg = [x.groups()[0] for x in [
        re.match('--jvm-heap-size=([0-9]+[gGkKmM])', y)
        for y in sys.argv] if x is not None]
    if len(jvm_arg) > 0:
        jvm_arg = jvm_arg[0]
    else:
        jvm_arg = "512m"
    args = [r"-Djava.class.path=" + class_path,
            r"-Dloci.bioformats.loaded=true",
            #r"-verbose:class",
            #r"-verbose:jni",
            r"-Xmx%s" % jvm_arg]
    if get_ij_plugin_directory() is not None:
        args.append("-Dplugins.dir=" + get_ij_plugin_directory())
    #
    # Get the log4j logger setup from a file in the bioformats directory
    # if such a file exists.
    #
    log4j_properties = os.path.join(path, "log4j.properties")
    if os.path.exists(log4j_properties):
        log4j_properties = "file:/" + log4j_properties.replace(os.path.sep, "/")
        args += [r"-Dlog4j.configuration=" + log4j_properties]
        init_logger = False
    else:
        init_logger = True
    run_headless = (get_headless() and
                    not os.environ.has_key("CELLPROFILER_USE_XVFB"))
    logger.debug("JVM arguments: " + " ".join(args))
    jutil.start_vm(args, run_headless)
    logger.debug("Java virtual machine started.")
    jutil.attach()
    try:
        jutil.static_call("loci/common/Location", "cacheDirectoryListings",
                          "(Z)V", True)
    except:
        logger.warning("Bioformats version does not support directory cacheing")
    #
    # Start the log4j logger to avoid error messages.
    #
    if init_logger:
        try:
            jutil.static_call("org/apache/log4j/BasicConfigurator",
                              "configure", "()V")
            log4j_logger = jutil.static_call(
                "org/apache/log4j/Logger", "getRootLogger",
                "()Lorg/apache/log4j/Logger;")
            warn_level = jutil.get_static_field(
                "org/apache/log4j/Level", "WARN", "Lorg/apache/log4j/Level;")
            jutil.call(log4j_logger, "setLevel",
                       "(Lorg/apache/log4j/Level;)V", warn_level)
            del logger
            del warn_level
        except:
            logger.error("Failed to initialize log4j\n", exc_info=True)
    if not run_headless:
        jutil.activate_awt()