Example #1
0
    def run(self, workspace):
        '''Run the imageJ command'''
        import imagej.ijbridge as ijbridge

        J.attach()
        bridge = None
        try:
            bridge = ijbridge.get_ij_bridge()

            image_set = workspace.image_set
            assert isinstance(image_set, cpi.ImageSet)
            # Per-group state, including the first/last image set numbers.
            group_dict = self.get_dictionary(workspace.image_set_list)

            # Fetch the designated input image, if the module wants one.
            input_image_name = None
            input_image = None
            if self.wants_to_set_current_image:
                input_image_name = self.current_input_image_name.value
                input_image = image_set.get_image(input_image_name,
                                                  must_be_grayscale=True)

            # Run the pre-group command or macro on the first image of the set.
            if group_dict[D_FIRST_IMAGE_SET] == image_set.number + 1:
                self.do_imagej(bridge, workspace, D_FIRST_IMAGE_SET)

            # Make the input image ImageJ's current image.
            if input_image is not None:
                bridge.inject_image(input_image.pixel_data, input_image_name)

            self.do_imagej(bridge, workspace)

            # Retrieve ImageJ's current image as the module's output image.
            if self.wants_to_get_current_image:
                image_set.add(self.current_output_image_name.value,
                              cpi.Image(bridge.get_current_image()))

            # Run the post-group command or macro on the last image of the set.
            if group_dict[D_LAST_IMAGE_SET] == image_set.number + 1:
                self.do_imagej(bridge, workspace, D_LAST_IMAGE_SET)
                # Optionally save ImageJ's current image after the post-group
                # command or macro has run.
                if (self.post_group_choice != CM_NOTHING
                        and self.wants_post_group_image):
                    image_set.add(self.post_group_output_image.value,
                                  cpi.Image(bridge.get_current_image()))
            if self.is_interactive():
                self.display(workspace)
        finally:
            # Drop the bridge reference before detaching from the JVM.
            if bridge is not None:
                del bridge
            J.detach()
Example #2
0
 def run(self, workspace):
     '''Run the imageJ command'''
     import imagej.ijbridge as ijbridge

     # Attach this thread to the JVM; detached in the finally block below.
     J.attach()
     ijb = None
     try:
         ijb = ijbridge.get_ij_bridge()

         image_set = workspace.image_set
         assert isinstance(image_set, cpi.ImageSet)
         # Per-group state, including the first/last image set numbers.
         d = self.get_dictionary(workspace.image_set_list)
         if self.wants_to_set_current_image:
             input_image_name = self.current_input_image_name.value
             img = image_set.get_image(input_image_name,
                                       must_be_grayscale = True)
         else:
             img = None

         #
         # Run a command or macro on the first image of the set
         #
         if d[D_FIRST_IMAGE_SET] == image_set.number + 1:
             self.do_imagej(ijb, workspace, D_FIRST_IMAGE_SET)
         #
         # Install the input image as the current image
         #
         if img is not None:
             ijb.inject_image(img.pixel_data, input_image_name)

         self.do_imagej(ijb, workspace)
         #
         # Get the output image
         #
         if self.wants_to_get_current_image:
             output_image_name = self.current_output_image_name.value
             pixel_data = ijb.get_current_image()
             image = cpi.Image(pixel_data)
             image_set.add(output_image_name, image)
         #
         # Execute the post-group macro or command
         #
         if d[D_LAST_IMAGE_SET] == image_set.number + 1:
             self.do_imagej(ijb, workspace, D_LAST_IMAGE_SET)
             #
             # Save the current ImageJ image after executing the post-group
             # command or macro
             #
             if (self.post_group_choice != CM_NOTHING and
                 self.wants_post_group_image):
                 output_image_name = self.post_group_output_image.value
                 pixel_data = ijb.get_current_image()
                 image = cpi.Image(pixel_data)
                 image_set.add(output_image_name, image)
         if self.is_interactive():
             self.display(workspace)
     finally:
         # Drop the bridge reference before detaching from the JVM.
         if ijb is not None:
             del(ijb)
         J.detach()
 def exit_thread(self):
     # Tear down this worker thread: release Bio-Formats' cached image
     # readers, close the notification socket, shut down AWT and detach
     # the thread from the JVM before optionally stopping the run loop.
     from bioformats.formatreader import clear_image_reader_cache
     self.notify_socket.close()
     clear_image_reader_cache()
     J.deactivate_awt()
     J.detach()
     if self.with_stop_run_loop:
         stop_run_loop()
 def exit_thread(self):
     # Tear down this worker thread: release Bio-Formats' cached image
     # readers, close the notification socket, shut down AWT and detach
     # the thread from the JVM before optionally stopping the run loop.
     from bioformats.formatreader import clear_image_reader_cache
     self.notify_socket.close()
     clear_image_reader_cache()
     J.deactivate_awt()
     J.detach()
     if self.with_stop_run_loop:
         stop_run_loop()
    def run(self, workspace):
        jb.attach()#initialize JVM
        
        input_image_name = self.input_image_name.value
        output_image_name = self.output_image_name.value
        self.gsize = self.input_gaussian_filter.value
        self.rsize = self.input_rolling_ball.value
        self.lowseed = self.input_low_seed.value
        self.highseed = self.input_high_seed.value
        
        image_set = workspace.image_set
        assert isinstance(image_set, cpi.ImageSet)
        
        #prepare input image        
        input_image = image_set.get_image(input_image_name, must_be_grayscale = True)        
        input_pixels = input_image.pixel_data
        ij_processor = ijiproc.make_image_processor((input_pixels*255.0).astype('float32'))
        #JavaScript API
        script = """       
        var img=Packages.ij.ImagePlus(name,ij_processor);
        Packages.ij.IJ.run(img, "8-bit", "");     
	var macro="g_size="+gsize+" r_size="+rsize+" low_edge="+lowseed+" high_edge="+highseed+" noise=-1";
        java.lang.System.out.println(macro);        
        Packages.ij.IJ.run(img, "Hysteresis Segment", macro);
        var output_proc=img.getProcessor();
        """
        #img.show();
        #Packages.ij.WindowManager.setCurrentWindow(img.getWindow());
        #"""
        in_params={
                   "name":output_image_name,
                   "ij_processor": ij_processor,
                   "gsize":self.gsize,
                   "rsize":self.rsize,
                   "lowseed":self.lowseed,
                   "highseed":self.highseed}
        out_params={"output_proc":None}
        r = jb.run_script(script, bindings_in = in_params,bindings_out = out_params)
        
        #prepare output image
        output_pixels = ijiproc.get_image(out_params["output_proc"], False)
        output_image = cpi.Image(output_pixels, parent_image = input_image)
        
        #write output
        image_set.add(output_image_name, output_image)
        
        if workspace.frame is not None:
            workspace.display_data.input_pixels = input_pixels
            workspace.display_data.output_pixels = output_pixels
        jb.detach()#close JVM
Example #6
0
# Run the JVM headless when CellProfiler itself is headless (unless an X
# virtual framebuffer is explicitly provided) and always on OS X.
if ((get_headless() and "CELLPROFILER_USE_XVFB" not in os.environ)
    or sys.platform=="darwin"):
    __args += [ r"-Djava.awt.headless=true" ]

logger.debug("JVM arguments: " + " ".join(__args))
jutil.start_vm(__args)
logger.debug("Java virtual machine started.")
jutil.attach()
try:
    # Ask Bio-Formats to cache directory listings; older Bio-Formats
    # versions lack this method, in which case we just log and move on.
    jutil.static_call("loci/common/Location",
                      "cacheDirectoryListings",
                      "(Z)V", True)
except Exception:
    logger.warning("Bioformats version does not support directory cacheing")
finally:
    jutil.detach()
    
# if get_headless() or sys.platform=="darwin":
#     jutil.attach()
#     jutil.static_call("java/lang/System", "setProperty", '(Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;', "java.awt.headless", "true")
#     jutil.detach()

#
# Start the log4j logger to avoid error messages.
#
if __init_logger:
    jutil.attach()
    try:
        jutil.static_call("org/apache/log4j/BasicConfigurator",
                          "configure", "()V")
        log4j_logger = jutil.static_call("org/apache/log4j/Logger",
Example #7
0
 def run(my_integer=my_integer):
     # Worker body: attach to the JVM, verify the wrapped Integer still
     # holds the expected value, drop the local reference, then detach.
     # my_integer is bound as a default to avoid late-binding closure issues.
     env = J.attach()
     self.assertEqual(my_integer.intValue(), my_value)
     del my_integer
     J.detach()
Example #8
0
 def __del__(self):
     '''Call del on this object to detach from the javabridge. If the
     object is declared locally, the javabridge will be detached once the
     program leaves its scope.'''
     J.detach()
Example #9
0
 def tearDown(self):
     # Detach the test thread from the JVM after each test.
     J.detach()
Example #10
0
 def tearDown(self):
     # Detach the test thread from the JVM after each test.
     J.detach()
 def exit_thread(self):
     # Close the notification socket, shut down AWT and detach this thread
     # from the JVM; optionally stop the thread's run loop.
     self.notify_socket.close()
     J.deactivate_awt()
     J.detach()
     if self.with_stop_run_loop:
         stop_run_loop()
Example #12
0
    def interface(self,
                  start_signal,
                  image_set_start=1,
                  image_set_end=None,
                  overwrite=True):
        '''Top-half thread for running an analysis.  Sets up grouping for jobs,
        deals with returned measurements, reports status periodically.

        start_signal- signal this semaphore when jobs are ready.
        image_set_start - beginning image set number to process
        image_set_end - last image set number to process
        overwrite - whether to recompute imagesets that already have data in initial_measurements.
        '''
        from cellprofiler.utilities.jutil import attach, detach
        posted_analysis_started = False
        acknowledged_thread_start = False
        measurements = None
        workspace = None
        # Attach this thread to the JVM; detached in the finally block below.
        attach()
        try:
            # listen for pipeline events, and pass them upstream
            self.pipeline.add_listener(lambda pipe, evt: self.post_event(evt))

            initial_measurements = None
            if self.output_path is None:
                # Caller wants a temporary measurements file.
                fd, filename = tempfile.mkstemp(".h5")
                try:
                    fd = os.fdopen(fd, "wb")
                    fd.write(self.initial_measurements_buf)
                    fd.close()
                    initial_measurements = cpmeas.Measurements(
                        filename=filename, mode="r")
                    measurements = cpmeas.Measurements(
                        image_set_start=None,
                        copy=initial_measurements,
                        mode="a")
                finally:
                    if initial_measurements is not None:
                        initial_measurements.close()
                    os.unlink(filename)
            else:
                with open(self.output_path, "wb") as fd:
                    fd.write(self.initial_measurements_buf)
                measurements = cpmeas.Measurements(image_set_start=None,
                                                   filename=self.output_path,
                                                   mode="a")
            # The shared dicts are needed in jobserver()
            self.shared_dicts = [
                m.get_dictionary() for m in self.pipeline.modules()
            ]
            workspace = cpw.Workspace(self.pipeline, None, None, None,
                                      measurements, cpimage.ImageSetList())

            if image_set_end is None:
                image_set_end = measurements.get_image_numbers()[-1]
            image_sets_to_process = filter(
                lambda x: x >= image_set_start and x <= image_set_end,
                measurements.get_image_numbers())

            self.post_event(AnalysisStarted())
            posted_analysis_started = True

            # reset the status of every image set that needs to be processed
            has_groups = measurements.has_groups()
            if self.pipeline.requires_aggregation():
                overwrite = True
            if has_groups and not overwrite:
                if not measurements.has_feature(cpmeas.IMAGE, self.STATUS):
                    overwrite = True
                else:
                    # A group counts as DONE only when every one of its image
                    # sets finished; any unfinished member marks the whole
                    # group UNPROCESSED.
                    group_status = {}
                    for image_number in measurements.get_image_numbers():
                        group_number = measurements[cpmeas.IMAGE,
                                                    cpmeas.GROUP_NUMBER,
                                                    image_number]
                        status = measurements[cpmeas.IMAGE, self.STATUS,
                                              image_number]
                        if status != self.STATUS_DONE:
                            group_status[
                                group_number] = self.STATUS_UNPROCESSED
                        elif group_number not in group_status:
                            group_status[group_number] = self.STATUS_DONE

            new_image_sets_to_process = []
            for image_set_number in image_sets_to_process:
                needs_reset = False
                # The elif below only runs when overwrite is False and the
                # image set is DONE - exactly when group_status was built.
                if (overwrite or (not measurements.has_measurements(
                        cpmeas.IMAGE, self.STATUS, image_set_number)) or
                    (measurements[cpmeas.IMAGE, self.STATUS, image_set_number]
                     != self.STATUS_DONE)):
                    needs_reset = True
                elif has_groups:
                    group_number = measurements[cpmeas.IMAGE,
                                                cpmeas.GROUP_NUMBER,
                                                image_set_number]
                    if group_status[group_number] != self.STATUS_DONE:
                        needs_reset = True
                if needs_reset:
                    measurements[cpmeas.IMAGE, self.STATUS, image_set_number] =\
                        self.STATUS_UNPROCESSED
                    new_image_sets_to_process.append(image_set_number)
            image_sets_to_process = new_image_sets_to_process

            # Find image groups.  These are written into measurements prior to
            # analysis.  Groups are processed as a single job.
            if has_groups or self.pipeline.requires_aggregation():
                worker_runs_post_group = True
                job_groups = {}
                for image_set_number in image_sets_to_process:
                    group_number = measurements[cpmeas.IMAGE,
                                                cpmeas.GROUP_NUMBER,
                                                image_set_number]
                    group_index = measurements[cpmeas.IMAGE,
                                               cpmeas.GROUP_INDEX,
                                               image_set_number]
                    job_groups[group_number] = job_groups.get(
                        group_number, []) + [(group_index, image_set_number)]
                job_groups = [[
                    isn for _, isn in sorted(job_groups[group_number])
                ] for group_number in sorted(job_groups)]
            else:
                worker_runs_post_group = False  # prepare_group will be run in worker, but post_group is below.
                job_groups = [[image_set_number]
                              for image_set_number in image_sets_to_process]

            # XXX - check that any constructed groups are complete, i.e.,
            # image_set_start and image_set_end shouldn't carve them up.

            if not worker_runs_post_group:
                # put the first job in the queue, then wait for the first image to
                # finish (see the check of self.finish_queue below) to post the rest.
                # This ensures that any shared data from the first imageset is
                # available to later imagesets.
                self.work_queue.put(
                    (job_groups[0], worker_runs_post_group, True))
                waiting_for_first_imageset = True
                del job_groups[0]
            else:
                waiting_for_first_imageset = False
                for job in job_groups:
                    self.work_queue.put((job, worker_runs_post_group, False))
                job_groups = []
            start_signal.release()
            acknowledged_thread_start = True

            # We loop until every image is completed, or an outside event breaks the loop.
            while not self.cancelled:

                # gather measurements
                while not self.received_measurements_queue.empty():
                    image_numbers, buf = self.received_measurements_queue.get()
                    image_numbers = [int(i) for i in image_numbers]
                    recd_measurements = cpmeas.load_measurements_from_buffer(
                        buf)
                    # NOTE: "recieved" spelling matches the method defined
                    # elsewhere in this class.
                    self.copy_recieved_measurements(recd_measurements,
                                                    measurements,
                                                    image_numbers)
                    recd_measurements.close()
                    del recd_measurements

                # check for jobs in progress
                while not self.in_process_queue.empty():
                    image_set_numbers = self.in_process_queue.get()
                    for image_set_number in image_set_numbers:
                        measurements[
                            cpmeas.IMAGE, self.STATUS,
                            int(image_set_number)] = self.STATUS_IN_PROCESS

                # check for finished jobs that haven't returned measurements, yet
                while not self.finished_queue.empty():
                    finished_req = self.finished_queue.get()
                    measurements[cpmeas.IMAGE, self.STATUS,
                                 int(finished_req.image_set_number
                                     )] = self.STATUS_FINISHED_WAITING
                    if waiting_for_first_imageset:
                        assert isinstance(finished_req,
                                          ImageSetSuccessWithDictionary)
                        self.shared_dicts = finished_req.shared_dicts
                        waiting_for_first_imageset = False
                        assert len(self.shared_dicts) == len(
                            self.pipeline.modules())
                        # if we had jobs waiting for the first image set to finish,
                        # queue them now that the shared state is available.
                        for job in job_groups:
                            self.work_queue.put(
                                (job, worker_runs_post_group, False))
                    finished_req.reply(Ack())

                # check progress and report
                counts = collections.Counter(
                    measurements[cpmeas.IMAGE, self.STATUS, image_set_number]
                    for image_set_number in image_sets_to_process)
                self.post_event(AnalysisProgress(counts))

                # Are we finished?
                if counts[self.STATUS_DONE] == len(image_sets_to_process):
                    last_image_number = measurements.get_image_numbers()[-1]
                    measurements.image_set_number = last_image_number
                    if not worker_runs_post_group:
                        self.pipeline.post_group(workspace, {})

                    workspace = cpw.Workspace(self.pipeline, None, None, None,
                                              measurements, None, None)
                    workspace.post_run_display_handler = \
                        self.post_run_display_handler
                    self.pipeline.post_run(workspace)
                    break

                measurements.flush()
                # not done, wait for more work
                with self.interface_work_cv:
                    while (self.paused
                           or ((not self.cancelled)
                               and self.in_process_queue.empty()
                               and self.finished_queue.empty()
                               and self.received_measurements_queue.empty())):
                        self.interface_work_cv.wait(
                        )  # wait for a change of status or work to arrive
        finally:
            # Detach from the JVM before handing the measurements file off.
            detach()
            # Note - the measurements file is owned by the queue consumer
            #        after this post_event.
            #
            if not acknowledged_thread_start:
                start_signal.release()
            if posted_analysis_started:
                was_cancelled = self.cancelled
                self.post_event(AnalysisFinished(measurements, was_cancelled))
            self.stop_workers()
        self.analysis_id = False  # this will cause the jobserver thread to exit
Example #13
0
    
    print 'big endian:', meta.getPixelsBigEndian(0, 0)
    print 'dim order:', meta.getPixelsDimensionOrder(0, 0)
    print 'pixel type:', meta.getPixelsPixelType(0, 0)
    print 'size x:', meta.getPixelsSizeX(0, 0)
    print 'size y:', meta.getPixelsSizeY(0, 0)
    print 'size c:', meta.getPixelsSizeC(0, 0)
    print 'size z:', meta.getPixelsSizeZ(0, 0)
    print 'size t:', meta.getPixelsSizeT(0, 0)
    print 'samples per pixel:', meta.getLogicalChannelSamplesPerPixel(0, 0)

    writer.setMetadataRetrieve(meta)
    writer.setId(out_file)
    for image in images:
        if len(image.shape)==3 and image.shape[2] == 3:  
            save_im = np.array([image[:,:,0], image[:,:,1], image[:,:,2]]).astype(np.uint8).flatten()
        else:
            save_im = image.astype(np.uint8).flatten()
        writer.saveBytes(env.make_byte_array(save_im), (image is images[-1]))
    writer.close()
    
    print 'Done writing image :)'
#    import PIL.Image as Image
#    im = Image.open(out_file, 'r')
#    im.show()
    
    jutil.detach()
    app.MainLoop()
    
    
Example #14
0
 def exit_thread(self):
     # Close the notification socket, shut down AWT and detach this thread
     # from the JVM; optionally stop the thread's run loop.
     self.notify_socket.close()
     J.deactivate_awt()
     J.detach()
     if self.with_stop_run_loop:
         stop_run_loop()
Example #15
0
    def interface(self, 
                  start_signal,
                  image_set_start=1, 
                  image_set_end=None,
                  overwrite=True):
        '''Top-half thread for running an analysis.  Sets up grouping for jobs,
        deals with returned measurements, reports status periodically.

        start_signal- signal this semaphore when jobs are ready.
        image_set_start - beginning image set number to process
        image_set_end - last image set number to process
        overwrite - whether to recompute imagesets that already have data in initial_measurements.
        '''
        from cellprofiler.utilities.jutil import attach, detach
        posted_analysis_started = False
        acknowledged_thread_start = False
        measurements = None
        workspace = None
        # Attach this thread to the JVM; detached in the finally block below.
        attach()
        try:
            # listen for pipeline events, and pass them upstream
            self.pipeline.add_listener(lambda pipe, evt: self.post_event(evt))
            
            initial_measurements = None
            if self.output_path is None:
                # Caller wants a temporary measurements file.
                fd, filename = tempfile.mkstemp(".h5")
                try:
                    fd = os.fdopen(fd, "wb")
                    fd.write(self.initial_measurements_buf)
                    fd.close()
                    initial_measurements = cpmeas.Measurements(
                        filename=filename, mode="r")
                    measurements = cpmeas.Measurements(
                        image_set_start = None,
                        copy = initial_measurements,
                        mode = "a")
                finally:
                    if initial_measurements is not None:
                        initial_measurements.close()
                    os.unlink(filename)
            else:
                with open(self.output_path, "wb") as fd:
                    fd.write(self.initial_measurements_buf)
                measurements = cpmeas.Measurements(image_set_start=None,
                                                   filename=self.output_path,
                                                   mode="a")
            # The shared dicts are needed in jobserver()
            self.shared_dicts = [m.get_dictionary() for m in self.pipeline.modules()]
            workspace = cpw.Workspace(self.pipeline, None, None, None,
                                      measurements, cpimage.ImageSetList())
    
            if image_set_end is None:
                image_set_end = measurements.get_image_numbers()[-1]
            image_sets_to_process = filter(
                lambda x: x >= image_set_start and x <= image_set_end,
                measurements.get_image_numbers())

            self.post_event(AnalysisStarted())
            posted_analysis_started = True

            # reset the status of every image set that needs to be processed
            has_groups = measurements.has_groups()
            if self.pipeline.requires_aggregation():
                overwrite = True
            if has_groups and not overwrite:
                if not measurements.has_feature(cpmeas.IMAGE, self.STATUS):
                    overwrite = True
                else:
                    # A group counts as DONE only when every one of its image
                    # sets finished; any unfinished member marks the whole
                    # group UNPROCESSED.
                    group_status = {}
                    for image_number in measurements.get_image_numbers():
                        group_number = measurements[
                            cpmeas.IMAGE, cpmeas.GROUP_NUMBER, image_number]
                        status = measurements[cpmeas.IMAGE, self.STATUS,
                                              image_number]
                        if status != self.STATUS_DONE:
                            group_status[group_number] = self.STATUS_UNPROCESSED
                        elif group_number not in group_status:
                            group_status[group_number] = self.STATUS_DONE
                            
            new_image_sets_to_process = []
            for image_set_number in image_sets_to_process:
                needs_reset = False
                # The elif below only runs when overwrite is False and the
                # image set is DONE - exactly when group_status was built.
                if (overwrite or
                    (not measurements.has_measurements(
                        cpmeas.IMAGE, self.STATUS, image_set_number)) or
                    (measurements[cpmeas.IMAGE, self.STATUS, image_set_number] 
                     != self.STATUS_DONE)):
                    needs_reset = True
                elif has_groups:
                    group_number = measurements[
                        cpmeas.IMAGE, cpmeas.GROUP_NUMBER, image_set_number]
                    if group_status[group_number] != self.STATUS_DONE:
                        needs_reset = True
                if needs_reset:
                    measurements[cpmeas.IMAGE, self.STATUS, image_set_number] =\
                        self.STATUS_UNPROCESSED
                    new_image_sets_to_process.append(image_set_number)
            image_sets_to_process = new_image_sets_to_process

            # Find image groups.  These are written into measurements prior to
            # analysis.  Groups are processed as a single job.
            if has_groups or self.pipeline.requires_aggregation():
                worker_runs_post_group = True
                job_groups = {}
                for image_set_number in image_sets_to_process:
                    group_number = measurements[cpmeas.IMAGE, 
                                                cpmeas.GROUP_NUMBER, 
                                                image_set_number]
                    group_index = measurements[cpmeas.IMAGE, 
                                               cpmeas.GROUP_INDEX, 
                                               image_set_number]
                    job_groups[group_number] = job_groups.get(group_number, []) + [(group_index, image_set_number)]
                job_groups = [[isn for _, isn in sorted(job_groups[group_number])] 
                              for group_number in sorted(job_groups)]
            else:
                worker_runs_post_group = False  # prepare_group will be run in worker, but post_group is below.
                job_groups = [[image_set_number] for image_set_number in image_sets_to_process]

            # XXX - check that any constructed groups are complete, i.e.,
            # image_set_start and image_set_end shouldn't carve them up.

            if not worker_runs_post_group:
                # put the first job in the queue, then wait for the first image to
                # finish (see the check of self.finish_queue below) to post the rest.
                # This ensures that any shared data from the first imageset is
                # available to later imagesets.
                self.work_queue.put((job_groups[0], 
                                     worker_runs_post_group,
                                     True))
                waiting_for_first_imageset = True
                del job_groups[0]
            else:
                waiting_for_first_imageset = False
                for job in job_groups:
                    self.work_queue.put((job, worker_runs_post_group, False))
                job_groups = []
            start_signal.release()
            acknowledged_thread_start = True


            # We loop until every image is completed, or an outside event breaks the loop.
            while not self.cancelled:

                # gather measurements
                while not self.received_measurements_queue.empty():
                    image_numbers, buf = self.received_measurements_queue.get()
                    image_numbers = [int(i) for i in image_numbers]
                    recd_measurements = cpmeas.load_measurements_from_buffer(buf)
                    # NOTE: "recieved" spelling matches the method defined
                    # elsewhere in this class.
                    self.copy_recieved_measurements(recd_measurements, measurements, image_numbers)
                    recd_measurements.close()
                    del recd_measurements

                # check for jobs in progress
                while not self.in_process_queue.empty():
                    image_set_numbers = self.in_process_queue.get()
                    for image_set_number in image_set_numbers:
                        measurements[cpmeas.IMAGE, self.STATUS, int(image_set_number)] = self.STATUS_IN_PROCESS

                # check for finished jobs that haven't returned measurements, yet
                while not self.finished_queue.empty():
                    finished_req = self.finished_queue.get()
                    measurements[cpmeas.IMAGE, self.STATUS, int(finished_req.image_set_number)] = self.STATUS_FINISHED_WAITING
                    if waiting_for_first_imageset:
                        assert isinstance(finished_req, 
                                          ImageSetSuccessWithDictionary)
                        self.shared_dicts = finished_req.shared_dicts
                        waiting_for_first_imageset = False
                        assert len(self.shared_dicts) == len(self.pipeline.modules())
                        # if we had jobs waiting for the first image set to finish,
                        # queue them now that the shared state is available.
                        for job in job_groups:
                            self.work_queue.put((job, worker_runs_post_group, False))
                    finished_req.reply(Ack())

                # check progress and report
                counts = collections.Counter(measurements[cpmeas.IMAGE, self.STATUS, image_set_number]
                                             for image_set_number in image_sets_to_process)
                self.post_event(AnalysisProgress(counts))

                # Are we finished?
                if counts[self.STATUS_DONE] == len(image_sets_to_process):
                    last_image_number = measurements.get_image_numbers()[-1]
                    measurements.image_set_number = last_image_number
                    if not worker_runs_post_group:
                        self.pipeline.post_group(workspace, {})
                    
                    workspace = cpw.Workspace(self.pipeline,
                                              None, None, None,
                                              measurements, None, None)
                    workspace.post_run_display_handler = \
                        self.post_run_display_handler
                    self.pipeline.post_run(workspace)
                    break

                measurements.flush()
                # not done, wait for more work
                with self.interface_work_cv:
                    while (self.paused or
                           ((not self.cancelled) and
                            self.in_process_queue.empty() and
                            self.finished_queue.empty() and
                            self.received_measurements_queue.empty())):
                        self.interface_work_cv.wait()  # wait for a change of status or work to arrive
        finally:
            # Detach from the JVM before handing the measurements file off.
            detach()
            # Note - the measurements file is owned by the queue consumer
            #        after this post_event.
            #
            if not acknowledged_thread_start:
                start_signal.release()
            if posted_analysis_started:
                was_cancelled = self.cancelled
                self.post_event(AnalysisFinished(measurements, was_cancelled))
            self.stop_workers()
        self.analysis_id = False  # this will cause the jobserver thread to exit
Example #16
0
                         "()Ljava/lang/ClassLoader;")


def get_plugin(classname):
    """Instantiate and return the named ImageJ plugin."""
    if classname.startswith("ij."):
        jclass = J.class_for_name(classname)
    else:
        # Non-core plugins are loaded through the user plugin class loader.
        jclass = J.class_for_name(classname, get_user_loader())
    wrapper = J.get_class_wrapper(jclass, True)
    ctor = J.get_constructor_wrapper(wrapper.getConstructor(None))
    return ctor.newInstance(None)


if __name__ == "__main__":
    import sys
    # Command-line entry point: list the available ImageJ commands, then
    # optionally execute the command named in argv[1] (with an optional
    # options string in argv[2]).
    J.attach()
    try:
        commands = get_commands()
        print "Commands: "
        for command in commands:
            print "\t" + command
        if len(sys.argv) == 2:
            execute_command(sys.argv[1])
        elif len(sys.argv) > 2:
            execute_command(sys.argv[1], sys.argv[2])

    finally:
        # Detach from the JVM and shut it down before exiting.
        J.detach()
        J.kill_vm()
Example #17
0
 def __del__(self):
     '''Call del on this object to detach from the javabridge. If the
     object is declared locally, the javabridge will be detached once the
     program leaves its scope.'''
     J.detach()
Example #18
0
 def run(my_integer = my_integer):
     # Worker body: attach to the JVM, verify the wrapped Integer still
     # holds the expected value, drop the local reference, then detach.
     # my_integer is bound as a default to avoid late-binding closure issues.
     env = J.attach()
     self.assertEqual(my_integer.intValue(),my_value)
     del my_integer
     J.detach()
Example #19
0

def get_plugin(classname):
    """Return an instance of the named plugin"""
    # Core ImageJ classes ("ij.*") load with the default loader; anything
    # else goes through the user plugin class loader.
    if classname.startswith("ij."):
        cls = J.class_for_name(classname)
    else:
        cls = J.class_for_name(classname, get_user_loader())
    cls = J.get_class_wrapper(cls, True)
    # Wrap the zero-argument constructor and invoke it.
    constructor = J.get_constructor_wrapper(cls.getConstructor(None))
    return constructor.newInstance(None)


if __name__ == "__main__":
    import sys

    # Command-line entry point: list the available ImageJ commands, then
    # optionally execute the command named in argv[1] (with an optional
    # options string in argv[2]).
    J.attach()
    try:
        commands = get_commands()
        print "Commands: "
        for command in commands:
            print "\t" + command
        if len(sys.argv) == 2:
            execute_command(sys.argv[1])
        elif len(sys.argv) > 2:
            execute_command(sys.argv[1], sys.argv[2])

    finally:
        # Detach from the JVM and shut it down before exiting.
        J.detach()
        J.kill_vm()
Example #20
0
 def __exit__(self, type, value, traceback):
     # Context-manager exit: close the notification socket, close and
     # forget all initial measurement files, then detach from the JVM.
     self.notify_socket.close()
     for m in self.initial_measurements.values():
         m.close()
     self.initial_measurements = {}
     J.detach()