import os
import sys
import logging

import numpy as np

# Assumed to be provided elsewhere in the surrounding project:
#   execute_in_subprocess, skeletonize, combine_masks, generate_mesh,
#   plus the helpers referenced by the unit tests below.


def skeletonize_in_subprocess(config, id_box_mask_factor_err):
    """
    Execute skeletonize() in a subprocess, and handle TimeoutErrors.
    """
    logger = logging.getLogger(__name__ + '.skeletonize')
    logger.setLevel(logging.WARN)
    timeout = config['options']['analysis-timeout']

    body_id, combined_box, combined_mask, downsample_factor, _err_msg = id_box_mask_factor_err

    try:
        func = execute_in_subprocess(timeout, logger)(skeletonize)
        body_id, swc = func(config, body_id, combined_box, combined_mask, downsample_factor)
        return (body_id, swc, None)
    except TimeoutError:
        err_msg = f"Timeout ({timeout}) while skeletonizing body: id={body_id} box={combined_box.tolist()}"     
        logger.error(err_msg)

        output_dir = config['options']['failed-mask-dir']
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

            output_path = output_dir + f'/failed-skeleton-{body_id}.h5'
            logger.error(f"Writing mask to {output_path}")

            import h5py
            with h5py.File(output_path, 'w') as f:
                f["downsample_factor"] = downsample_factor
                f["box"] = combined_box
                f.create_dataset("mask", data=combined_mask, chunks=True)
        
        return (body_id, None, err_msg)
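
# For reference, a minimal sketch of an execute_in_subprocess() decorator factory
# with the call signature used above, i.e. execute_in_subprocess(timeout, logger)(func)(*args).
# This is an illustrative assumption, not the project's actual implementation
# (which also uses `logger` to forward the child's log records and stdout/stderr
# back to the parent); it only shows the timeout/termination behavior.
import multiprocessing
from functools import wraps

def _execute_in_subprocess_sketch(timeout=None, logger=None):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Run func in a single-worker pool; func must be picklable
            # (i.e. defined at module level), as in the examples above.
            with multiprocessing.Pool(1) as pool:
                async_result = pool.apply_async(func, args, kwargs)
                try:
                    return async_result.get(timeout)
                except multiprocessing.TimeoutError:
                    # Leaving the 'with' block terminates the stuck worker.
                    raise TimeoutError(f"Subprocess exceeded timeout ({timeout}s)")
        return wrapper
    return decorator
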
    def test_timeout(self):
        try:
            _result = execute_in_subprocess(1.0, logger)(_test_helper)(1, 2, 3.0)
        except TimeoutError:
            pass
        else:
            assert False, "Expected a timeout error."

    def test_timeout_in_C_function(self):
        try:
            _result = execute_in_subprocess(1.0, logger)(c_sleep)()
        except TimeoutError:
            pass
        else:
            assert False, "Expected a timeout error."
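
# The tests above call two small helpers defined elsewhere in the test module.
# The sketches below are hypothetical reconstructions inferred from how the
# tests use them, not the actual definitions.
import ctypes
import time

def _test_helper_sketch(a, b, sleep_seconds):
    # Writes 'a' to stdout, 'b' to stderr, sleeps, then writes the sleep
    # duration to stdout and returns the sum of all three arguments.
    # (Presumably the real decorator forwards the child's stdout lines as INFO
    # records and stderr lines as ERROR records, which is what test_basic() checks.)
    print(a)
    print(b, file=sys.stderr)
    time.sleep(sleep_seconds)
    print(sleep_seconds)
    return a + b + sleep_seconds

def _c_sleep_sketch():
    # Blocks inside a C-level call (POSIX libc sleep), which cannot be
    # interrupted from Python -- hence the need to kill the whole subprocess.
    libc = ctypes.CDLL(None)
    libc.sleep(10)
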
def combine_masks_in_subprocess(config, ids_and_boxes_and_compressed_masks):
    """
    Execute combine_masks() in a subprocess, and handle TimeoutErrors.
    """
    logger = logging.getLogger(__name__ + '.combine_masks')
    logger.setLevel(logging.WARN)
    timeout = config['options']['downsample-timeout']

    body_id, boxes_and_compressed_masks = ids_and_boxes_and_compressed_masks

    if config["options"]["downsample-in-subprocess"]:
        func = execute_in_subprocess(timeout, logger)(combine_masks)
    else:
        func = combine_masks
        
    try:
        body_id, combined_box, combined_mask_downsampled, chosen_downsample_factor = func(
            config, body_id, boxes_and_compressed_masks)
        return (body_id, combined_box, combined_mask_downsampled,
                chosen_downsample_factor, None)

    except TimeoutError:  # note: this can only be raised when a subprocess is used
        boxes, _compressed_masks, counts = zip(*boxes_and_compressed_masks)

        total_count = sum(counts)
        boxes = np.asarray(boxes)
        combined_box = np.zeros((2,3), dtype=np.int64)
        combined_box[0] = boxes[:, 0, :].min(axis=0)
        combined_box[1] = boxes[:, 1, :].max(axis=0)

        err_msg = f"Timeout ({timeout}) while downsampling/assembling body: id={body_id} size={total_count} box={combined_box.tolist()}"
        logger.error(err_msg)
        return (body_id, combined_box, None, None, err_msg)
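
# A hypothetical usage sketch: the *_in_subprocess() functions above return a
# tuple whose last element is either None (success) or an error message, so a
# driver can separate failed bodies from successful ones without raising.
# Here, 'items' is assumed to be an iterable of (body_id, boxes_and_compressed_masks)
# pairs prepared upstream; the function name is illustrative only.
def _split_results_sketch(config, items):
    results = [combine_masks_in_subprocess(config, item) for item in items]
    failed = [r for r in results if r[-1] is not None]
    succeeded = [r for r in results if r[-1] is None]
    return succeeded, failed
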
    def test_error(self):
        """
        Generate an exception in the subprocess and verify that it appears in the parent.
        """
        # Too many arguments; should fail
        try:
            _result = execute_in_subprocess(1.0, logger)(_test_helper)(1, 2, 3, 4, 5)
        except:
            pass
        else:
            raise RuntimeError("Expected to see an exception in the subprocess.")

    def test_timeout_in_skeletonize(self):
        """
        Verify that the subprocess_decorator works to kill the skeletonize function.
        """
        a = np.ones((100, 1000, 1000), dtype=np.uint8)

        try:
            _result = execute_in_subprocess(1.0, logger)(skeletonize_array)(a)
        except TimeoutError:
            pass
        else:
            assert False, "Expected a timeout error."
    def test_basic(self):
        """
        Execute a well-behaved function in a subprocess and verify the result.
        """
        handler = MessageCollector()
        logging.getLogger().addHandler(handler)
       
        try:
            result = execute_in_subprocess(1.0, logger)(_test_helper)(1, 2, 0)
            assert result == 1 + 2 + 0, f"Wrong result: {result}"
            assert handler.collected_messages['INFO'] == ['1', '0'], f"Got: {handler.collected_messages['INFO']}"
            assert handler.collected_messages['ERROR'] == ['2'], f"Got: {handler.collected_messages['ERROR']}"
 
        finally:
            logging.getLogger().removeHandler(handler)
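
# A hypothetical sketch of the MessageCollector handler used in test_basic(),
# inferred from how the test inspects it: a logging.Handler that groups the
# formatted messages it receives by level name.  Not the actual definition.
from collections import defaultdict

class MessageCollectorSketch(logging.Handler):
    def __init__(self):
        super().__init__()
        self.collected_messages = defaultdict(list)

    def emit(self, record):
        self.collected_messages[record.levelname].append(record.getMessage())
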
    def test_segfault(self):
        """
        Test the behavior of a subprocess that segfaults.

        Note: If the process dies via a signal (e.g. SIGSEGV or SIGKILL), the error
              is not propagated immediately; instead, an ordinary TimeoutError is
              eventually raised.  This is due to a limitation in the Python
              multiprocessing module:
              https://bugs.python.org/issue22393
        """
        try:
            with pause_faulthandler():
                _result = execute_in_subprocess(1.0, logger)(generate_segfault)()
        except TimeoutError:
            pass
        else:
            raise RuntimeError("Expected to see a TimeoutError exception.")
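
# A hypothetical sketch of the pause_faulthandler() context manager used in
# test_segfault(), assuming it merely disables Python's faulthandler so the
# intentional crash in the child doesn't dump a traceback to stderr.
import faulthandler
from contextlib import contextmanager

@contextmanager
def _pause_faulthandler_sketch():
    was_enabled = faulthandler.is_enabled()
    faulthandler.disable()
    try:
        yield
    finally:
        if was_enabled:
            faulthandler.enable()
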
    def test_no_logger(self):
        """
        If no logger is passed to the decorator, then the subprocess's stdout and
        stderr are forwarded to the parent process's stdout/stderr as usual.
        """
        with open('/tmp/captured-stdout.txt', 'w') as f_out, \
             open('/tmp/captured-stderr.txt', 'w') as f_err:

            with stdout_redirected(f_out.fileno(), sys.stdout), \
                 stdout_redirected(f_err.fileno(), sys.stderr):

                result = execute_in_subprocess(1.0)(_test_helper)(1, 2, 0)
                assert result == 1 + 2 + 0, f"Wrong result: {result}"

        with open('/tmp/captured-stdout.txt', 'r') as f_out, \
             open('/tmp/captured-stderr.txt', 'r') as f_err:

            out = f_out.read()
            err = f_err.read()
            assert out == '1\n0', f"Got: {out}"
            assert err == '2\n', f"Got: {err}"
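
# A hypothetical sketch of the stdout_redirected() helper used in
# test_no_logger(), assuming the usual dup2()-based recipe: it temporarily
# points the given stream's file descriptor at another descriptor, so even
# output written by child processes or C code is captured.
from contextlib import contextmanager

@contextmanager
def _stdout_redirected_sketch(to_fd, stream=sys.stdout):
    stream.flush()
    saved_fd = os.dup(stream.fileno())      # remember the original descriptor
    try:
        os.dup2(to_fd, stream.fileno())     # point the stream at to_fd
        yield
    finally:
        stream.flush()
        os.dup2(saved_fd, stream.fileno())  # restore the original descriptor
        os.close(saved_fd)
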
def generate_mesh_in_subprocess(config, id_box_mask_factor_err):
    """
    If the 'use-subprocesses' option is enabled, execute generate_mesh() in a
    subprocess and handle TimeoutErrors.  Otherwise, call generate_mesh() directly.
    """
    logger = logging.getLogger(__name__ + '.generate_mesh')
    logger.setLevel(logging.WARN)
    timeout = config['options']['analysis-timeout']

    body_id, combined_box, combined_mask, downsample_factor, _err_msg = id_box_mask_factor_err

    if config["mesh-config"]["use-subprocesses"]:
        func = execute_in_subprocess(timeout, logger)(generate_mesh)
    else:
        func = generate_mesh

    try:
        _body_id, mesh_obj = func(config, body_id, combined_box, combined_mask,
                                  downsample_factor)
        return (body_id, mesh_obj, None)

    except TimeoutError:  # note: this can only be raised when a subprocess is used
        err_msg = f"Timeout ({timeout}) while meshifying body: id={body_id} box={combined_box.tolist()}"
        logger.error(err_msg)

        output_dir = config['options']['failed-mask-dir']
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

            output_path = output_dir + f'/failed-mesh-{body_id}.h5'
            logger.error(f"Writing mask to {output_path}")

            import h5py
            with h5py.File(output_path, 'w') as f:
                f["downsample_factor"] = downsample_factor
                f["box"] = combined_box
                f.create_dataset("mask", data=combined_mask, chunks=True)

        return (body_id, None, err_msg)
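
# The failed-mask files written above can be loaded later for post-mortem
# debugging.  A minimal sketch of a loader, assuming the HDF5 layout written
# above; the function name is illustrative only.
def _load_failed_mask_sketch(path):
    import h5py
    with h5py.File(path, 'r') as f:
        downsample_factor = f["downsample_factor"][()]
        box = f["box"][:]
        mask = f["mask"][:]
    return downsample_factor, box, mask
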