Example #1
import logging

import dials.util.log
from dials.util.mp import batch_multi_node_parallel_map, multi_node_parallel_map


def test_cached_log_records(caplog):
    # log_something is a helper defined elsewhere in the test module;
    # a hedged sketch of what it might look like follows this example.
    # Generate some cached log messages in easy_mp child processes.
    results = multi_node_parallel_map(
        log_something,
        iterable=range(5),
        njobs=2,
        nproc=2,
        cluster_method="multiprocessing",
    )
    # Get all the resulting log records in a single flattened list.
    results = [record for records in results for record in records]

    results_batch = batch_multi_node_parallel_map(
        log_something,
        range(5),
        njobs=2,
        nproc=2,
        cluster_method="multiprocessing",
        callback=lambda _: None,
    )
    # Get all the resulting log records in a single flattened list.
    results_batch = [
        record for batch in results_batch for records in batch
        for record in records
    ]

    # Check that re-handling the messages in a logger in this process with a
    # threshold severity of WARNING results in no log records being emitted.
    with dials.util.log.LoggingContext("dials", logging.WARNING):
        dials.util.log.rehandle_cached_records(results)
        assert not caplog.records

        dials.util.log.rehandle_cached_records(results_batch)
        assert not caplog.records

    # Check that re-handling the messages in a logger in this process with a
    # threshold severity of INFO results in all the log records being emitted.
    with dials.util.log.LoggingContext("dials", logging.INFO):
        dials.util.log.rehandle_cached_records(results)
        assert caplog.records == results
        caplog.clear()

        dials.util.log.rehandle_cached_records(results_batch)
        assert caplog.records == results_batch
        caplog.clear()
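
The helper log_something mapped over above is defined elsewhere in the DIALS test module and is not shown here. Purely as a hedged illustration (not the actual DIALS helper), a child-process function of this kind could cache its log records in a list-backed handler and return them for the parent to re-handle, roughly like this:

import logging


class _CachingHandler(logging.Handler):
    """Collect log records in a list instead of emitting them anywhere."""

    def __init__(self):
        super().__init__()
        self.records = []

    def emit(self, record):
        self.records.append(record)


def log_something(_):
    # Runs in a child process: emit a couple of INFO records, cache them,
    # and return them so the parent process can re-handle them later.
    handler = _CachingHandler()
    logger = logging.getLogger("dials.example")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    try:
        logger.info("first message from a child process")
        logger.info("second message from a child process")
    finally:
        logger.removeHandler(handler)
    return handler.records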
Example #2
    def _find_spots_2d_no_shoeboxes(self, imageset):
        """
        Find the spots in the imageset

        :param imageset: The imageset to process
        :return: The list of spot shoeboxes
        """
        from dials.util.mp import batch_multi_node_parallel_map

        # Change the number of processors if necessary
        mp_nproc = self.mp_nproc
        mp_njobs = self.mp_njobs
        if os.name == "nt" and (mp_nproc > 1 or mp_njobs > 1):
            logger.warning(_no_multiprocessing_on_windows)
            mp_nproc = 1
            mp_njobs = 1
        if mp_nproc * mp_njobs > len(imageset):
            mp_nproc = min(mp_nproc, len(imageset))
            mp_njobs = int(math.ceil(len(imageset) / mp_nproc))

        mp_method = self.mp_method
        mp_chunksize = self.mp_chunksize

        if mp_chunksize == libtbx.Auto:
            mp_chunksize = self._compute_chunksize(len(imageset),
                                                   mp_njobs * mp_nproc,
                                                   self.min_chunksize)
            logger.info("Setting chunksize=%i" % mp_chunksize)

        len_by_nproc = int(math.floor(len(imageset) / (mp_njobs * mp_nproc)))
        if mp_chunksize > len_by_nproc:
            mp_chunksize = len_by_nproc
        assert mp_nproc > 0, "Invalid number of processors"
        assert mp_njobs > 0, "Invalid number of jobs"
        assert mp_njobs == 1 or mp_method is not None, "Invalid cluster method"
        assert mp_chunksize > 0, "Invalid chunk size"

        # The extract pixels function
        function = ExtractPixelsFromImage2DNoShoeboxes(
            imageset=imageset,
            threshold_function=self.threshold_function,
            mask=self.mask,
            max_strong_pixel_fraction=self.max_strong_pixel_fraction,
            compute_mean_background=self.compute_mean_background,
            region_of_interest=self.region_of_interest,
            min_spot_size=self.min_spot_size,
            max_spot_size=self.max_spot_size,
            filter_spots=self.filter_spots,
        )

        # The indices to iterate over
        indices = list(range(len(imageset)))

        # The resulting reflections
        reflections = flex.reflection_table()

        # Do the processing
        logger.info("Extracting strong spots from images")
        if mp_njobs > 1:
            logger.info(
                " Using %s with %d parallel job(s) and %d processes per node\n"
                % (mp_method, mp_njobs, mp_nproc))
        else:
            logger.info(" Using multiprocessing with %d parallel job(s)\n" %
                        (mp_nproc))
        if mp_nproc > 1 or mp_njobs > 1:

            def process_output(result):
                for message in result[1]:
                    logger.log(message.levelno, message.msg)
                reflections.extend(result[0][0])
                result[0][0] = None

            batch_multi_node_parallel_map(
                func=ExtractSpotsParallelTask(function),
                iterable=indices,
                nproc=mp_nproc,
                njobs=mp_njobs,
                cluster_method=mp_method,
                chunksize=mp_chunksize,
                callback=process_output,
            )
        else:
            for task in indices:
                reflections.extend(function(task)[0])

        # Return the reflections
        return reflections, None
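
The _compute_chunksize method used when mp_chunksize is libtbx.Auto is not shown in these examples. As an illustrative sketch only (not the DIALS implementation), an auto-chunksize heuristic of this kind might spread the images evenly over the workers while respecting a minimum chunk size:

import math


def compute_chunksize(nimg, nproc, min_chunksize):
    # Illustrative heuristic only: divide the images evenly across the
    # worker processes, but never suggest a chunk smaller than the
    # configured minimum chunk size.
    chunksize = int(math.ceil(nimg / nproc))
    return max(chunksize, min_chunksize)

The callers above then cap whatever this returns at one worker's share of the images (len_by_nproc), so a single chunk never exceeds the number of images a process is expected to handle.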
Example #3
    def _find_spots(self, imageset):
        """
        Find the spots in the imageset

        :param imageset: The imageset to process
        :return: The list of spot shoeboxes
        """
        from dials.model.data import PixelListLabeller
        from dials.util.mp import batch_multi_node_parallel_map

        # Change the number of processors if necessary
        mp_nproc = self.mp_nproc
        mp_njobs = self.mp_njobs
        if os.name == "nt" and (mp_nproc > 1 or mp_njobs > 1):
            logger.warning(_no_multiprocessing_on_windows)
            mp_nproc = 1
            mp_njobs = 1
        if mp_nproc * mp_njobs > len(imageset):
            mp_nproc = min(mp_nproc, len(imageset))
            mp_njobs = int(math.ceil(len(imageset) / mp_nproc))

        mp_method = self.mp_method
        mp_chunksize = self.mp_chunksize

        if mp_chunksize == libtbx.Auto:
            mp_chunksize = self._compute_chunksize(len(imageset),
                                                   mp_njobs * mp_nproc,
                                                   self.min_chunksize)
            logger.info("Setting chunksize=%i" % mp_chunksize)

        len_by_nproc = int(math.floor(len(imageset) / (mp_njobs * mp_nproc)))
        if mp_chunksize > len_by_nproc:
            mp_chunksize = len_by_nproc
        if mp_chunksize == 0:
            mp_chunksize = 1
        assert mp_nproc > 0, "Invalid number of processors"
        assert mp_njobs > 0, "Invalid number of jobs"
        assert mp_njobs == 1 or mp_method is not None, "Invalid cluster method"
        assert mp_chunksize > 0, "Invalid chunk size"

        # The extract pixels function
        function = ExtractPixelsFromImage(
            imageset=imageset,
            threshold_function=self.threshold_function,
            mask=self.mask,
            max_strong_pixel_fraction=self.max_strong_pixel_fraction,
            compute_mean_background=self.compute_mean_background,
            region_of_interest=self.region_of_interest,
        )

        # The indices to iterate over
        indices = list(range(len(imageset)))

        # Initialise the pixel labeller
        num_panels = len(imageset.get_detector())
        pixel_labeller = [PixelListLabeller() for p in range(num_panels)]

        # Do the processing
        logger.info("Extracting strong pixels from images")
        if mp_njobs > 1:
            logger.info(
                " Using %s with %d parallel job(s) and %d processes per node\n"
                % (mp_method, mp_njobs, mp_nproc))
        else:
            logger.info(" Using multiprocessing with %d parallel job(s)\n" %
                        (mp_nproc))
        if mp_nproc > 1 or mp_njobs > 1:

            def process_output(result):
                for message in result[1]:
                    logger.log(message.levelno, message.msg)
                assert len(pixel_labeller) == len(
                    result[0].pixel_list), "Inconsistent size"
                for plabeller, plist in zip(pixel_labeller,
                                            result[0].pixel_list):
                    plabeller.add(plist)
                result[0].pixel_list = None

            batch_multi_node_parallel_map(
                func=ExtractSpotsParallelTask(function),
                iterable=indices,
                nproc=mp_nproc,
                njobs=mp_njobs,
                cluster_method=mp_method,
                chunksize=mp_chunksize,
                callback=process_output,
            )
        else:
            for task in indices:
                result = function(task)
                assert len(pixel_labeller) == len(
                    result.pixel_list), "Inconsistent size"
                for plabeller, plist in zip(pixel_labeller, result.pixel_list):
                    plabeller.add(plist)
                # Release the pixel lists once all panels have been processed.
                result.pixel_list = None

        # Create shoeboxes from pixel list
        converter = PixelListToReflectionTable(
            self.min_spot_size,
            self.max_spot_size,
            self.filter_spots,
            self.write_hot_pixel_mask,
        )
        return converter(imageset, pixel_labeller)
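
In both branches above, results are folded into an accumulator (the pixel_labeller list) inside the process_output callback as soon as each batch arrives, rather than being collected into one large return value. As an analogy only, not the dials.util.mp implementation, the same callback-accumulation pattern can be sketched with the standard library:

import multiprocessing


def square(x):
    return x * x


def demo():
    results = []

    def on_result(value):
        # Runs in the parent process as each task completes, playing the
        # same role as the process_output callbacks above.
        results.append(value)

    with multiprocessing.Pool(processes=2) as pool:
        for x in range(8):
            pool.apply_async(square, (x,), callback=on_result)
        pool.close()
        pool.join()
    return sorted(results)


if __name__ == "__main__":
    print(demo())  # [0, 1, 4, 9, 16, 25, 36, 49]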
Example #4
  def _find_spots_2d_no_shoeboxes(self, imageset):
    '''
    Find the spots in the imageset

    :param imageset: The imageset to process
    :return: The list of spot shoeboxes

    '''
    from dials.array_family import flex
    from dials.util.mp import batch_multi_node_parallel_map
    from math import floor, ceil
    import platform

    # Change the number of processors if necessary
    mp_nproc = self.mp_nproc
    mp_njobs = self.mp_njobs
    # Note: platform.system() forks, which is bad for MPI, so only call it
    # when multiprocessing has actually been requested.
    if (mp_nproc > 1 or mp_njobs > 1) and platform.system() == "Windows":
      logger.warning("")
      logger.warning("*" * 80)
      logger.warning("Multiprocessing is not available on Windows. Setting nproc = 1, njobs = 1")
      logger.warning("*" * 80)
      logger.warning("")
      mp_nproc = 1
      mp_njobs = 1
    if mp_nproc * mp_njobs > len(imageset):
      mp_nproc = min(mp_nproc, len(imageset))
      mp_njobs = int(ceil(len(imageset) / mp_nproc))

    mp_method = self.mp_method
    mp_chunksize = self.mp_chunksize

    import libtbx
    if mp_chunksize == libtbx.Auto:
      mp_chunksize = self._compute_chunksize(
        len(imageset),
        mp_njobs * mp_nproc,
        self.min_chunksize)
      logger.info("Setting chunksize=%i" %mp_chunksize)

    len_by_nproc = int(floor(len(imageset) / (mp_njobs * mp_nproc)))
    if mp_chunksize > len_by_nproc:
      mp_chunksize = len_by_nproc
    assert mp_nproc > 0, "Invalid number of processors"
    assert mp_njobs > 0, "Invalid number of jobs"
    assert mp_njobs == 1 or mp_method is not None, "Invalid cluster method"
    assert mp_chunksize > 0, "Invalid chunk size"

    # The extract pixels function
    function = ExtractPixelsFromImage2DNoShoeboxes(
        imageset                  = imageset,
        threshold_function        = self.threshold_function,
        mask                      = self.mask,
        max_strong_pixel_fraction = self.max_strong_pixel_fraction,
        compute_mean_background   = self.compute_mean_background,
        region_of_interest        = self.region_of_interest,
        min_spot_size             = self.min_spot_size,
        max_spot_size             = self.max_spot_size,
        filter_spots              = self.filter_spots)

    # The indices to iterate over
    indices = list(range(len(imageset)))

    # The resulting reflections
    reflections = flex.reflection_table()

    # Do the processing
    logger.info('Extracting strong spots from images')
    if mp_njobs > 1:
      logger.info(' Using %s with %d parallel job(s) and %d processes per node\n' % (mp_method, mp_njobs, mp_nproc))
    else:
      logger.info(' Using multiprocessing with %d parallel job(s)\n' % (mp_nproc))
    if mp_nproc > 1 or mp_njobs > 1:
      def process_output(result):
        for message in result[1]:
          logger.log(message.levelno, message.msg)
        reflections.extend(result[0][0])
        result[0][0] = None
      batch_multi_node_parallel_map(
        func           = ExtractSpotsParallelTask(function),
        iterable       = indices,
        nproc          = mp_nproc,
        njobs          = mp_njobs,
        cluster_method = mp_method,
        chunksize      = mp_chunksize,
        callback       = process_output)
    else:
      for task in indices:
        reflections.extend(function(task)[0])

    # Return the reflections
    return reflections, None
Example #5
    def _find_spots(self, imageset):
        """
        Find the spots in the imageset

        :param imageset: The imageset to process
        :return: The list of spot shoeboxes
        """
        # Change the number of processors if necessary
        mp_nproc = self.mp_nproc
        mp_njobs = self.mp_njobs
        if mp_nproc is libtbx.Auto:
            mp_nproc = available_cores()
            logger.info(f"Setting nproc={mp_nproc}")
        if mp_nproc * mp_njobs > len(imageset):
            mp_nproc = min(mp_nproc, len(imageset))
            mp_njobs = int(math.ceil(len(imageset) / mp_nproc))

        mp_method = self.mp_method
        mp_chunksize = self.mp_chunksize

        if mp_chunksize is libtbx.Auto:
            mp_chunksize = self._compute_chunksize(len(imageset),
                                                   mp_njobs * mp_nproc,
                                                   self.min_chunksize)
            logger.info("Setting chunksize=%i", mp_chunksize)

        len_by_nproc = int(math.floor(len(imageset) / (mp_njobs * mp_nproc)))
        if mp_chunksize > len_by_nproc:
            mp_chunksize = len_by_nproc
        if mp_chunksize == 0:
            mp_chunksize = 1
        assert mp_nproc > 0, "Invalid number of processors"
        assert mp_njobs > 0, "Invalid number of jobs"
        assert mp_njobs == 1 or mp_method is not None, "Invalid cluster method"
        assert mp_chunksize > 0, "Invalid chunk size"

        # The extract pixels function
        function = ExtractPixelsFromImage(
            imageset=imageset,
            threshold_function=self.threshold_function,
            mask=self.mask,
            max_strong_pixel_fraction=self.max_strong_pixel_fraction,
            compute_mean_background=self.compute_mean_background,
            region_of_interest=self.region_of_interest,
        )

        # The indices to iterate over
        indices = list(range(len(imageset)))

        # Initialise the pixel labeller
        num_panels = len(imageset.get_detector())
        pixel_labeller = [PixelListLabeller() for p in range(num_panels)]

        # Do the processing
        logger.info("Extracting strong pixels from images")
        if mp_njobs > 1:
            logger.info(
                " Using %s with %d parallel job(s) and %d processes per node\n",
                mp_method,
                mp_njobs,
                mp_nproc,
            )
        else:
            logger.info(" Using multiprocessing with %d parallel job(s)\n",
                        mp_nproc)
        if mp_nproc > 1 or mp_njobs > 1:

            def process_output(result):
                rehandle_cached_records(result[1])
                assert len(pixel_labeller) == len(
                    result[0]), "Inconsistent size"
                for plabeller, plist in zip(pixel_labeller, result[0]):
                    plabeller.add(plist)

            batch_multi_node_parallel_map(
                func=ExtractSpotsParallelTask(function),
                iterable=indices,
                nproc=mp_nproc,
                njobs=mp_njobs,
                cluster_method=mp_method,
                chunksize=mp_chunksize,
                callback=process_output,
            )
        else:
            for task in indices:
                result = function(task)
                assert len(pixel_labeller) == len(result), "Inconsistent size"
                for plabeller, plist in zip(pixel_labeller, result):
                    plabeller.add(plist)
                result.clear()

        # Create shoeboxes from pixel list
        return pixel_list_to_reflection_table(
            imageset,
            pixel_labeller,
            filter_spots=self.filter_spots,
            min_spot_size=self.min_spot_size,
            max_spot_size=self.max_spot_size,
            write_hot_pixel_mask=self.write_hot_pixel_mask,
        )
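
rehandle_cached_records, used in the callback above and in Example #1, re-emits records that were cached in child processes through the parent's logging configuration. As a rough illustrative equivalent only (the real function lives in dials.util.log), re-handling boils down to pushing each record back through its named logger, subject to that logger's current level:

import logging


def rehandle_records(records):
    # Illustrative sketch, not the dials.util.log implementation: dispatch
    # each cached record through the logger it originally came from, so the
    # parent process's handlers and level thresholds decide what is emitted.
    for record in records:
        logger = logging.getLogger(record.name)
        if logger.isEnabledFor(record.levelno):
            logger.handle(record)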
Example #6
    def _find_spots_2d_no_shoeboxes(self, imageset):
        '''
        Find the spots in the imageset

        :param imageset: The imageset to process
        :return: The list of spot shoeboxes
        '''
        from dials.array_family import flex
        from dials.util.mp import batch_multi_node_parallel_map
        from math import floor, ceil
        import platform

        # Change the number of processors if necessary
        mp_nproc = self.mp_nproc
        mp_njobs = self.mp_njobs
        # Note: platform.system() forks, which is bad for MPI, so only call it
        # when multiprocessing has actually been requested.
        if (mp_nproc > 1 or mp_njobs > 1) and platform.system() == "Windows":
            logger.warning("")
            logger.warning("*" * 80)
            logger.warning(
                "Multiprocessing is not available on Windows. Setting nproc = 1, njobs = 1"
            )
            logger.warning("*" * 80)
            logger.warning("")
            mp_nproc = 1
            mp_njobs = 1
        if mp_nproc * mp_njobs > len(imageset):
            mp_nproc = min(mp_nproc, len(imageset))
            mp_njobs = int(ceil(len(imageset) / mp_nproc))

        mp_method = self.mp_method
        mp_chunksize = self.mp_chunksize

        import libtbx
        if mp_chunksize == libtbx.Auto:
            mp_chunksize = self._compute_chunksize(len(imageset),
                                                   mp_njobs * mp_nproc,
                                                   self.min_chunksize)
            logger.info("Setting chunksize=%i" % mp_chunksize)

        len_by_nproc = int(floor(len(imageset) / (mp_njobs * mp_nproc)))
        if mp_chunksize > len_by_nproc:
            mp_chunksize = len_by_nproc
        assert mp_nproc > 0, "Invalid number of processors"
        assert mp_njobs > 0, "Invalid number of jobs"
        assert mp_njobs == 1 or mp_method is not None, "Invalid cluster method"
        assert mp_chunksize > 0, "Invalid chunk size"

        # The extract pixels function
        function = ExtractPixelsFromImage2DNoShoeboxes(
            imageset=imageset,
            threshold_function=self.threshold_function,
            mask=self.mask,
            max_strong_pixel_fraction=self.max_strong_pixel_fraction,
            compute_mean_background=self.compute_mean_background,
            region_of_interest=self.region_of_interest,
            min_spot_size=self.min_spot_size,
            max_spot_size=self.max_spot_size,
            filter_spots=self.filter_spots)

        # The indices to iterate over
        indices = list(range(len(imageset)))

        # The resulting reflections
        reflections = flex.reflection_table()

        # Do the processing
        logger.info('Extracting strong spots from images')
        if mp_njobs > 1:
            logger.info(
                ' Using %s with %d parallel job(s) and %d processes per node\n'
                % (mp_method, mp_njobs, mp_nproc))
        else:
            logger.info(' Using multiprocessing with %d parallel job(s)\n' %
                        (mp_nproc))
        if mp_nproc > 1 or mp_njobs > 1:

            def process_output(result):
                for message in result[1]:
                    logger.log(message.levelno, message.msg)
                reflections.extend(result[0][0])
                result[0][0] = None

            batch_multi_node_parallel_map(
                func=ExtractSpotsParallelTask(function),
                iterable=indices,
                nproc=mp_nproc,
                njobs=mp_njobs,
                cluster_method=mp_method,
                chunksize=mp_chunksize,
                callback=process_output)
        else:
            for task in indices:
                reflections.extend(function(task)[0])

        # Return the reflections
        return reflections, None