def run (out=None, omit_unicode_experiment=False):
  if (out is None): out = sys.stdout
  out.write(boost.python.platform_info)
  tag = libtbx.env.under_dist("boost", "TAG")
  if (op.isfile(tag)):
    tag = open(tag).read().strip()
  else:
    tag = None
  print >> out, "boost/TAG:", tag
  print >> out, "os.name:", os.name
  print >> out, "sys.platform:", sys.platform
  print >> out, "sys.byteorder:", sys.byteorder
  print >> out, "platform.platform():", platform.platform()
  print >> out, "platform.architecture():", platform.architecture()
  for attr in ["division_by_zero", "invalid", "overflow"]:
    attr = "floating_point_exceptions.%s_trapped" % attr
    print >> out, "%s:" % attr, eval("boost.python.%s" % attr)
  print >> out, "number of processors:", introspection.number_of_processors(
    return_value_if_unknown="unknown")
  introspection.machine_memory_info().show(out=out)
  try: import thread
  except ImportError: print >> out, "import thread: NO"
  else: print >> out, "import thread: OK"
  print "Division operator semantics: %s division" % (div_probe() / 0)
  c = getattr(boost.python.ext, "str_or_unicode_as_char_list", None)
  if (c is not None and not omit_unicode_experiment):
    print >> out, '"hello" =', c("hello")
    print >> out, 'u"hello" =', c(u"hello")
    e = u"\u00C5".encode("utf-8", "strict")
    print >> out, 'u"\u00C5" =', c(u"\u00C5"), 'as utf-8 =', c(e)
    print >> out, "LATIN CAPITAL LETTER A WITH RING ABOVE =", e
  from libtbx.utils import format_cpu_times
  print format_cpu_times()
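
The report above leans on libtbx.introspection for the processor count and the memory summary. A minimal standalone sketch of just those two calls, written in Python 3 syntax and assuming only a working libtbx installation:

import sys
from libtbx import introspection

def show_machine_summary(out=None):
    # Mirror the run() functions above: default to stdout.
    if out is None:
        out = sys.stdout
    print("number of processors:",
          introspection.number_of_processors(return_value_if_unknown="unknown"),
          file=out)
    # machine_memory_info().show() writes a short total/free memory report.
    introspection.machine_memory_info().show(out=out)

if __name__ == "__main__":
    show_machine_summary()
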
Example #2
def __cubicles_max_memory_allocation_set():
    from libtbx.introspection import machine_memory_info
    m = machine_memory_info().memory_total()
    if (m is None): m = 1000000000
    l = 2**(8 * bp.sizeof_void_ptr - 1) - 1
    if (m > l): m = l
    cubicles_max_memory_allocation_set(number_of_bytes=m // 2)
    def run(self):
        ''' Run the script. '''
        from dials.util.command_line import Command
        from dials.util.options import flatten_experiments
        from libtbx.utils import Sorry
        from libtbx.introspection import machine_memory_info
        from math import floor
        memory_info = machine_memory_info()
        total_memory = memory_info.memory_total()

        # Parse the command line arguments
        params, options = self.parser.parse_args(show_diff_phil=True)
        experiments = flatten_experiments(params.input.experiments)
        if len(experiments) == 0:
            self.parser.print_help()
            return
        elif len(experiments) > 1:
            raise RuntimeError("Only 1 experiment can be specified")

        imageset = experiments[0].imageset

        from dials.algorithms.integration.parallel_integrator import MultiThreadedIntegrator

        max_block_size = MultiThreadedIntegrator.compute_max_block_size(
            imageset, params.use_dynamic_mask, total_memory)

        print("Total memory:   %.2f GB" % (total_memory / 1e9))
        print("Max Block Size: %d" % max_block_size)

        for i in range(1, min(max_block_size, len(imageset) + 1)):
            memory = MultiThreadedIntegrator.compute_required_memory(
                imageset[0:i], params.use_dynamic_mask)
            print("Block Size = %d, Memory = %.2f GB" % (i, memory / 1e9))
def __cubicles_max_memory_allocation_set():
  from libtbx.introspection import machine_memory_info
  m = machine_memory_info().memory_total()
  if (m is None): m = 1000000000
  l = 2**(8*boost.python.sizeof_void_ptr-1)-1
  if (m > l): m = l
  cubicles_max_memory_allocation_set(number_of_bytes=m//2)
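
Both __cubicles_max_memory_allocation_set variants implement the same clamp: take total physical memory (falling back to 1 GB when it cannot be determined), cap it at the largest positive signed integer of pointer width, and pass half of the result on. A sketch of just that arithmetic, substituting ctypes for the Boost.Python sizeof_void_ptr attribute purely so the example stands alone:

import ctypes
from libtbx.introspection import machine_memory_info

def default_cubicles_budget():
    # Total physical memory in bytes; fall back to 1 GB if detection fails.
    m = machine_memory_info().memory_total()
    if m is None:
        m = 1000000000
    # Largest positive value of a signed pointer-sized integer,
    # e.g. 2**63 - 1 on a 64-bit build.
    limit = 2 ** (8 * ctypes.sizeof(ctypes.c_void_p) - 1) - 1
    # Budget at most half of the clamped total.
    return min(m, limit) // 2
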
Example #5
def run(out=None, omit_unicode_experiment=False):
  if (out is None): out = sys.stdout
  out.write(bp.platform_info)
  tag = libtbx.env.under_dist("boost", "TAG")
  if (op.isfile(tag)):
    tag = open(tag).read().strip()
  else:
    tag = None
  print("boost/TAG:", tag, file=out)
  print("os.name:", os.name, file=out)
  print("sys.platform:", sys.platform, file=out)
  print("sys.byteorder:", sys.byteorder, file=out)
  print("platform.platform():", platform.platform(), file=out)
  print("platform.architecture():", platform.architecture(), file=out)
  for attr in ["division_by_zero", "invalid", "overflow"]:
    attr = "floating_point_exceptions.%s_trapped" % attr
    print("%s:" % attr, eval("bp.%s" % attr), file=out)
  print("number of processors:", introspection.number_of_processors(
    return_value_if_unknown="unknown"), file=out)
  introspection.machine_memory_info().show(out=out)
  try: import thread
  except ImportError: print("import thread: NO", file=out)
  else: print("import thread: OK", file=out)
  print("Division operator semantics: %s division" % (div_probe() / 0))
  c = getattr(bp.ext, "str_or_unicode_as_char_list", None)
  if (c is not None and not omit_unicode_experiment):
    print('"hello" =', c("hello"), file=out)
    print('u"hello" =', c(u"hello"), file=out)
    e = u"\u00C5".encode("utf-8", "strict")
    # XXX temp fix for python3 failure: unicode failure in show_platform_info.py
    # Reason: c(u"\u00C5") fails and c(e) fails
    try:
      cc = c(u"\u00C5")
      ce = c(e)
    except Exception as ignore_exception:
      cc = u"\u00C5"
      ce = e

    print('u"\u00C5" =', cc, 'as utf-8 =', ce, file=out)
    print("LATIN CAPITAL LETTER A WITH RING ABOVE =", e, file=out)
  from libtbx.utils import format_cpu_times
  print(format_cpu_times())
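
The unicode experiment above reduces to the UTF-8 encoding of LATIN CAPITAL LETTER A WITH RING ABOVE, which is two bytes. A small self-contained check in plain Python 3 (no libtbx required):

# U+00C5 encodes to the byte pair 0xC3 0x85 in UTF-8.
e = "\u00C5".encode("utf-8", "strict")
assert e == b"\xc3\x85"
# Decoding restores the original single-character string.
assert e.decode("utf-8") == "\u00C5"
print("LATIN CAPITAL LETTER A WITH RING ABOVE =", e)
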
Example #6
def run(out=None, omit_unicode_experiment=False):
    if (out is None): out = sys.stdout
    out.write(boost.python.platform_info)
    tag = libtbx.env.under_dist("boost", "TAG")
    if (op.isfile(tag)):
        tag = open(tag).read().strip()
    else:
        tag = None
    print >> out, "boost/TAG:", tag
    print >> out, "os.name:", os.name
    print >> out, "sys.platform:", sys.platform
    print >> out, "sys.byteorder:", sys.byteorder
    print >> out, "platform.platform():", platform.platform()
    print >> out, "platform.architecture():", platform.architecture()
    for attr in ["division_by_zero", "invalid", "overflow"]:
        attr = "floating_point_exceptions.%s_trapped" % attr
        print >> out, "%s:" % attr, eval("boost.python.%s" % attr)
    print >> out, "number of processors:", introspection.number_of_processors(
        return_value_if_unknown="unknown")
    introspection.machine_memory_info().show(out=out)
    try:
        import thread
    except ImportError:
        print >> out, "import thread: NO"
    else:
        print >> out, "import thread: OK"
    print "Division operator semantics: %s division" % (div_probe() / 0)
    c = getattr(boost.python.ext, "str_or_unicode_as_char_list", None)
    if (c is not None and not omit_unicode_experiment):
        print >> out, '"hello" =', c("hello")
        print >> out, 'u"hello" =', c(u"hello")
        e = u"\u00C5".encode("utf-8", "strict")
        print >> out, 'u"\u00C5" =', c(u"\u00C5"), 'as utf-8 =', c(e)
        print >> out, "LATIN CAPITAL LETTER A WITH RING ABOVE =", e
    from libtbx.utils import format_cpu_times
    print format_cpu_times()
  def compute_max_block_size(self):
    '''
    Compute the maximum block size allowed by the memory limit

    '''
    from libtbx.introspection import machine_memory_info
    from math import floor
    memory_info = machine_memory_info()
    total_memory = memory_info.memory_total()
    max_memory_usage = self.params.integration.block.max_memory_usage
    if total_memory is None:
      raise RuntimeError("Inspection of system memory failed")
    assert total_memory > 0, "Your system appears to have no memory!"
    assert max_memory_usage >  0.0, "maximum memory usage must be > 0"
    assert max_memory_usage <= 1.0, "maximum memory usage must be <= 1"
    limit_memory = int(floor(total_memory * max_memory_usage))
    return MultiThreadedIntegrator.compute_max_block_size(
      self.experiments[0].imageset,
      max_memory_usage = limit_memory)
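
The step common to this and the following examples is converting a fractional max_memory_usage into a byte limit against the machine's total memory. A hedged standalone sketch of that conversion (the 0.75 default is illustrative, not taken from any of the code above):

from math import floor
from libtbx.introspection import machine_memory_info

def memory_limit_bytes(max_memory_usage=0.75):
    # max_memory_usage is a fraction of total system memory in (0, 1].
    total_memory = machine_memory_info().memory_total()
    if total_memory is None:
        raise RuntimeError("Inspection of system memory failed")
    assert 0.0 < max_memory_usage <= 1.0
    return int(floor(total_memory * max_memory_usage))
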
Example #8
def assert_enough_memory(required_memory, max_memory_usage):
    """
    Check there is enough memory available or fail

    :param required_memory: The required number of bytes
    :param max_memory_usage: The maximum memory usage allowed

    """
    from libtbx.introspection import machine_memory_info

    # Compute the allowed fraction of total memory. Memory inspection is not
    # portable to Windows, so check whether it fails; on Windows no warning
    # will be printed.
    memory_info = machine_memory_info()
    total_memory = memory_info.memory_total()
    if total_memory is None:
        raise RuntimeError("Inspection of system memory failed")
    assert total_memory > 0, "Your system appears to have no memory!"
    assert max_memory_usage > 0.0, "maximum memory usage must be > 0"
    assert max_memory_usage <= 1.0, "maximum memory usage must be <= 1"
    limit_memory = total_memory * max_memory_usage
    if required_memory > limit_memory:
        raise RuntimeError(
            """
    There was a problem allocating memory for image data. Possible solutions
    include increasing the percentage of memory allowed for shoeboxes or
    decreasing the block size. This could also be caused by a highly mosaic
    crystal model - is your crystal really this mosaic?
      Total system memory: %g GB
      Limit image memory: %g GB
      Required image memory: %g GB
    """
            % (total_memory / 1e9, limit_memory / 1e9, required_memory / 1e9)
        )
    else:
        logger.info("")
        logger.info(" Memory usage:")
        logger.info("  Total system memory: %g GB" % (total_memory / 1e9))
        logger.info("  Limit image memory: %g GB" % (limit_memory / 1e9))
        logger.info("  Required image memory: %g GB" % (required_memory / 1e9))
        logger.info("")
Example #9
    def compute_processors(self):
        '''
        Compute the number of processors

        '''
        from libtbx.introspection import machine_memory_info
        from math import floor
        from dials.array_family import flex

        # Set the memory usage per processor
        if self.params.mp.method == 'multiprocessing' and self.params.mp.nproc > 1:

            # Get the maximum shoebox memory
            max_memory = flex.max(
                self.jobs.shoebox_memory(self.reflections,
                                         self.params.shoebox.flatten))

            # Compute the allowed fraction of total memory. Memory inspection is
            # not portable to Windows, so check whether it fails; on Windows no
            # warning will be printed.
            memory_info = machine_memory_info()
            total_memory = memory_info.memory_total()
            if total_memory is not None:
                assert total_memory > 0, "Your system appears to have no memory!"
                limit_memory = total_memory * self.params.block.max_memory_usage
                njobs = int(floor(limit_memory / max_memory))
                if njobs < 1:
                    raise RuntimeError('''
            Not enough memory to run integration jobs. Possible solutions
            include increasing the percentage of memory allowed for shoeboxes or
            decreasing the block size.
              Total system memory: %g GB
              Limit shoebox memory: %g GB
              Max shoebox memory: %g GB
          ''' % (total_memory / 1e9, limit_memory / 1e9, max_memory / 1e9))
                else:
                    self.params.mp.nproc = min(self.params.mp.nproc, njobs)
                    self.params.block.max_memory_usage /= self.params.mp.nproc
Example #10
  def compute_processors(self):
    '''
    Compute the number of processors

    '''
    from libtbx.introspection import machine_memory_info
    from math import floor
    from dials.array_family import flex

    # Set the memory usage per processor
    if self.params.mp.method == 'multiprocessing' and self.params.mp.nproc > 1:

      # Get the maximum shoebox memory
      max_memory = flex.max(self.jobs.shoebox_memory(
        self.reflections, self.params.shoebox.flatten))

      # Compute the allowed fraction of total memory. Memory inspection is not
      # portable to Windows, so check whether it fails; on Windows no warning
      # will be printed.
      memory_info = machine_memory_info()
      total_memory = memory_info.memory_total()
      if total_memory is not None:
        assert total_memory > 0, "Your system appears to have no memory!"
        limit_memory = total_memory * self.params.block.max_memory_usage
        njobs = int(floor(limit_memory / max_memory))
        if njobs < 1:
          raise RuntimeError('''
            Not enough memory to run integration jobs. Possible solutions
            include increasing the percentage of memory allowed for shoeboxes or
            decreasing the block size.
              Total system memory: %g GB
              Limit shoebox memory: %g GB
              Max shoebox memory: %g GB
          ''' % (total_memory/1e9, limit_memory/1e9, max_memory/1e9))
        else:
          self.params.mp.nproc = min(self.params.mp.nproc, njobs)
          self.params.block.max_memory_usage /= self.params.mp.nproc
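
At its core, compute_processors divides a memory budget by the worst-case shoebox footprint and caps the requested process count. A minimal sketch of that arithmetic with illustrative numbers (none of them taken from the examples above):

from math import floor

# Illustrative figures: 16 GB total memory, 50% budget, 1.5 GB worst-case shoebox.
total_memory = 16e9
max_memory_usage = 0.5
max_shoebox_memory = 1.5e9
requested_nproc = 8

limit_memory = total_memory * max_memory_usage         # 8 GB
njobs = int(floor(limit_memory / max_shoebox_memory))  # 5 jobs fit in the budget
nproc = min(requested_nproc, njobs)                    # cap the requested process count
# The per-process budget fraction shrinks accordingly.
per_process_usage = max_memory_usage / nproc
print(nproc, per_process_usage)                        # 5 0.1
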
Example #11
    def _build_components(cls, params, reflections, experiments):
        """low level build"""

        # Currently a refinement job can only have one parameterisation of the
        # prediction equation. This can either be of the XYDelPsi (stills) type, the
        # XYPhi (scans) type or the scan-varying XYPhi type with a varying crystal
        # model
        single_as_still = params.refinement.parameterisation.treat_single_image_as_still
        exps_are_stills = []
        for exp in experiments:
            if exp.scan is None:
                exps_are_stills.append(True)
            elif exp.scan.get_num_images() == 1:
                if single_as_still:
                    exps_are_stills.append(True)
                elif exp.scan.get_oscillation()[1] == 0.0:
                    exps_are_stills.append(True)
                else:
                    exps_are_stills.append(False)
            else:
                if exp.scan.get_oscillation()[1] <= 0.0:
                    raise DialsRefineConfigError(
                        "Cannot refine a zero-width scan")
                exps_are_stills.append(False)

        # check experiment types are consistent
        if not all(exps_are_stills[0] == e for e in exps_are_stills):
            raise DialsRefineConfigError(
                "Cannot refine a mixture of stills and scans")
        do_stills = exps_are_stills[0]

        # If experiments are stills, ensure scan-varying refinement won't be attempted
        if do_stills:
            params.refinement.parameterisation.scan_varying = False

        # Refiner does not accept scan_varying=Auto. This is a special case for
        # doing macrocycles of refinement in dials.refine.
        if params.refinement.parameterisation.scan_varying is libtbx.Auto:
            params.refinement.parameterisation.scan_varying = False

        # calculate reflection block_width if required for scan-varying refinement
        if params.refinement.parameterisation.scan_varying:
            from dials.algorithms.refinement.reflection_manager import BlockCalculator

            block_calculator = BlockCalculator(experiments, reflections)
            if params.refinement.parameterisation.compose_model_per == "block":
                reflections = block_calculator.per_width(
                    params.refinement.parameterisation.block_width, deg=True)
            elif params.refinement.parameterisation.compose_model_per == "image":
                reflections = block_calculator.per_image()

        logger.debug("\nBuilding reflection manager")
        logger.debug("Input reflection list size = %d observations",
                     len(reflections))

        # create reflection manager
        from dials.algorithms.refinement.reflection_manager import (
            ReflectionManagerFactory, )

        refman = ReflectionManagerFactory.from_parameters_reflections_experiments(
            params.refinement.reflections, reflections, experiments, do_stills)

        logger.debug(
            "Number of observations that pass initial inclusion criteria = %d",
            refman.get_accepted_refs_size(),
        )
        sample_size = refman.get_sample_size()
        if sample_size > 0:
            logger.debug("Working set size = %d observations", sample_size)
        logger.debug("Reflection manager built\n")

        # configure use of sparse data types
        params = cls.config_sparse(params, experiments)
        do_sparse = params.refinement.parameterisation.sparse

        # create managed reflection predictor
        from dials.algorithms.refinement.prediction.managed_predictors import (
            ExperimentsPredictorFactory, )

        ref_predictor = ExperimentsPredictorFactory.from_experiments(
            experiments,
            force_stills=do_stills,
            spherical_relp=params.refinement.parameterisation.spherical_relp_model,
        )

        # Predict for the managed observations, set columns for residuals and set
        # the used_in_refinement flag to the predictions
        obs = refman.get_obs()
        ref_predictor(obs)
        x_obs, y_obs, phi_obs = obs["xyzobs.mm.value"].parts()
        x_calc, y_calc, phi_calc = obs["xyzcal.mm"].parts()
        obs["x_resid"] = x_calc - x_obs
        obs["y_resid"] = y_calc - y_obs
        obs["phi_resid"] = phi_calc - phi_obs

        # determine whether to do basic centroid analysis to automatically
        # determine outlier rejection block
        if params.refinement.reflections.outlier.block_width is libtbx.Auto:
            ca = refman.get_centroid_analyser()
            analysis = ca(calc_average_residuals=False,
                          calc_periodograms=False)
        else:
            analysis = None

        # Now predictions and centroid analysis are available, so we can finalise
        # the reflection manager
        refman.finalise(analysis)

        # Create model parameterisations
        logger.debug("Building prediction equation parameterisation")
        pred_param, param_reporter = cls.config_parameterisation(
            params.refinement.parameterisation, experiments, refman, do_stills)
        logger.debug("Prediction equation parameterisation built")
        logger.debug("Parameter order : name mapping")
        for i, e in enumerate(pred_param.get_param_names()):
            logger.debug("Parameter %03d : %s", i + 1, e)

        # Build a restraints parameterisation (if requested).
        # Only unit cell restraints are supported at the moment.
        restraints_parameterisation = cls.config_restraints(
            params.refinement.parameterisation, pred_param)

        # Build a constraints manager, if requested
        from dials.algorithms.refinement.constraints import ConstraintManagerFactory

        cmf = ConstraintManagerFactory(params, pred_param)
        constraints_manager = cmf()

        # Create target function
        logger.debug("Building target function")
        target = cls.config_target(
            params.refinement.target,
            experiments,
            refman,
            ref_predictor,
            pred_param,
            restraints_parameterisation,
            do_stills,
            do_sparse,
        )
        logger.debug("Target function built")

        # create refinery
        logger.debug("Building refinement engine")
        refinery = cls.config_refinery(params, target, pred_param,
                                       constraints_manager)
        logger.debug("Refinement engine built")

        nparam = len(pred_param)
        ndim = target.dim
        nref = len(refman.get_matches())
        logger.info(
            "There are {0} parameters to refine against {1} reflections in {2} dimensions"
            .format(nparam, nref, ndim))
        from dials.algorithms.refinement.engine import AdaptLstbx

        if not params.refinement.parameterisation.sparse and isinstance(
                refinery, AdaptLstbx):
            dense_jacobian_gigabytes = (nparam * nref * ndim *
                                        flex.double.element_size()) / 1e9
            tot_memory_gigabytes = machine_memory_info().memory_total() / 1e9
            # Report if the Jacobian requires a large amount of storage
            if (dense_jacobian_gigabytes > 0.2 * tot_memory_gigabytes
                    or dense_jacobian_gigabytes > 0.5):
                logger.info(
                    "Storage of the Jacobian matrix requires {:.1f} GB".format(
                        dense_jacobian_gigabytes))

        # build refiner interface and return
        if params.refinement.parameterisation.scan_varying:
            refiner = ScanVaryingRefiner
        else:
            refiner = Refiner
        return refiner(experiments, pred_param, param_reporter, refman, target,
                       refinery)
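
The Jacobian warning near the end of _build_components is a simple size estimate: parameters × reflections × dimensions × bytes per element. A sketch with hypothetical counts; the 8-byte element size stands in for flex.double.element_size(), and the memory fallback is only there so the sketch runs when detection fails:

from libtbx.introspection import machine_memory_info

# Hypothetical refinement: 200 parameters, 100000 reflections, 3 dimensions.
nparam, nref, ndim = 200, 100000, 3
element_size = 8  # bytes per double, standing in for flex.double.element_size()

dense_jacobian_gigabytes = (nparam * nref * ndim * element_size) / 1e9  # 0.48 GB
total = machine_memory_info().memory_total() or 8e9  # assumed fallback for the sketch
tot_memory_gigabytes = total / 1e9
if (dense_jacobian_gigabytes > 0.2 * tot_memory_gigabytes
        or dense_jacobian_gigabytes > 0.5):
    print("Storage of the Jacobian matrix requires %.1f GB"
          % dense_jacobian_gigabytes)
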
Example #12
    def __call__(self):
        '''
        Do the processing.

        :return: The processed data

        '''
        from dials.array_family import flex
        from time import time
        from dials.model.data import make_image
        from libtbx.introspection import machine_memory_info

        # Get the start time
        start_time = time()

        # Set the global process ID
        job.index = self.index

        # Check all reflections have same imageset and get it
        exp_id = list(set(self.reflections['id']))
        imageset = self.experiments[exp_id[0]].imageset
        for i in exp_id[1:]:
            assert self.experiments[
                i].imageset == imageset, "Task can only handle 1 imageset"

        # Get the sub imageset
        frame00, frame01 = self.job
        try:
            frame10, frame11 = imageset.get_array_range()
        except Exception:
            frame10, frame11 = (0, len(imageset))
        try:
            assert frame00 < frame01
            assert frame10 < frame11
            assert frame00 >= frame10
            assert frame01 <= frame11
            index0 = frame00 - frame10
            index1 = index0 + (frame01 - frame00)
            assert index0 < index1
            assert index0 >= 0
            assert index1 <= len(imageset)
            imageset = imageset[index0:index1]
        except Exception:
            raise RuntimeError('Programmer Error: bad array range')
        try:
            frame0, frame1 = imageset.get_array_range()
        except Exception:
            frame0, frame1 = (0, len(imageset))

        # Initialize the executor
        self.executor.initialize(frame0, frame1, self.reflections)

        # Set the shoeboxes (don't allocate)
        self.reflections['shoebox'] = flex.shoebox(
            self.reflections['panel'],
            self.reflections['bbox'],
            allocate=False,
            flatten=self.params.shoebox.flatten)

        # Create the processor
        processor = ShoeboxProcessor(self.reflections,
                                     len(imageset.get_detector()), frame0,
                                     frame1, self.params.debug.output)

        # Compute the allowed fraction of total memory. Memory inspection is not
        # portable to Windows, so check whether it fails; on Windows no warning
        # will be printed.
        memory_info = machine_memory_info()
        total_memory = memory_info.memory_total()
        sbox_memory = processor.compute_max_memory_usage()
        if total_memory is not None:
            assert total_memory > 0, "Your system appears to have no memory!"
            assert self.params.block.max_memory_usage > 0.0, "maximum memory usage must be > 0"
            assert self.params.block.max_memory_usage <= 1.0, "maximum memory usage must be <= 1"
            limit_memory = total_memory * self.params.block.max_memory_usage
            if sbox_memory > limit_memory:
                raise RuntimeError('''
        There was a problem allocating memory for shoeboxes. Possible solutions
        include increasing the percentage of memory allowed for shoeboxes or
        decreasing the block size. This could also be caused by a highly mosaic
        crystal model - is your crystal really this mosaic?
          Total system memory: %g GB
          Limit shoebox memory: %g GB
          Required shoebox memory: %g GB
        ''' % (total_memory / 1e9, limit_memory / 1e9, sbox_memory / 1e9))
            else:
                logger.info(' Memory usage:')
                logger.info('  Total system memory: %g GB' %
                            (total_memory / 1e9))
                logger.info('  Limit shoebox memory: %g GB' %
                            (limit_memory / 1e9))
                logger.info('  Required shoebox memory: %g GB' %
                            (sbox_memory / 1e9))
                logger.info('')

        # Loop through the imageset, extract pixels and process reflections
        read_time = 0.0
        for i in range(len(imageset)):
            st = time()
            image = imageset.get_corrected_data(i)
            mask = imageset.get_mask(i)
            if self.params.lookup.mask is not None:
                assert len(mask) == len(self.params.lookup.mask), \
                  "Mask/Image are incorrect size %d %d" % (
                    len(mask),
                    len(self.params.lookup.mask))
                mask = tuple(m1 & m2
                             for m1, m2 in zip(self.params.lookup.mask, mask))

            read_time += time() - st
            processor.next(make_image(image, mask), self.executor)
            del image
            del mask
        assert processor.finished(), "Data processor is not finished"

        # Optionally save the shoeboxes
        if self.params.debug.output and self.params.debug.separate_files:
            output = self.reflections
            if self.params.debug.select is not None:
                output = output.select(self.params.debug.select(output))
            if self.params.debug.split_experiments:
                output = output.split_by_experiment_id()
                for table in output:
                    i = table['id'][0]
                    table.as_pickle('shoeboxes_%d_%d.pickle' % (self.index, i))
            else:
                output.as_pickle('shoeboxes_%d.pickle' % self.index)

        # Delete the shoeboxes
        if self.params.debug.separate_files or not self.params.debug.output:
            del self.reflections['shoebox']

        # Finalize the executor
        self.executor.finalize()

        # Return the result
        result = Result(self.index, self.reflections, self.executor.data())
        result.read_time = read_time
        result.extract_time = processor.extract_time()
        result.process_time = processor.process_time()
        result.total_time = time() - start_time
        return result
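
The frame bookkeeping at the top of __call__ maps a job's absolute frame range onto slice indices within the imageset's array range. A compact sketch of that mapping with illustrative numbers:

# Illustrative ranges: the job covers frames 110-120 of a scan recorded as 100-200.
frame00, frame01 = 110, 120   # job frame range (absolute)
frame10, frame11 = 100, 200   # imageset array range (absolute)

assert frame10 <= frame00 < frame01 <= frame11
index0 = frame00 - frame10              # 10
index1 = index0 + (frame01 - frame00)   # 20
# imageset = imageset[index0:index1] would select the job's ten images.
print(index0, index1)
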
Example #13
  def __call__(self):
    '''
    Do the processing.

    :return: The processed data

    '''
    from dials.array_family import flex
    from time import time
    from dials.model.data import make_image
    from libtbx.introspection import machine_memory_info

    # Get the start time
    start_time = time()

    # Set the global process ID
    job.index = self.index

    # Check all reflections have same imageset and get it
    exp_id = list(set(self.reflections['id']))
    imageset = self.experiments[exp_id[0]].imageset
    for i in exp_id[1:]:
      assert self.experiments[i].imageset == imageset, "Task can only handle 1 imageset"

    # Get the sub imageset
    frame00, frame01 = self.job
    try:
      frame10, frame11 = imageset.get_array_range()
    except Exception:
      frame10, frame11 = (0, len(imageset))
    try:
      assert frame00 < frame01
      assert frame10 < frame11
      assert frame00 >= frame10
      assert frame01 <= frame11
      index0 = frame00 - frame10
      index1 = index0 + (frame01 - frame00)
      assert index0 < index1
      assert index0 >= 0
      assert index1 <= len(imageset)
      imageset = imageset[index0:index1]
    except Exception:
      raise RuntimeError('Programmer Error: bad array range')
    try:
      frame0, frame1 = imageset.get_array_range()
    except Exception:
      frame0, frame1 = (0, len(imageset))

    # Initialize the executor
    self.executor.initialize(frame0, frame1, self.reflections)

    # Set the shoeboxes (don't allocate)
    self.reflections['shoebox'] = flex.shoebox(
      self.reflections['panel'],
      self.reflections['bbox'],
      allocate=False,
      flatten=self.params.shoebox.flatten)

    # Create the processor
    processor = ShoeboxProcessor(
      self.reflections,
      len(imageset.get_detector()),
      frame0,
      frame1,
      self.params.debug.output)

    # Compute the allowed fraction of total memory. Memory inspection is not
    # portable to Windows, so check whether it fails; on Windows no warning
    # will be printed.
    memory_info = machine_memory_info()
    total_memory = memory_info.memory_total()
    sbox_memory = processor.compute_max_memory_usage()
    if total_memory is not None:
      assert total_memory > 0, "Your system appears to have no memory!"
      assert self.params.block.max_memory_usage >  0.0, "maximum memory usage must be > 0"
      assert self.params.block.max_memory_usage <= 1.0, "maximum memory usage must be <= 1"
      limit_memory = total_memory * self.params.block.max_memory_usage
      if sbox_memory > limit_memory:
        raise RuntimeError('''
        There was a problem allocating memory for shoeboxes. Possible solutions
        include increasing the percentage of memory allowed for shoeboxes or
        decreasing the block size. This could also be caused by a highly mosaic
        crystal model - is your crystal really this mosaic?
          Total system memory: %g GB
          Limit shoebox memory: %g GB
          Required shoebox memory: %g GB
        ''' % (total_memory/1e9, limit_memory/1e9, sbox_memory/1e9))
      else:
        logger.info(' Memory usage:')
        logger.info('  Total system memory: %g GB' % (total_memory/1e9))
        logger.info('  Limit shoebox memory: %g GB' % (limit_memory/1e9))
        logger.info('  Required shoebox memory: %g GB' % (sbox_memory/1e9))
        logger.info('')

    # Loop through the imageset, extract pixels and process reflections
    read_time = 0.0
    for i in range(len(imageset)):
      st = time()
      image = imageset.get_corrected_data(i)
      mask = imageset.get_mask(i)
      if self.params.lookup.mask is not None:
        assert len(mask) == len(self.params.lookup.mask), \
          "Mask/Image are incorrect size %d %d" % (
            len(mask),
            len(self.params.lookup.mask))
        mask = tuple(m1 & m2 for m1, m2 in zip(self.params.lookup.mask, mask))

      read_time += time() - st
      processor.next(make_image(image, mask), self.executor)
      del image
      del mask
    assert processor.finished(), "Data processor is not finished"

    # Optionally save the shoeboxes
    if self.params.debug.output and self.params.debug.separate_files:
      output = self.reflections
      if self.params.debug.select is not None:
        output = output.select(self.params.debug.select(output))
      if self.params.debug.split_experiments:
        output = output.split_by_experiment_id()
        for table in output:
          i = table['id'][0]
          table.as_pickle('shoeboxes_%d_%d.pickle' % (self.index, i))
      else:
        output.as_pickle('shoeboxes_%d.pickle' % self.index)

    # Delete the shoeboxes
    if self.params.debug.separate_files or not self.params.debug.output:
      del self.reflections['shoebox']

    # Finalize the executor
    self.executor.finalize()

    # Return the result
    result = Result(self.index, self.reflections, self.executor.data())
    result.read_time = read_time
    result.extract_time = processor.extract_time()
    result.process_time = processor.process_time()
    result.total_time = time() - start_time
    return result