Example #1
    def _mosflm_parallel_integrate(self):
        '''Perform the integration as before, but this time as a
    number of parallel Mosflm jobs (hence, in separate directories)
    and including a step of pre-refinement of the mosaic spread and
    missets. This will all be kind of explicit and hence probably
    messy!'''

        refinr = self.get_integrater_refiner()

        lattice = refinr.get_refiner_lattice()
        spacegroup_number = lattice_to_spacegroup(lattice)
        mosaic = refinr.get_refiner_payload('mosaic')
        beam = refinr.get_refiner_payload('beam')
        distance = refinr.get_refiner_payload('distance')
        matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

        integration_params = refinr.get_refiner_payload(
            'mosflm_integration_parameters')

        if integration_params:
            if 'separation' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'separation',
                    '%s %s' % tuple(integration_params['separation']))
            if 'raster' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'raster',
                    '%d %d %d %d %d' % tuple(integration_params['raster']))
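        # Illustrative values only: 'separation' is typically a pair such as
        # '0.6 0.6' (minimum spot separation in mm) and 'raster' five
        # integers such as '17 17 11 5 5' for the Mosflm RASTER keyword.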

        refinr.set_refiner_payload('mosflm_integration_parameters', None)
        pname, xname, dname = self.get_integrater_project_info()

        # what follows below should (i) be run in separate directories
        # and (ii) be repeated N=parallel times.

        nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
        parallel = nproc

        # FIXME this is something of a kludge - if too few frames refinement
        # and integration does not work well... ideally want at least 15
        # frames / chunk (say)
        nframes = self._intgr_wedge[1] - self._intgr_wedge[0] + 1

        if parallel > nframes / 15:
            parallel = nframes // 15

        if not parallel:
            raise RuntimeError('parallel not set')
        if parallel < 2:
            raise RuntimeError('parallel not parallel: %s' % parallel)
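        # Worked example with illustrative numbers: nproc = 8 and a 90-frame
        # wedge gives nframes / 15 = 6, so parallel drops from 8 to
        # 90 // 15 = 6 chunks of ~15 frames each; a 20-frame wedge would give
        # parallel = 1 and trip the check above, as such small chunks cannot
        # be pre-refined reliably.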

        jobs = []
        hklouts = []
        nref = 0

        # calculate the chunks to use
        offset = self.get_frame_offset()
        start = self._intgr_wedge[0] - offset
        end = self._intgr_wedge[1] - offset

        left_images = 1 + end - start
        left_chunks = parallel
        chunks = []

        while left_images > 0:
            size = left_images // left_chunks
            chunks.append((start, start + size - 1))
            start += size
            left_images -= size
            left_chunks -= 1
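        # Worked example (illustrative): start = 1, end = 100, parallel = 3
        # gives chunks (1, 33), (34, 66), (67, 100); any remainder images are
        # absorbed by the later, slightly larger chunks.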

        summary_files = []

        for j in range(parallel):

            # make some working directories, as necessary - chunk-(0:N-1)
            wd = os.path.join(self.get_working_directory(), 'chunk-%d' % j)
            if not os.path.exists(wd):
                os.makedirs(wd)

            job = MosflmIntegrate()
            job.set_working_directory(wd)

            auto_logfiler(job)

            l = refinr.get_refiner_lattice()

            # create the starting point
            f = open(os.path.join(wd, 'xiaintegrate-%s.mat' % l), 'w')
            for m in matrix:
                f.write(m)
            f.close()
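            # (the orientation-matrix payload is assumed to be a list of text
            # lines, written verbatim as the Mosflm .mat file for this chunk)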

            spacegroup_number = lattice_to_spacegroup(lattice)

            job.set_refine_profiles(self._mosflm_refine_profiles)

            # N.B. for harvesting need to append N to dname.

            if pname is not None and xname is not None and dname is not None:
                Debug.write('Harvesting: %s/%s/%s' % (pname, xname, dname))
                harvest_dir = self.get_working_directory()
                temp_dname = '%s_%s' % \
                             (dname, self.get_integrater_sweep_name())
                job.set_pname_xname_dname(pname, xname, temp_dname)

            job.set_template(os.path.basename(self.get_template()))
            job.set_directory(self.get_directory())

            # check for ice - and if so, exclude (ranges taken from
            # XDS documentation)
            if self.get_integrater_ice() != 0:
                Debug.write('Excluding ice rings')
                job.set_exclude_ice(True)

            # exclude specified resolution ranges
            if len(self.get_integrater_excluded_regions()) != 0:
                regions = self.get_integrater_excluded_regions()
                Debug.write('Excluding regions: %s' % repr(regions))
                job.set_exclude_regions(regions)

            mask = standard_mask(self.get_detector())
            for m in mask:
                job.add_instruction(m)

            job.set_input_mat_file('xiaintegrate-%s.mat' % l)

            job.set_beam_centre(beam)
            job.set_distance(distance)
            job.set_space_group_number(spacegroup_number)
            job.set_mosaic(mosaic)

            if self.get_wavelength_prov() == 'user':
                job.set_wavelength(self.get_wavelength())

            parameters = self.get_integrater_parameters('mosflm')
            job.update_parameters(parameters)

            if self._mosflm_gain:
                job.set_gain(self._mosflm_gain)

            # check for resolution limits
            if self._intgr_reso_high > 0.0:
                job.set_d_min(self._intgr_reso_high)
            if self._intgr_reso_low:
                job.set_d_max(self._intgr_reso_low)

            if PhilIndex.params.general.backstop_mask:
                from xia2.Toolkit.BackstopMask import BackstopMask
                mask = BackstopMask(PhilIndex.params.general.backstop_mask)
                mask = mask.calculate_mask_mosflm(self.get_header())
                job.set_mask(mask)

            detector = self.get_detector()
            detector_width, detector_height = detector[0].get_image_size_mm()

            lim_x = 0.5 * detector_width
            lim_y = 0.5 * detector_height

            Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
            job.set_limits(lim_x, lim_y)

            job.set_fix_mosaic(self._mosflm_postref_fix_mosaic)

            job.set_pre_refinement(True)
            job.set_image_range(chunks[j])

            # these are now running so ...

            jobs.append(job)

            continue

        # ok, at this stage I need to ...
        #
        # (i) accumulate the statistics as a function of batch
        # (ii) merge them into a single block
        #
        # This is likely to be a pain in the arse!

        first_integrated_batch = 1.0e6
        last_integrated_batch = -1.0e6

        all_residuals = []

        threads = []

        for j in range(parallel):
            job = jobs[j]

            # now wait for them to finish - first wait will really be the
            # first one, then all should be finished...

            thread = Background(job, 'run')
            thread.start()
            threads.append(thread)
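            # (Background is assumed to run job.run() on a worker thread; the
            # matching thread.stop() calls in the collection loop below are
            # assumed to block until each job has finished before its output
            # is read)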

        mosaics = []
        postref_result = {}

        integrated_images_first = 1.0e6
        integrated_images_last = -1.0e6
        self._intgr_per_image_statistics = {}

        for j in range(parallel):
            thread = threads[j]
            thread.stop()
            job = jobs[j]

            # get the log file
            output = job.get_all_output()

            # record a copy of it, perhaps - though not if parallel
            if self.get_integrater_sweep_name() and False:
                pname, xname, dname = self.get_integrater_project_info()
                FileHandler.record_log_file(
                    '%s %s %s %s mosflm integrate' % \
                    (self.get_integrater_sweep_name(),
                     pname, xname, '%s_%d' % (dname, j)),
                    job.get_log_file())

            # look for things that we want to know...
            # that is, the output reflection file name, the updated
            # value for the gain (if present,) any warnings, errors,
            # or just interesting facts.

            batches = job.get_batches_out()
            integrated_images_first = min(batches[0], integrated_images_first)
            integrated_images_last = max(batches[1], integrated_images_last)

            mosaics.extend(job.get_mosaic_spreads())

            if min(mosaics) < 0:
                raise IntegrationError('negative mosaic spread: %s' %
                                       min(mosaics))

            if (job.get_detector_gain_error()
                    and not (self.get_imageset().get_detector()[0].get_type()
                             == 'SENSOR_PAD')):
                gain = job.get_suggested_gain()
                if gain is not None:
                    self.set_integrater_parameter('mosflm', 'gain', gain)
                    self.set_integrater_export_parameter(
                        'mosflm', 'gain', gain)
                    if self._mosflm_gain:
                        Debug.write('GAIN updated to %f' % gain)
                    else:
                        Debug.write('GAIN found to be %f' % gain)

                    self._mosflm_gain = gain
                    self._mosflm_rerun_integration = True

            hklout = job.get_hklout()
            Debug.write('Integration output: %s' % hklout)
            hklouts.append(hklout)

            nref += job.get_nref()

            # if a BGSIG error happened try not refining the
            # profile and running again...

            if job.get_bgsig_too_large():
                if not self._mosflm_refine_profiles:
                    raise RuntimeError('BGSIG error with profiles fixed')

                Debug.write('BGSIG error detected - try fixing profile...')

                self._mosflm_refine_profiles = False
                self.set_integrater_done(False)

                return

            if job.get_getprof_error():
                Debug.write('GETPROF error detected - try fixing profile...')
                self._mosflm_refine_profiles = False
                self.set_integrater_done(False)

                return
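            # (setting the integrater "done" flag to False is assumed to make
            # the calling framework re-run this integration step, this time
            # with profile refinement switched off)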

            # here
            # write the report for each image as .*-#$ to Chatter -
            # detailed report will be written automagically to science...

            self._intgr_per_image_statistics.update(
                job.get_per_image_statistics())
            postref_result.update(job.get_postref_result())

            # inspect the output for e.g. very high weighted residuals

            all_residuals.extend(job.get_residuals())

        self._intgr_batches_out = (integrated_images_first,
                                   integrated_images_last)

        if mosaics and len(mosaics) > 0:
            self.set_integrater_mosaic_min_mean_max(
                min(mosaics),
                sum(mosaics) / len(mosaics), max(mosaics))
        else:
            # fall back to the mosaic estimate carried over from the refiner
            m = mosaic
            self.set_integrater_mosaic_min_mean_max(m, m, m)

        Chatter.write(self.show_per_image_statistics())

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        # gather the statistics from the postrefinement for all sweeps
        # now write this to a postrefinement log

        postref_log = os.path.join(self.get_working_directory(),
                                   'postrefinement.log')

        fout = open(postref_log, 'w')

        fout.write('$TABLE: Postrefinement for %s:\n' % \
                   self._intgr_sweep_name)
        fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
        fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')
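        # The table below is written in CCP4 loggraph format; a data row
        # might look like (values invented for illustration):
        #   42  0.05 -0.12  0.03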

        for image in sorted(postref_result):
            phix = postref_result[image].get('phix', 0.0)
            phiy = postref_result[image].get('phiy', 0.0)
            phiz = postref_result[image].get('phiz', 0.0)

            fout.write('%d %5.2f %5.2f %5.2f\n' % \
                       (image, phix, phiy, phiz))

        fout.write('$$\n')
        fout.close()

        if self.get_integrater_sweep_name():
            pname, xname, dname = self.get_integrater_project_info()
            FileHandler.record_log_file('%s %s %s %s postrefinement' % \
                                        (self.get_integrater_sweep_name(),
                                         pname, xname, dname),
                                        postref_log)

        hklouts.sort()

        hklout = os.path.join(self.get_working_directory(),
                              os.path.split(hklouts[0])[-1])

        Debug.write('Sorting data to %s' % hklout)
        for hklin in hklouts:
            Debug.write('<= %s' % hklin)

        sortmtz = Sortmtz()
        sortmtz.set_hklout(hklout)
        for hklin in hklouts:
            sortmtz.add_hklin(hklin)

        sortmtz.sort()

        self._mosflm_hklout = hklout

        return self._mosflm_hklout
Example #2
    def _index_prepare(self):
        """Prepare to do autoindexing - in XDS terms this will mean
        calling xycorr, init and colspot on the input images."""

        # decide on images to work with

        logger.debug("XDS INDEX PREPARE:")
        logger.debug("Wavelength: %.6f", self.get_wavelength())
        logger.debug("Distance: %.2f", self.get_distance())

        if self._indxr_images == []:
            _select_images_function = getattr(
                self, "_index_select_images_%s" % self._index_select_images
            )
            wedges = _select_images_function()
            for wedge in wedges:
                self.add_indexer_image_wedge(wedge)
            self.set_indexer_prepare_done(True)

        all_images = self.get_matching_images()

        first = min(all_images)
        last = max(all_images)

        # next start to process these - first xycorr

        xycorr = self.Xycorr()

        xycorr.set_data_range(first, last)
        xycorr.set_background_range(self._indxr_images[0][0], self._indxr_images[0][1])

        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin
        xycorr.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])
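        # detector_origin is assumed to give the beam centre in the XDS
        # convention (ORGX/ORGY in pixels), which is what XYCORR expects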
        for block in self._indxr_images:
            xycorr.add_spot_range(block[0], block[1])

        # FIXME need to set the origin here

        xycorr.run()

        for file in ["X-CORRECTIONS.cbf", "Y-CORRECTIONS.cbf"]:
            self._indxr_payload[file] = xycorr.get_output_data_file(file)

        # next start to process these - then init

        if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
            imageset = self._indxr_imagesets[0]
            masker = (
                imageset.get_format_class()
                .get_instance(imageset.paths()[0])
                .get_masker()
            )
            if masker is None:
                # disable dynamic_shadowing
                PhilIndex.params.xia2.settings.input.format.dynamic_shadowing = False

        if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
            # find the region of the scan with the least predicted shadow
            # to use for background determination in XDS INIT step
            from dxtbx.model.experiment_list import ExperimentListFactory

            imageset = self._indxr_imagesets[0]
            xsweep = self._indxr_sweeps[0]
            sweep_filename = os.path.join(
                self.get_working_directory(), "%s_indexed.expt" % xsweep.get_name()
            )
            ExperimentListFactory.from_imageset_and_crystal(imageset, None).as_file(
                sweep_filename
            )

            from xia2.Wrappers.Dials.ShadowPlot import ShadowPlot

            shadow_plot = ShadowPlot()
            shadow_plot.set_working_directory(self.get_working_directory())
            auto_logfiler(shadow_plot)
            shadow_plot.set_sweep_filename(sweep_filename)
            shadow_plot.set_json_filename(
                os.path.join(
                    self.get_working_directory(),
                    "%s_shadow_plot.json" % shadow_plot.get_xpid(),
                )
            )
            shadow_plot.run()
            results = shadow_plot.get_results()

            fraction_shadowed = flex.double(results["fraction_shadowed"])
            if flex.max(fraction_shadowed) == 0:
                PhilIndex.params.xia2.settings.input.format.dynamic_shadowing = False
            else:
                scan_points = flex.double(results["scan_points"])

                scan = imageset.get_scan()
                oscillation = scan.get_oscillation()

                if self._background_images is not None:
                    bg_images = self._background_images
                    bg_range_deg = (
                        scan.get_angle_from_image_index(bg_images[0]),
                        scan.get_angle_from_image_index(bg_images[1]),
                    )
                    bg_range_width = bg_range_deg[1] - bg_range_deg[0]

                    min_shadow = 100
                    best_bg_range = bg_range_deg
                    from libtbx.utils import frange

                    for bg_range_start in frange(
                        flex.min(scan_points),
                        flex.max(scan_points) - bg_range_width,
                        step=oscillation[1],
                    ):
                        bg_range_deg = (bg_range_start, bg_range_start + bg_range_width)
                        sel = (scan_points >= bg_range_deg[0]) & (
                            scan_points <= bg_range_deg[1]
                        )
                        mean_shadow = flex.mean(fraction_shadowed.select(sel))
                        if mean_shadow < min_shadow:
                            min_shadow = mean_shadow
                            best_bg_range = bg_range_deg

                    self._background_images = (
                        scan.get_image_index_from_angle(best_bg_range[0]),
                        scan.get_image_index_from_angle(best_bg_range[1]),
                    )
                    logger.debug(
                        "Setting background images: %s -> %s" % self._background_images
                    )
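                    # Worked example with invented numbers: for a 0.1 deg
                    # oscillation and an original background range spanning
                    # 5 deg, a 5 deg window slides across the scan in 0.1 deg
                    # steps and the window with the lowest mean predicted
                    # shadow fraction is mapped back to image numbers above.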

        init = self.Init()

        for file in ["X-CORRECTIONS.cbf", "Y-CORRECTIONS.cbf"]:
            init.set_input_data_file(file, self._indxr_payload[file])

        init.set_data_range(first, last)

        if self._background_images:
            init.set_background_range(
                self._background_images[0], self._background_images[1]
            )
        else:
            init.set_background_range(
                self._indxr_images[0][0], self._indxr_images[0][1]
            )

        for block in self._indxr_images:
            init.add_spot_range(block[0], block[1])

        init.run()

        # at this stage, need to (perhaps) modify the BKGINIT.cbf image
        # to mark out the back stop

        if PhilIndex.params.xds.backstop_mask:
            logger.debug("Applying mask to BKGINIT.pck")

            # copy the original file
            cbf_old = os.path.join(init.get_working_directory(), "BKGINIT.cbf")
            cbf_save = os.path.join(init.get_working_directory(), "BKGINIT.sav")
            shutil.copyfile(cbf_old, cbf_save)

            # modify the file to give the new mask
            from xia2.Toolkit.BackstopMask import BackstopMask

            mask = BackstopMask(PhilIndex.params.xds.backstop_mask)
            mask.apply_mask_xds(self.get_header(), cbf_save, cbf_old)

            init.reload()
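            # reload() is assumed to make the wrapper re-read its output
            # files, so the masked BKGINIT.cbf is what is stored below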

        for file in ["BLANK.cbf", "BKGINIT.cbf", "GAIN.cbf"]:
            self._indxr_payload[file] = init.get_output_data_file(file)

        if PhilIndex.params.xia2.settings.developmental.use_dials_spotfinder:

            spotfinder = self.DialsSpotfinder()

            for block in self._indxr_images:
                spotfinder.add_spot_range(block[0], block[1])

            spotfinder.run()
            export = self.DialsExportSpotXDS()
            export.set_input_data_file(
                "observations.refl",
                spotfinder.get_output_data_file("observations.refl"),
            )
            export.run()

            for file in ["SPOT.XDS"]:
                self._indxr_payload[file] = export.get_output_data_file(file)

        else:

            # next start to process these - then colspot

            colspot = self.Colspot()

            for file in (
                "X-CORRECTIONS.cbf",
                "Y-CORRECTIONS.cbf",
                "BLANK.cbf",
                "BKGINIT.cbf",
                "GAIN.cbf",
            ):
                colspot.set_input_data_file(file, self._indxr_payload[file])

            colspot.set_data_range(first, last)
            colspot.set_background_range(
                self._indxr_images[0][0], self._indxr_images[0][1]
            )
            for block in self._indxr_images:
                colspot.add_spot_range(block[0], block[1])

            colspot.run()

            for file in ["SPOT.XDS"]:
                self._indxr_payload[file] = colspot.get_output_data_file(file)
Example #3
    def _mosflm_integrate(self):
        '''Perform the actual integration, based on the results of the
    cell refinement or indexing (the results have the same form).'''

        refinr = self.get_integrater_refiner()

        if not refinr.get_refiner_payload('mosflm_orientation_matrix'):
            raise RuntimeError('unexpected situation in indexing')

        lattice = refinr.get_refiner_lattice()
        spacegroup_number = lattice_to_spacegroup(lattice)
        mosaic = refinr.get_refiner_payload('mosaic')
        beam = refinr.get_refiner_payload('beam')
        distance = refinr.get_refiner_payload('distance')
        matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

        integration_params = refinr.get_refiner_payload(
            'mosflm_integration_parameters')

        if integration_params:
            if 'separation' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'separation',
                    '%s %s' % tuple(integration_params['separation']))
            if 'raster' in integration_params:
                self.set_integrater_parameter(
                    'mosflm', 'raster',
                    '%d %d %d %d %d' % tuple(integration_params['raster']))

        refinr.set_refiner_payload('mosflm_integration_parameters', None)

        f = open(
            os.path.join(self.get_working_directory(), 'xiaintegrate.mat'),
            'w')
        for m in matrix:
            f.write(m)
        f.close()

        # then start the integration
        integrater = MosflmIntegrate()
        integrater.set_working_directory(self.get_working_directory())
        auto_logfiler(integrater)

        integrater.set_refine_profiles(self._mosflm_refine_profiles)

        pname, xname, dname = self.get_integrater_project_info()

        if pname is not None and xname is not None and dname is not None:
            Debug.write('Harvesting: %s/%s/%s' % (pname, xname, dname))
            harvest_dir = self.get_working_directory()
            # harvest file name will be %s.mosflm_run_start_end % dname
            temp_dname = '%s_%s' % \
                         (dname, self.get_integrater_sweep_name())
            integrater.set_pname_xname_dname(pname, xname, temp_dname)

        integrater.set_template(os.path.basename(self.get_template()))
        integrater.set_directory(self.get_directory())

        # check for ice - and if so, exclude (ranges taken from
        # XDS documentation)
        if self.get_integrater_ice() != 0:
            Debug.write('Excluding ice rings')
            integrater.set_exclude_ice(True)

        # exclude specified resolution ranges
        if len(self.get_integrater_excluded_regions()) != 0:
            regions = self.get_integrater_excluded_regions()
            Debug.write('Excluding regions: %s' % repr(regions))
            integrater.set_exclude_regions(regions)

        mask = standard_mask(self.get_detector())
        for m in mask:
            integrater.add_instruction(m)

        integrater.set_input_mat_file('xiaintegrate.mat')

        integrater.set_beam_centre(beam)
        integrater.set_distance(distance)
        integrater.set_space_group_number(spacegroup_number)
        integrater.set_mosaic(mosaic)

        if self.get_wavelength_prov() == 'user':
            integrater.set_wavelength(self.get_wavelength())

        parameters = self.get_integrater_parameters('mosflm')
        integrater.update_parameters(parameters)

        if self._mosflm_gain:
            integrater.set_gain(self._mosflm_gain)

        # check for resolution limits
        if self._intgr_reso_high > 0.0:
            integrater.set_d_min(self._intgr_reso_high)
        if self._intgr_reso_low:
            integrater.set_d_max(self._intgr_reso_low)

        if PhilIndex.params.general.backstop_mask:
            from xia2.Toolkit.BackstopMask import BackstopMask
            mask = BackstopMask(PhilIndex.params.general.backstop_mask)
            mask = mask.calculate_mask_mosflm(self.get_header())
            integrater.set_mask(mask)

        detector = self.get_detector()
        detector_width, detector_height = detector[0].get_image_size_mm()

        lim_x = 0.5 * detector_width
        lim_y = 0.5 * detector_height

        Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
        integrater.set_limits(lim_x, lim_y)

        integrater.set_fix_mosaic(self._mosflm_postref_fix_mosaic)
        offset = self.get_frame_offset()

        integrater.set_image_range(
            (self._intgr_wedge[0] - offset, self._intgr_wedge[1] - offset))

        try:
            integrater.run()
        except RuntimeError as e:
            if 'integration failed: reason unknown' in str(e):
                Chatter.write('Mosflm has failed in integration')
                message = 'The input was:\n\n'
                for input in integrater.get_all_input():
                    message += '  %s' % input
                Chatter.write(message)
            raise

        FileHandler.record_log_file(
            '%s %s %s %s mosflm integrate' % \
            (self.get_integrater_sweep_name(),
             pname, xname, dname),
            integrater.get_log_file())

        self._intgr_per_image_statistics = integrater.get_per_image_statistics(
        )

        self._mosflm_hklout = integrater.get_hklout()
        Debug.write('Integration output: %s' % self._mosflm_hklout)

        self._intgr_n_ref = integrater.get_nref()

        # if a BGSIG error happened try not refining the
        # profile and running again...

        if integrater.get_bgsig_too_large():
            if not self._mosflm_refine_profiles:
                raise RuntimeError('BGSIG error with profiles fixed')

            Debug.write('BGSIG error detected - try fixing profile...')

            self._mosflm_refine_profiles = False
            self.set_integrater_done(False)

            return

        if integrater.get_getprof_error():
            Debug.write('GETPROF error detected - try fixing profile...')
            self._mosflm_refine_profiles = False
            self.set_integrater_done(False)

            return

        if (integrater.get_detector_gain_error()
                and not (self.get_imageset().get_detector()[0].get_type()
                         == 'SENSOR_PAD')):
            gain = integrater.get_suggested_gain()
            if gain is not None:
                self.set_integrater_parameter('mosflm', 'gain', gain)
                self.set_integrater_export_parameter('mosflm', 'gain', gain)
                if self._mosflm_gain:
                    Debug.write('GAIN updated to %f' % gain)
                else:
                    Debug.write('GAIN found to be %f' % gain)

                self._mosflm_gain = gain
                self._mosflm_rerun_integration = True

        if not self._mosflm_hklout:
            raise RuntimeError('processing abandoned')

        self._intgr_batches_out = integrater.get_batches_out()

        mosaics = integrater.get_mosaic_spreads()
        if mosaics and len(mosaics) > 0:
            self.set_integrater_mosaic_min_mean_max(
                min(mosaics),
                sum(mosaics) / len(mosaics), max(mosaics))
        else:
            # fall back to the mosaic estimate carried over from the refiner
            m = mosaic
            self.set_integrater_mosaic_min_mean_max(m, m, m)

        # write the report for each image as .*-#$ to Chatter -
        # detailed report will be written automagically to science...

        Chatter.write(self.show_per_image_statistics())

        Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                      self.get_integrater_mosaic_min_mean_max())

        # gather the statistics from the postrefinement
        postref_result = integrater.get_postref_result()

        # now write this to a postrefinement log
        postref_log = os.path.join(self.get_working_directory(),
                                   'postrefinement.log')

        fout = open(postref_log, 'w')

        fout.write('$TABLE: Postrefinement for %s:\n' % \
                   self._intgr_sweep_name)
        fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
        fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')

        for image in sorted(postref_result):
            phix = postref_result[image].get('phix', 0.0)
            phiy = postref_result[image].get('phiy', 0.0)
            phiz = postref_result[image].get('phiz', 0.0)

            fout.write('%d %5.2f %5.2f %5.2f\n' % \
                       (image, phix, phiy, phiz))

        fout.write('$$\n')
        fout.close()

        if self.get_integrater_sweep_name():
            pname, xname, dname = self.get_integrater_project_info()
            FileHandler.record_log_file('%s %s %s %s postrefinement' % \
                                        (self.get_integrater_sweep_name(),
                                         pname, xname, dname),
                                        postref_log)

        return self._mosflm_hklout
Example #4
  def _mosflm_parallel_integrate(self):
    '''Perform the integration as before, but this time as a
    number of parallel Mosflm jobs (hence, in separate directories)
    and including a step of pre-refinement of the mosaic spread and
    missets. This will all be kind of explicit and hence probably
    messy!'''

    refinr = self.get_integrater_refiner()

    lattice = refinr.get_refiner_lattice()
    spacegroup_number = lattice_to_spacegroup(lattice)
    mosaic = refinr.get_refiner_payload('mosaic')
    beam = refinr.get_refiner_payload('beam')
    distance = refinr.get_refiner_payload('distance')
    matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

    integration_params = refinr.get_refiner_payload(
      'mosflm_integration_parameters')

    if integration_params:
      if 'separation' in integration_params:
        self.set_integrater_parameter(
            'mosflm', 'separation',
            '%s %s' % tuple(integration_params['separation']))
      if 'raster' in integration_params:
        self.set_integrater_parameter(
            'mosflm', 'raster',
            '%d %d %d %d %d' % tuple(integration_params['raster']))

    refinr.set_refiner_payload('mosflm_integration_parameters', None)
    pname, xname, dname = self.get_integrater_project_info()

    # what follows below should (i) be run in separate directories
    # and (ii) be repeated N=parallel times.

    nproc = PhilIndex.params.xia2.settings.multiprocessing.nproc
    parallel = nproc

    # FIXME this is something of a kludge - if too few frames refinement
    # and integration does not work well... ideally want at least 15
    # frames / chunk (say)
    nframes = self._intgr_wedge[1] - self._intgr_wedge[0] + 1

    if parallel > nframes / 15:
      parallel = nframes // 15

    if not parallel:
      raise RuntimeError('parallel not set')
    if parallel < 2:
      raise RuntimeError('parallel not parallel: %s' % parallel)

    jobs = []
    hklouts = []
    nref = 0

    # calculate the chunks to use
    offset = self.get_frame_offset()
    start = self._intgr_wedge[0] - offset
    end = self._intgr_wedge[1] - offset

    left_images = 1 + end - start
    left_chunks = parallel
    chunks = []

    while left_images > 0:
      size = left_images // left_chunks
      chunks.append((start, start + size - 1))
      start += size
      left_images -= size
      left_chunks -= 1

    summary_files = []

    for j in range(parallel):

      # make some working directories, as necessary - chunk-(0:N-1)
      wd = os.path.join(self.get_working_directory(),
                        'chunk-%d' % j)
      if not os.path.exists(wd):
        os.makedirs(wd)

      job = MosflmIntegrate()
      job.set_working_directory(wd)

      auto_logfiler(job)

      l = refinr.get_refiner_lattice()

      # create the starting point
      f = open(os.path.join(wd, 'xiaintegrate-%s.mat' % l), 'w')
      for m in matrix:
        f.write(m)
      f.close()

      spacegroup_number = lattice_to_spacegroup(lattice)

      job.set_refine_profiles(self._mosflm_refine_profiles)

      # N.B. for harvesting need to append N to dname.

      if pname is not None and xname is not None and dname is not None:
        Debug.write('Harvesting: %s/%s/%s' %
                    (pname, xname, dname))
        harvest_dir = self.get_working_directory()
        temp_dname = '%s_%s' % \
                     (dname, self.get_integrater_sweep_name())
        job.set_pname_xname_dname(pname, xname, temp_dname)

      job.set_template(os.path.basename(self.get_template()))
      job.set_directory(self.get_directory())

      # check for ice - and if so, exclude (ranges taken from
      # XDS documentation)
      if self.get_integrater_ice() != 0:
        Debug.write('Excluding ice rings')
        job.set_exclude_ice(True)

      # exclude specified resolution ranges
      if len(self.get_integrater_excluded_regions()) != 0:
        regions = self.get_integrater_excluded_regions()
        Debug.write('Excluding regions: %s' % repr(regions))
        job.set_exclude_regions(regions)

      mask = standard_mask(self.get_detector())
      for m in mask:
        job.add_instruction(m)

      job.set_input_mat_file('xiaintegrate-%s.mat' % l)

      job.set_beam_centre(beam)
      job.set_distance(distance)
      job.set_space_group_number(spacegroup_number)
      job.set_mosaic(mosaic)

      if self.get_wavelength_prov() == 'user':
        job.set_wavelength(self.get_wavelength())

      parameters = self.get_integrater_parameters('mosflm')
      job.update_parameters(parameters)

      if self._mosflm_gain:
        job.set_gain(self._mosflm_gain)

      # check for resolution limits
      if self._intgr_reso_high > 0.0:
        job.set_d_min(self._intgr_reso_high)
      if self._intgr_reso_low:
        job.set_d_max(self._intgr_reso_low)

      if PhilIndex.params.general.backstop_mask:
        from xia2.Toolkit.BackstopMask import BackstopMask
        mask = BackstopMask(PhilIndex.params.general.backstop_mask)
        mask = mask.calculate_mask_mosflm(self.get_header())
        job.set_mask(mask)

      detector = self.get_detector()
      detector_width, detector_height = detector[0].get_image_size_mm()

      lim_x = 0.5 * detector_width
      lim_y = 0.5 * detector_height

      Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
      job.set_limits(lim_x, lim_y)

      job.set_fix_mosaic(self._mosflm_postref_fix_mosaic)

      job.set_pre_refinement(True)
      job.set_image_range(chunks[j])


      # these are now running so ...

      jobs.append(job)

      continue

    # ok, at this stage I need to ...
    #
    # (i) accumulate the statistics as a function of batch
    # (ii) merge them into a single block
    #
    # This is likely to be a pain in the arse!

    first_integrated_batch = 1.0e6
    last_integrated_batch = -1.0e6

    all_residuals = []

    threads = []

    for j in range(parallel):
      job = jobs[j]

      # now wait for them to finish - first wait will really be the
      # first one, then all should be finished...

      thread = Background(job, 'run')
      thread.start()
      threads.append(thread)

    mosaics = []
    postref_result = { }

    integrated_images_first = 1.0e6
    integrated_images_last = -1.0e6
    self._intgr_per_image_statistics = {}

    for j in range(parallel):
      thread = threads[j]
      thread.stop()
      job = jobs[j]

      # get the log file
      output = job.get_all_output()

      # record a copy of it, perhaps - though not if parallel
      if self.get_integrater_sweep_name() and False:
        pname, xname, dname = self.get_integrater_project_info()
        FileHandler.record_log_file(
            '%s %s %s %s mosflm integrate' % \
            (self.get_integrater_sweep_name(),
             pname, xname, '%s_%d' % (dname, j)),
            job.get_log_file())

      # look for things that we want to know...
      # that is, the output reflection file name, the updated
      # value for the gain (if present), any warnings, errors,
      # or just interesting facts.

      batches = job.get_batches_out()
      integrated_images_first = min(batches[0], integrated_images_first)
      integrated_images_last = max(batches[1], integrated_images_last)

      mosaics.extend(job.get_mosaic_spreads())

      if min(mosaics) < 0:
        raise IntegrationError('negative mosaic spread: %s' % min(mosaics))

      if (job.get_detector_gain_error() and not
          (self.get_imageset().get_detector()[0].get_type() == 'SENSOR_PAD')):
        gain = job.get_suggested_gain()
        if gain is not None:
          self.set_integrater_parameter('mosflm', 'gain', gain)
          self.set_integrater_export_parameter('mosflm', 'gain', gain)
          if self._mosflm_gain:
            Debug.write('GAIN updated to %f' % gain)
          else:
            Debug.write('GAIN found to be %f' % gain)

          self._mosflm_gain = gain
          self._mosflm_rerun_integration = True

      hklout = job.get_hklout()
      Debug.write('Integration output: %s' % hklout)
      hklouts.append(hklout)

      nref += job.get_nref()

      # if a BGSIG error happened try not refining the
      # profile and running again...

      if job.get_bgsig_too_large():
        if not self._mosflm_refine_profiles:
          raise RuntimeError('BGSIG error with profiles fixed')

        Debug.write(
            'BGSIG error detected - try fixing profile...')

        self._mosflm_refine_profiles = False
        self.set_integrater_done(False)

        return

      if job.get_getprof_error():
        Debug.write(
            'GETPROF error detected - try fixing profile...')
        self._mosflm_refine_profiles = False
        self.set_integrater_done(False)

        return

      # here
      # write the report for each image as .*-#$ to Chatter -
      # detailed report will be written automagically to science...

      self._intgr_per_image_statistics.update(job.get_per_image_statistics())
      postref_result.update(job.get_postref_result())

      # inspect the output for e.g. very high weighted residuals

      all_residuals.extend(job.get_residuals())

    self._intgr_batches_out = (integrated_images_first,
                               integrated_images_last)

    if mosaics and len(mosaics) > 0:
      self.set_integrater_mosaic_min_mean_max(
          min(mosaics), sum(mosaics) / len(mosaics), max(mosaics))
    else:
      # fall back to the mosaic estimate carried over from the refiner
      m = mosaic
      self.set_integrater_mosaic_min_mean_max(m, m, m)

    Chatter.write(self.show_per_image_statistics())

    Chatter.write('Mosaic spread: %.3f < %.3f < %.3f' % \
                  self.get_integrater_mosaic_min_mean_max())

    # gather the statistics from the postrefinement for all sweeps
    # now write this to a postrefinement log

    postref_log = os.path.join(self.get_working_directory(),
                               'postrefinement.log')

    fout = open(postref_log, 'w')

    fout.write('$TABLE: Postrefinement for %s:\n' % \
               self._intgr_sweep_name)
    fout.write('$GRAPHS: Missetting angles:A:1, 2, 3, 4: $$\n')
    fout.write('Batch PhiX PhiY PhiZ $$ Batch PhiX PhiY PhiZ $$\n')

    for image in sorted(postref_result):
      phix = postref_result[image].get('phix', 0.0)
      phiy = postref_result[image].get('phiy', 0.0)
      phiz = postref_result[image].get('phiz', 0.0)

      fout.write('%d %5.2f %5.2f %5.2f\n' % \
                 (image, phix, phiy, phiz))

    fout.write('$$\n')
    fout.close()

    if self.get_integrater_sweep_name():
      pname, xname, dname = self.get_integrater_project_info()
      FileHandler.record_log_file('%s %s %s %s postrefinement' % \
                                  (self.get_integrater_sweep_name(),
                                   pname, xname, dname),
                                  postref_log)

    hklouts.sort()

    hklout = os.path.join(self.get_working_directory(),
                          os.path.split(hklouts[0])[-1])

    Debug.write('Sorting data to %s' % hklout)
    for hklin in hklouts:
      Debug.write('<= %s' % hklin)

    sortmtz = Sortmtz()
    sortmtz.set_hklout(hklout)
    for hklin in hklouts:
      sortmtz.add_hklin(hklin)

    sortmtz.sort()

    self._mosflm_hklout = hklout

    return self._mosflm_hklout
Example #5
  def _mosflm_integrate(self):
    '''Perform the actual integration, based on the results of the
    cell refinement or indexing (the results have the same form).'''

    refinr = self.get_integrater_refiner()

    if not refinr.get_refiner_payload('mosflm_orientation_matrix'):
      raise RuntimeError('unexpected situation in indexing')

    lattice = refinr.get_refiner_lattice()
    spacegroup_number = lattice_to_spacegroup(lattice)
    mosaic = refinr.get_refiner_payload('mosaic')
    beam = refinr.get_refiner_payload('beam')
    distance = refinr.get_refiner_payload('distance')
    matrix = refinr.get_refiner_payload('mosflm_orientation_matrix')

    integration_params = refinr.get_refiner_payload(
      'mosflm_integration_parameters')

    if integration_params:
      if 'separation' in integration_params:
        self.set_integrater_parameter(
          'mosflm', 'separation',
          '%s %s' % tuple(integration_params['separation']))
      if 'raster' in integration_params:
        self.set_integrater_parameter(
          'mosflm', 'raster',
          '%d %d %d %d %d' % tuple(integration_params['raster']))

    refinr.set_refiner_payload('mosflm_integration_parameters', None)

    f = open(os.path.join(self.get_working_directory(),
                          'xiaintegrate.mat'), 'w')
    for m in matrix:
      f.write(m)
    f.close()

    # then start the integration
    integrater = MosflmIntegrate()
    integrater.set_working_directory(self.get_working_directory())
    auto_logfiler(integrater)

    integrater.set_refine_profiles(self._mosflm_refine_profiles)

    pname, xname, dname = self.get_integrater_project_info()

    if pname is not None and xname is not None and dname is not None:
      Debug.write('Harvesting: %s/%s/%s' % (pname, xname, dname))
      harvest_dir = self.get_working_directory()
      # harvest file name will be %s.mosflm_run_start_end % dname
      temp_dname = '%s_%s' % \
                   (dname, self.get_integrater_sweep_name())
      integrater.set_pname_xname_dname(pname, xname, temp_dname)

    integrater.set_template(os.path.basename(self.get_template()))
    integrater.set_directory(self.get_directory())

    # check for ice - and if so, exclude (ranges taken from
    # XDS documentation)
    if self.get_integrater_ice() != 0:
      Debug.write('Excluding ice rings')
      integrater.set_exclude_ice(True)

    # exclude specified resolution ranges
    if len(self.get_integrater_excluded_regions()) != 0:
      regions = self.get_integrater_excluded_regions()
      Debug.write('Excluding regions: %s' % repr(regions))
      integrater.set_exclude_regions(regions)

    mask = standard_mask(self.get_detector())
    for m in mask:
      integrater.add_instruction(m)

    integrater.set_input_mat_file('xiaintegrate.mat')

    integrater.set_beam_centre(beam)
    integrater.set_distance(distance)
    integrater.set_space_group_number(spacegroup_number)
    integrater.set_mosaic(mosaic)

    if self.get_wavelength_prov() == 'user':
      integrater.set_wavelength(self.get_wavelength())

    parameters = self.get_integrater_parameters('mosflm')
    integrater.update_parameters(parameters)

    if self._mosflm_gain:
      integrater.set_gain(self._mosflm_gain)

    # check for resolution limits
    if self._intgr_reso_high > 0.0:
      integrater.set_d_min(self._intgr_reso_high)
    if self._intgr_reso_low:
      integrater.set_d_max(self._intgr_reso_low)

    if PhilIndex.params.general.backstop_mask:
      from xia2.Toolkit.BackstopMask import BackstopMask
      mask = BackstopMask(PhilIndex.params.general.backstop_mask)
      mask = mask.calculate_mask_mosflm(self.get_header())
      integrater.set_mask(mask)

    detector = self.get_detector()
    detector_width, detector_height = detector[0].get_image_size_mm()

    lim_x = 0.5 * detector_width
    lim_y = 0.5 * detector_height

    Debug.write('Scanner limits: %.1f %.1f' % (lim_x, lim_y))
    integrater.set_limits(lim_x, lim_y)

    integrater.set_fix_mosaic(self._mosflm_postref_fix_mosaic)
    offset = self.get_frame_offset()

    integrater.set_image_range(
      (self._intgr_wedge[0] - offset, self._intgr_wedge[1] - offset))

    try:
      integrater.run()
    except RuntimeError as e:
      if 'integration failed: reason unknown' in str(e):
        Chatter.write('Mosflm has failed in integration')
        message = 'The input was:\n\n'
        for input in integrater.get_all_input():
          message += '  %s' % input
        Chatter.write(message)
      raise
Example #6
  def _index_prepare(self):
    '''Prepare to do autoindexing - in XDS terms this will mean
    calling xycorr, init and colspot on the input images.'''

    # decide on images to work with

    Debug.write('XDS INDEX PREPARE:')
    Debug.write('Wavelength: %.6f' % self.get_wavelength())
    Debug.write('Distance: %.2f' % self.get_distance())

    if self._indxr_images == []:
      _select_images_function = getattr(
          self, '_index_select_images_%s' % (self._index_select_images))
      wedges = _select_images_function()
      for wedge in wedges:
        self.add_indexer_image_wedge(wedge)
      self.set_indexer_prepare_done(True)

    all_images = self.get_matching_images()

    first = min(all_images)
    last = max(all_images)

    # next start to process these - first xycorr

    xycorr = self.Xycorr()

    xycorr.set_data_range(first, last)
    xycorr.set_background_range(self._indxr_images[0][0],
                                self._indxr_images[0][1])
    from dxtbx.serialize.xds import to_xds
    converter = to_xds(self.get_imageset())
    xds_beam_centre = converter.detector_origin
    xycorr.set_beam_centre(xds_beam_centre[0],
                           xds_beam_centre[1])
    for block in self._indxr_images:
      xycorr.add_spot_range(block[0], block[1])

    # FIXME need to set the origin here

    xycorr.run()

    for file in ['X-CORRECTIONS.cbf',
                 'Y-CORRECTIONS.cbf']:
      self._indxr_payload[file] = xycorr.get_output_data_file(file)

    # next start to process these - then init

    if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
      # find the region of the scan with the least predicted shadow
      # to use for background determination in XDS INIT step
      from dxtbx.serialize import dump
      from dxtbx.datablock import DataBlock
      imageset = self._indxr_imagesets[0]
      xsweep = self._indxr_sweeps[0]
      sweep_filename = os.path.join(
        self.get_working_directory(), '%s_datablock.json' %xsweep.get_name())
      dump.datablock(DataBlock([imageset]), sweep_filename)

      from xia2.Wrappers.Dials.ShadowPlot import ShadowPlot
      shadow_plot = ShadowPlot()
      shadow_plot.set_working_directory(self.get_working_directory())
      auto_logfiler(shadow_plot)
      shadow_plot.set_sweep_filename(sweep_filename)
      shadow_plot.set_json_filename(
        os.path.join(
          self.get_working_directory(),
          '%s_shadow_plot.json' %shadow_plot.get_xpid()))
      shadow_plot.run()
      results = shadow_plot.get_results()
      from scitbx.array_family import flex
      fraction_shadowed = flex.double(results['fraction_shadowed'])
      scan_points = flex.double(results['scan_points'])

      phi_width = self.get_phi_width()
      scan = imageset.get_scan()
      oscillation_range = scan.get_oscillation_range()
      oscillation = scan.get_oscillation()
      bg_images = self._background_images
      bg_range_deg = (scan.get_angle_from_image_index(bg_images[0]),
                      scan.get_angle_from_image_index(bg_images[1]))
      bg_range_width = bg_range_deg[1] - bg_range_deg[0]

      min_shadow = 100
      best_bg_range = bg_range_deg
      from libtbx.utils import frange
      for bg_range_start in frange(flex.min(scan_points), flex.max(scan_points) - bg_range_width, step=oscillation[1]):
        bg_range_deg = (bg_range_start, bg_range_start + bg_range_width)
        sel = (scan_points >= bg_range_deg[0]) & (scan_points <= bg_range_deg[1])
        mean_shadow = flex.mean(fraction_shadowed.select(sel))
        if mean_shadow < min_shadow:
          min_shadow = mean_shadow
          best_bg_range = bg_range_deg

      self._background_images = (
        scan.get_image_index_from_angle(best_bg_range[0]),
        scan.get_image_index_from_angle(best_bg_range[1]))
      Debug.write('Setting background images: %s -> %s' %self._background_images)

    init = self.Init()

    for file in ['X-CORRECTIONS.cbf',
                 'Y-CORRECTIONS.cbf']:
      init.set_input_data_file(file, self._indxr_payload[file])

    init.set_data_range(first, last)

    if self._background_images:
      init.set_background_range(self._background_images[0],
                                self._background_images[1])
    else:
      init.set_background_range(self._indxr_images[0][0],
                                self._indxr_images[0][1])

    for block in self._indxr_images:
      init.add_spot_range(block[0], block[1])

    init.run()

    # at this stage, need to (perhaps) modify the BKGINIT.cbf image
    # to mark out the back stop

    if PhilIndex.params.general.backstop_mask:
      Debug.write('Applying mask to BKGINIT.cbf')

      # copy the original file
      cbf_old = os.path.join(init.get_working_directory(),
                             'BKGINIT.cbf')
      cbf_save = os.path.join(init.get_working_directory(),
                              'BKGINIT.sav')
      shutil.copyfile(cbf_old, cbf_save)

      # modify the file to give the new mask
      from xia2.Toolkit.BackstopMask import BackstopMask
      mask = BackstopMask(PhilIndex.params.general.backstop_mask)
      mask.apply_mask_xds(self.get_header(), cbf_save, cbf_old)

      init.reload()

    for file in ['BLANK.cbf',
                 'BKGINIT.cbf',
                 'GAIN.cbf']:
      self._indxr_payload[file] = init.get_output_data_file(file)

    if PhilIndex.params.xia2.settings.developmental.use_dials_spotfinder:

      spotfinder = self.DialsSpotfinder()

      for block in self._indxr_images:
        spotfinder.add_spot_range(block[0], block[1])

      spotfinder.run()
      export = self.DialsExportSpotXDS()
      export.set_input_data_file(
          'reflections.pickle',
          spotfinder.get_output_data_file('reflections.pickle'))
      export.run()

      for file in ['SPOT.XDS']:
        self._indxr_payload[file] = export.get_output_data_file(file)

    else:

      # next start to process these - then colspot

      colspot = self.Colspot()

      for file in ['X-CORRECTIONS.cbf',
                   'Y-CORRECTIONS.cbf',
                   'BLANK.cbf',
                   'BKGINIT.cbf',
                   'GAIN.cbf']:
        colspot.set_input_data_file(file, self._indxr_payload[file])

      colspot.set_data_range(first, last)
      colspot.set_background_range(self._indxr_images[0][0],
                                   self._indxr_images[0][1])
      for block in self._indxr_images:
        colspot.add_spot_range(block[0], block[1])

      colspot.run()

      for file in ['SPOT.XDS']:
        self._indxr_payload[file] = colspot.get_output_data_file(file)

    # that should be everything prepared... all of the important
    # files should be loaded into memory to be able to cope with
    # integration happening somewhere else

    return
Example #7
    def _index_prepare(self):
        '''Prepare to do autoindexing - in XDS terms this will mean
    calling xycorr, init and colspot on the input images.'''

        # decide on images to work with

        Debug.write('XDS INDEX PREPARE:')
        Debug.write('Wavelength: %.6f' % self.get_wavelength())
        Debug.write('Distance: %.2f' % self.get_distance())

        if self._indxr_images == []:
            _select_images_function = getattr(
                self, '_index_select_images_%s' % (self._index_select_images))
            wedges = _select_images_function()
            for wedge in wedges:
                self.add_indexer_image_wedge(wedge)
            self.set_indexer_prepare_done(True)

        all_images = self.get_matching_images()

        first = min(all_images)
        last = max(all_images)

        # next start to process these - first xycorr

        xycorr = self.Xycorr()

        xycorr.set_data_range(first, last)
        xycorr.set_background_range(self._indxr_images[0][0],
                                    self._indxr_images[0][1])
        from dxtbx.serialize.xds import to_xds
        converter = to_xds(self.get_imageset())
        xds_beam_centre = converter.detector_origin
        xycorr.set_beam_centre(xds_beam_centre[0], xds_beam_centre[1])
        for block in self._indxr_images:
            xycorr.add_spot_range(block[0], block[1])

        # FIXME need to set the origin here

        xycorr.run()

        for file in ['X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf']:
            self._indxr_payload[file] = xycorr.get_output_data_file(file)

        # next start to process these - then init

        if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
            imageset = self._indxr_imagesets[0]
            masker = imageset.masker().format_class(
                imageset.paths()[0]).get_goniometer_shadow_masker()
            if masker is None:
                # disable dynamic_shadowing
                PhilIndex.params.xia2.settings.input.format.dynamic_shadowing = False

        if PhilIndex.params.xia2.settings.input.format.dynamic_shadowing:
            # find the region of the scan with the least predicted shadow
            # to use for background determination in XDS INIT step
            from dxtbx.serialize import dump
            from dxtbx.datablock import DataBlock
            imageset = self._indxr_imagesets[0]
            xsweep = self._indxr_sweeps[0]
            sweep_filename = os.path.join(
                self.get_working_directory(),
                '%s_datablock.json' % xsweep.get_name())
            dump.datablock(DataBlock([imageset]), sweep_filename)

            from xia2.Wrappers.Dials.ShadowPlot import ShadowPlot
            shadow_plot = ShadowPlot()
            shadow_plot.set_working_directory(self.get_working_directory())
            auto_logfiler(shadow_plot)
            shadow_plot.set_sweep_filename(sweep_filename)
            shadow_plot.set_json_filename(
                os.path.join(self.get_working_directory(),
                             '%s_shadow_plot.json' % shadow_plot.get_xpid()))
            shadow_plot.run()
            results = shadow_plot.get_results()
            from scitbx.array_family import flex
            fraction_shadowed = flex.double(results['fraction_shadowed'])
            if flex.max(fraction_shadowed) == 0:
                PhilIndex.params.xia2.settings.input.format.dynamic_shadowing = False
            else:
                scan_points = flex.double(results['scan_points'])

                phi_width = self.get_phi_width()
                scan = imageset.get_scan()
                oscillation_range = scan.get_oscillation_range()
                oscillation = scan.get_oscillation()

                if self._background_images is not None:
                    bg_images = self._background_images
                    bg_range_deg = (scan.get_angle_from_image_index(
                        bg_images[0]),
                                    scan.get_angle_from_image_index(
                                        bg_images[1]))
                    bg_range_width = bg_range_deg[1] - bg_range_deg[0]

                    min_shadow = 100
                    best_bg_range = bg_range_deg
                    from libtbx.utils import frange
                    for bg_range_start in frange(flex.min(scan_points),
                                                 flex.max(scan_points) -
                                                 bg_range_width,
                                                 step=oscillation[1]):
                        bg_range_deg = (bg_range_start,
                                        bg_range_start + bg_range_width)
                        sel = (scan_points >= bg_range_deg[0]) & (
                            scan_points <= bg_range_deg[1])
                        mean_shadow = flex.mean(fraction_shadowed.select(sel))
                        if mean_shadow < min_shadow:
                            min_shadow = mean_shadow
                            best_bg_range = bg_range_deg

                    self._background_images = (scan.get_image_index_from_angle(
                        best_bg_range[0]),
                                               scan.get_image_index_from_angle(
                                                   best_bg_range[1]))
                    Debug.write('Setting background images: %s -> %s' %
                                self._background_images)

        init = self.Init()

        for file in ['X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf']:
            init.set_input_data_file(file, self._indxr_payload[file])

        init.set_data_range(first, last)

        if self._background_images:
            init.set_background_range(self._background_images[0],
                                      self._background_images[1])
        else:
            init.set_background_range(self._indxr_images[0][0],
                                      self._indxr_images[0][1])

        for block in self._indxr_images:
            init.add_spot_range(block[0], block[1])

        init.run()

        # at this stage, need to (perhaps) modify the BKGINIT.cbf image
        # to mark out the back stop

        if PhilIndex.params.general.backstop_mask:
            Debug.write('Applying mask to BKGINIT.cbf')

            # copy the original file
            cbf_old = os.path.join(init.get_working_directory(), 'BKGINIT.cbf')
            cbf_save = os.path.join(init.get_working_directory(),
                                    'BKGINIT.sav')
            shutil.copyfile(cbf_old, cbf_save)

            # modify the file to give the new mask
            from xia2.Toolkit.BackstopMask import BackstopMask
            mask = BackstopMask(PhilIndex.params.general.backstop_mask)
            mask.apply_mask_xds(self.get_header(), cbf_save, cbf_old)

            init.reload()

        for file in ['BLANK.cbf', 'BKGINIT.cbf', 'GAIN.cbf']:
            self._indxr_payload[file] = init.get_output_data_file(file)

        if PhilIndex.params.xia2.settings.developmental.use_dials_spotfinder:

            spotfinder = self.DialsSpotfinder()

            for block in self._indxr_images:
                spotfinder.add_spot_range(block[0], block[1])

            spotfinder.run()
            export = self.DialsExportSpotXDS()
            export.set_input_data_file(
                'reflections.pickle',
                spotfinder.get_output_data_file('reflections.pickle'))
            export.run()

            for file in ['SPOT.XDS']:
                self._indxr_payload[file] = export.get_output_data_file(file)

        else:

            # next start to process these - then colspot

            colspot = self.Colspot()

            for file in [
                    'X-CORRECTIONS.cbf', 'Y-CORRECTIONS.cbf', 'BLANK.cbf',
                    'BKGINIT.cbf', 'GAIN.cbf'
            ]:
                colspot.set_input_data_file(file, self._indxr_payload[file])

            colspot.set_data_range(first, last)
            colspot.set_background_range(self._indxr_images[0][0],
                                         self._indxr_images[0][1])
            for block in self._indxr_images:
                colspot.add_spot_range(block[0], block[1])

            colspot.run()

            for file in ['SPOT.XDS']:
                self._indxr_payload[file] = colspot.get_output_data_file(file)

        # that should be everything prepared... all of the important
        # files should be loaded into memory to be able to cope with
        # integration happening somewhere else

        return