Example #1
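Overrides compute_functional_gradients_and_curvatures in what appears to be a scaling refinement engine: the functional and gradient are accumulated block by block over the scaler's Ih_table, restraint terms are folded in, and curvatures are not computed (the final return value is None).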
    def compute_functional_gradients_and_curvatures(self):
        """overwrite method to avoid calls to 'blocks' methods of target"""
        self.prepare_for_step()

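        # A free (cross-validation) set, if present, occupies the last block:
        # update it once, release its derivative data, and minimise over the
        # remaining working blocks only.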
        if self._scaler.Ih_table.free_Ih_table:
            blocks = self._scaler.Ih_table.blocked_data_list[:-1]
            free_block_id = len(self._scaler.Ih_table.blocked_data_list) - 1
            self._scaler.update_for_minimisation(self._parameters, free_block_id)
            self._scaler.clear_memory_from_derivs(free_block_id)
        else:
            blocks = self._scaler.Ih_table.blocked_data_list

        # if self._scaler.params.scaling_options.nproc > 1:
        f = []
        gi = []
        for block_id, block in enumerate(blocks):
            self._scaler.update_for_minimisation(self._parameters, block_id)
            fb, gb = self._target.compute_functional_gradients(block)
            f.append(fb)
            gi.append(gb)
            self._scaler.clear_memory_from_derivs(block_id)
        """task_results = easy_mp.parallel_map(
      func=self._target.compute_functional_gradients,
      iterable=blocks,
      processes=self._scaler.params.scaling_options.nproc,
      method="multiprocessing",
      preserve_exception_message=True
      )
    f, gi = zip(*task_results)"""
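        # Reduce the per-block results: sum the functional values and
        # accumulate the gradient vectors into a single gradient.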
        f = sum(f)
        g = gi[0]
        for i in range(1, len(gi)):
            g += gi[i]
        """else:
      f = 0.0
      g = None
      for block in blocks:
        fi, gi = self._target.compute_functional_gradients(block)
        f += fi
        if g:
          g += gi
        else:
          g = gi"""

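        # Fold any restraint contributions into the functional and gradient.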
        restraints = self._target.compute_restraints_functional_gradients_and_curvatures(
            self._parameters
        )

        if restraints:
            f += restraints[0]
            g += restraints[1]
        logger.debug("Functional : %s" % f)
        logger.debug("Gradients : %s" % list(g))
        log_memory_usage()
        logger.debug("\n")
        return f, g, None
Example #2
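Constructor of a scaling script object: it prepares the input params, experiments and reflections via prepare_input, then creates the model and scaler.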
    def __init__(self, params, experiments, reflections):
        self.scaler = None
        self.scaled_miller_array = None
        self.merging_statistics_result = None
        self.anom_merging_statistics_result = None
        self.filtering_results = None
        self.params, self.experiments, self.reflections = prepare_input(
            params, experiments, reflections
        )
        self._create_model_and_scaler()
        logger.debug("Initialised scaling script object")
        log_memory_usage()
Example #3
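Overrides build_up from AdaptLstbx: the normal equations are reset, then residuals (and, unless objective_only is set, the Jacobian) are added block by block, followed by any restraint terms.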
    def build_up(self, objective_only=False):
        "overwrite method from Adaptlstbx"
        # set current parameter values
        self.prepare_for_step()

        # Reset the state to construction time, i.e. no equations accumulated
        self.reset()

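        # A free (cross-validation) set, if present, is evaluated once and
        # then excluded from the blocks used to build the normal equations.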
        if self._scaler.Ih_table.free_Ih_table:
            blocks = self._scaler.Ih_table.blocked_data_list[:-1]
            free_block_id = len(self._scaler.Ih_table.blocked_data_list) - 1
            self._scaler.update_for_minimisation(self._parameters, free_block_id)
            self._scaler.clear_memory_from_derivs(free_block_id)
        else:
            blocks = self._scaler.Ih_table.blocked_data_list

        # observation terms
        if objective_only:
            # if self._scaler.params.scaling_options.nproc > 1:  # no mp option yet
            for block_id, block in enumerate(blocks):
                self._scaler.update_for_minimisation(self._parameters, block_id)
                residuals, weights = self._target.compute_residuals(block)
                self.add_residuals(residuals, weights)
        else:
            # if self._scaler.params.scaling_options.nproc:  # no mp option yet

            self._jacobian = None

            for block_id, block in enumerate(blocks):
                self._scaler.update_for_minimisation(self._parameters, block_id)
                residuals, jacobian, weights = self._target.compute_residuals_and_gradients(
                    block
                )
                self.add_equations(residuals, jacobian, weights)
            """task_results = easy_mp.pool_map(
        fixed_func=self._target.compute_residuals_and_gradients,
        iterable=blocks,
        processes=self._scaler.params.scaling_options.nproc
        )
      for result in task_results:
        self.add_equations(result[0], result[1], result[2])"""

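        # Restraints contribute additional residuals (and gradients) to the
        # normal equations.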
        restraints = self._target.compute_restraints_residuals_and_gradients(
            self._parameters
        )
        if restraints:
            if objective_only:
                self.add_residuals(restraints[0], restraints[2])
            else:
                self.add_equations(restraints[0], restraints[1], restraints[2])
        logger.debug("added equations for all blocks")
        log_memory_usage()
        logger.debug("\n")
        return
Example #4
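A variant of Example #1 in which the scaler supplies the working blocks through get_blocks_for_minimisation instead of the refinery handling the free set itself.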
    def compute_functional_gradients_and_curvatures(self):
        """overwrite method to avoid calls to 'blocks' methods of target"""
        self.prepare_for_step()

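        # The scaler now supplies the working blocks directly; compare the
        # explicit free-set handling in Example #1.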
        work_blocks = self._scaler.get_blocks_for_minimisation()

        f = []
        gi = []
        for block_id, block in enumerate(work_blocks):
            self._scaler.update_for_minimisation(self._parameters, block_id)
            fb, gb = self._target.compute_functional_gradients(block)
            f.append(fb)
            gi.append(gb)
        """task_results = easy_mp.parallel_map(
            func=self._target.compute_functional_gradients,
            iterable=blocks,
            processes=self._scaler.params.scaling_options.nproc,
            method="multiprocessing",
            preserve_exception_message=True
        )
        f, gi = zip(*task_results)"""

        f = sum(f)
        g = gi[0]
        for i in range(1, len(gi)):
            g += gi[i]

        restraints = self._target.compute_restraints_functional_gradients_and_curvatures(
            self._parameters
        )

        if restraints:
            f += restraints[0]
            g += restraints[1]
        logger.debug("Functional : %s", f)
        logger.debug("Gradients : %s", list(g))
        log_memory_usage()
        logger.debug("\n")
        return f, g, None