def run_dssa(self):
    """Minimise via direct-search simulated annealing from a random simplex.

    Builds an initial simplex of self.n random vertices in [-1, 1)^n plus
    the zero vector, runs the DSSA optimizer with this object as the
    evaluator, and stores the best point found in self.x.
    """
    # n random vertices uniformly in [-1, 1)^n ...
    simplex = [flex.random_double(self.n) * 2.0 - 1.0 for _ in range(self.n)]
    # ... plus the origin as the (n+1)-th vertex.
    simplex.append(flex.double(self.n, 0))
    self.optimizer = dssa.dssa(
        dimension=self.n,
        matrix=simplex,
        evaluator=self,
        tolerance=1e-5,
        further_opt=True)
    self.x = self.optimizer.get_solution()
def __init__(self, name):
    """Set up a 2-dimensional test problem and immediately solve it with DSSA.

    :param name: label for this test instance (stored on self.name)

    Side effects: runs the optimizer, stores the solution in self.x, and
    prints an iteration/solution summary to stdout.
    """
    self.name = name
    self.fcount = 0  # target-function evaluation counter (incremented elsewhere)
    self.n = 2
    # `start` is a module-level starting point defined elsewhere in the file.
    self.x = start.deep_copy()
    # n+1 random vertices scattered about the starting point.
    self.starting_simplex = []
    for ii in range(self.n + 1):
        self.starting_simplex.append(
            3.1 * (flex.random_double(self.n) / 2 - 1) + self.x)
    self.optimizer = direct_search_simulated_annealing.dssa(
        dimension=self.n,
        matrix=self.starting_simplex,
        evaluator=self,
        further_opt=True,
        coolfactor=0.5,
        simplex_scale=1,
    )
    self.x = self.optimizer.get_solution()
    # FIX: was a Python 2 `print` statement (a SyntaxError under Python 3);
    # converted to the print() function, matching the sibling version of
    # this constructor elsewhere in the file.
    print("DSSA ITERATIONS", self.optimizer.count, self.fcount,
          "SOLUTION", list(self.x), self.target(self.x))
def run_dssa(self):
    """Run DSSA from a simplex perturbed about the current point self.x.

    Stores the best point in self.solution and its target value in
    self.score; the candidate list from the optimizer is retrieved as in
    the original implementation.
    """
    # n random vertices offset from the current point, plus the point itself.
    self.starting_simplex = [
        (flex.random_double(self.n * self.n_refine) * 2 - 1.0) * 1.0 + self.x
        for _ in range(self.n)]
    self.starting_simplex.append(self.x)
    optimizer = direct_search_simulated_annealing.dssa(
        dimension=self.n,
        matrix=self.starting_simplex,
        evaluator=self,
        max_iter=5000,
        further_opt=True,
        tolerance=1e-4)
    # NOTE(review): `candidates` is bound but not used below — kept because
    # the original code made this call; confirm whether it can be dropped.
    candidates = optimizer.get_candi()
    self.solution = optimizer.get_solution()
    self.score = self.target(self.solution)
def __init__(self, name):
    """Construct a 2D test case and solve it at once with DSSA.

    :param name: label for this test instance (stored on self.name)

    Side effects: runs the optimizer, stores the solution in self.x, and
    prints an iteration/solution summary to stdout.
    """
    self.name = name
    self.fcount = 0  # counts target-function evaluations
    self.n = 2
    # `start` is a module-level starting point defined elsewhere in the file.
    self.x = start.deep_copy()
    # Build n+1 random simplex vertices scattered about the starting point.
    self.starting_simplex = [
        3.1 * (flex.random_double(self.n) / 2 - 1) + self.x
        for _ in range(self.n + 1)]
    self.optimizer = direct_search_simulated_annealing.dssa(
        dimension=self.n,
        matrix=self.starting_simplex,
        evaluator=self,
        further_opt=True,
        coolfactor=0.5,
        simplex_scale=1)
    self.x = self.optimizer.get_solution()
    print("DSSA ITERATIONS", self.optimizer.count, self.fcount,
          "SOLUTION", list(self.x), self.target(self.x))
def scale(self):
    '''Find scale factors s, b that minimise target function.

    Optimises 2*(n_frames - 1) parameters (the s and b scale factors for
    every frame after the first; the first frame's factors are pinned to
    0.0), writes them back to self._scales_s / self._scales_b, rescales
    the raw intensities into self._scaled_intensities, and returns the
    Rmerge of the scaled data (sum |I - <I>| / sum <I> over reflections
    observed more than once).
    '''
    from scitbx.direct_search_simulated_annealing import dssa
    from scitbx.simplex import simplex_opt
    from scitbx.array_family import flex

    # only scale second and subsequent scale factors - first ones are
    # constrained to 0.0
    self.n = 2 * len(self._frame_sizes) - 2
    # Parameter vector: s factors for frames 1.. followed by b factors.
    self.x = flex.double(self._scales_s[1:] + self._scales_b[1:])
    # n+1 randomly perturbed copies of x form the starting simplex.
    self.starting_matrix = [self.x + flex.random_double(self.n) \
                            for j in range(self.n + 1)]
    # NOTE(review): the DSSA branch is dead code (`if False`); the plain
    # simplex optimizer is always used. Confirm whether the toggle is
    # still wanted or should be removed.
    if False:
        self.optimizer = dssa(dimension = self.n,
                              matrix = self.starting_matrix,
                              evaluator = self,
                              tolerance = 1.e-6,
                              further_opt = True)
    else:
        self.optimizer = simplex_opt(dimension = self.n,
                                     matrix = self.starting_matrix,
                                     evaluator = self,
                                     tolerance = 1.e-3)

    # save the best scale factors
    self.x = self.optimizer.get_solution()
    # Re-prepend the constrained 0.0 factor for the first frame, then
    # split the solution vector back into s and b halves.
    self._scales_s = flex.double(1, 0.0)
    self._scales_s.extend(self.x[:len(self._frame_sizes) - 1])
    self._scales_b = flex.double(1, 0.0)
    self._scales_b.extend(self.x[len(self._frame_sizes) - 1:])

    # scale the raw intensity data for later reference
    # j walks the flat intensity/index arrays frame by frame.
    scaled_intensities = []
    j = 0
    for f, fs in enumerate(self._frame_sizes):
        for k in range(fs):
            hkl = self._indices[j]
            g_hl = self.scale_factor(f, hkl)
            scaled_intensities.append((self._intensities[j] / g_hl))
            j += 1
    self._scaled_intensities = scaled_intensities

    # now compute reflections with > 1 observation as starting point for
    # computing the Rmerge
    from collections import defaultdict
    # Map each Miller index to the observation positions where it occurs.
    multiplicity = defaultdict(list)
    for j, i in enumerate(self._indices):
        multiplicity[i].append(j)
    rmerge_n = 0.0
    rmerge_d = 0.0
    import math
    for i in multiplicity:
        # Singly-observed reflections contribute nothing to Rmerge.
        if len(multiplicity[i]) == 1:
            continue
        # NOTE(review): self._imean is assumed to hold the mean intensity
        # per Miller index, precomputed elsewhere — confirm against caller.
        imean = self._imean[i]
        for j in multiplicity[i]:
            rmerge_n += math.fabs(scaled_intensities[j] - imean)
            rmerge_d += imean
    # Raises ZeroDivisionError if no reflection has multiple observations.
    return rmerge_n / rmerge_d