def __init__(self, crystal, beam, detector, goniometer, scan, reflections):
    """Initialise the optimization."""
    from scitbx import simplex

    # FIXME in here this code is very unstable or actually broken if we
    # pass in a few lone images i.e. screening shots - propose need for an
    # alternative algorithm; meanwhile code death could be avoided with
    #
    # if scan.get_num_images() == 1:
    #     self.sigma = 0.5 * scan.get_oscillation_range()[1] * math.pi / 180.0
    #     return
    #
    # ... @JMP please could we discuss? assert best method to get sigma_m
    # in that case is to look at present / absent reflections as Mosflm does.

    # Likelihood target evaluated by the simplex optimiser via self.target().
    self._R = FractionOfObservedIntensity(
        crystal, beam, detector, goniometer, scan, reflections
    )

    # Two vertices spanning 1-3 degrees of mosaic spread, in radians.
    vertex_lo = 1 * math.pi / 180
    vertex_hi = 3 * math.pi / 180
    starting_simplex = [flex.double([vertex_lo]), flex.double([vertex_hi])]

    # One-dimensional downhill-simplex minimisation.
    optimizer = simplex.simplex_opt(
        1, matrix=starting_simplex, evaluator=self, tolerance=1e-7
    )

    # NOTE(review): the solution is exponentiated here although the start
    # vertices are not logarithms - confirm the parameterisation is intended.
    self.sigma = math.exp(optimizer.get_solution()[0])
def simplex(self):
    """Run several randomly-seeded simplex minimisations and keep the best.

    Extends/initialises the parameter vector, builds a fresh random simplex
    for each trial, and leaves the best-scoring solution in self.solution,
    self.score and the p(r) model.
    """
    self.n = self.n_params
    # Start from a flat guess, or pad a previous (shorter) solution with zeros.
    if (self.x is None):
        self.x = flex.double([1] + [0] * (self.n_params - 1))
    else:
        self.x = self.x.concatenate(
            flex.double([0] * (self.n_params - self.n_fst_pass)))
    self.pofr = pr_tools.pofr(self.d_max, self.n, self.prior)

    self.simplex_scores = []
    self.simplex_solutions = []
    for trial in xrange(self.simplex_trial):
        # Build a random simplex centred on the current solution.
        # BUGFIX: the vertex loop previously reused the trial counter name
        # `ii`, shadowing it; use a distinct loop variable.
        self.starting_simplex = []
        for jj in range(self.n):
            self.starting_simplex.append(
                (flex.random_double(self.n) * 2 - 1.0) + self.x)
        self.starting_simplex.append(self.x)
        self.optimizer = simplex.simplex_opt(dimension=self.n,
                                             matrix=self.starting_simplex,
                                             evaluator=self,
                                             tolerance=1e-4)
        self.solution = self.optimizer.get_solution()
        self.score = self.target(self.solution)
        self.simplex_scores.append(self.score)
        self.simplex_solutions.append(self.solution)

    # Keep the best-scoring trial and push it into the p(r) model.
    best_index = flex.min_index(flex.double(self.simplex_scores))
    self.solution = self.simplex_solutions[best_index]
    #self.cma(m=self.solution)
    self.score = self.simplex_scores[best_index]
    self.pofr.update(self.solution)
def __init__(self, n_params, d_max, data, n_int=35, alpha=1.0):
    """Set up a Chebyshev-bounded p(r) model and fit it by downhill simplex."""
    self.n = n_params
    self.d_max = d_max
    self.data = data
    self.n_int = n_int
    self.alpha = alpha
    self.gaussian = 2.0
    self.bounds = b.bounds(self.n, "cheb_coef.dat")
    # Tighten the coefficient bounds to mean +/- gaussian * variance.
    if (self.gaussian > 0):
        self.bounds.min = self.bounds.mean - self.gaussian * self.bounds.var
        self.bounds.max = self.bounds.mean + self.gaussian * self.bounds.var
    # Build the p(r) model.
    self.pofr = pr_tools.pofr(self.d_max, self.n,
                              n_int=self.n_int, m_int=self.n_int)
    # Random starting simplex: n+1 vertices drawn from [-1, 1)^n.
    self.starting_simplex = [
        flex.random_double(self.n) * 2 - 1 for _ in range(self.n + 1)
    ]
    self.optimizer = simplex.simplex_opt(dimension=self.n,
                                         matrix=self.starting_simplex,
                                         evaluator=self,
                                         tolerance=1e-3)
    # NOTE(review): sibling optimisers call get_solution(); confirm this
    # simplex_opt version actually provides GetResult().
    self.solution = self.optimizer.GetResult()
def __init__(self, values, parameterization, data, indices, bins,
             seed=None, log=None):
    """
    @param values parameterization of the SD terms
    @param data ISIGI dictionary of unmerged intensities
    @param indices array of miller indices to refine against
    @param bins array of flex.bool object specifying the bins to use to
    calculate the functional
    @param log Log to print to (none for stdout)
    """
    self.log = sys.stdout if log is None else log
    self.data = data
    self.intensity_bin_selections = bins
    self.indices = indices
    self.parameterization = parameterization
    self.n = 3
    self.x = flex.double([values.SDFAC, values.SDB, values.SDADD])

    # Seeded runs use a dedicated Mersenne twister for reproducibility.
    if seed is None:
        random_func = flex.random_double
    else:
        print("Using random seed %d"%seed, file=self.log)
        mt = flex.mersenne_twister(seed)
        random_func = mt.random_double

    # n+1 random vertices for the three-parameter simplex.
    self.starting_simplex = [random_func(self.n) for _ in range(self.n + 1)]
    self.optimizer = simplex_opt(
        dimension=self.n,
        matrix=self.starting_simplex,
        evaluator=self,
        tolerance=1e-1)
    self.x = self.optimizer.get_solution()
def __init__(self, frames, z, zeta, sweep):
    # Likelihood-based estimator for the reflecting-range parameter.
    from scitbx import simplex
    from scitbx.array_family import flex
    import numpy
    self.L = Likelihood(
        FractionOfObservedIntensity(frames, z, zeta, sweep.get_scan()))

    # Debug/diagnostic: evaluate the likelihood on a coarse grid and plot
    # it before optimising (pylab.show() blocks until the window closes).
    x = 0.1 + numpy.arange(1000) / 2000.0
    l = [self.L(xx) for xx in x]
    from matplotlib import pylab
    pylab.plot(x, l)
    pylab.show()

    # print 1/0
    # Hand-picked starting vertices for the one-dimensional simplex.
    startA = 0.3
    startB = 0.4
    # startA = 0.2*3.14159 / 180
    # startB = 0.3*3.14159 / 180
    print "Start: ", startA, startB
    starting_simplex = [flex.double([startA]), flex.double([startB])]
    # for ii in range(2):
    #     starting_simplex.append(flex.double([start]))#flex.random_double(1))

    self.optimizer = simplex.simplex_opt(
        1, matrix=starting_simplex, evaluator=self, tolerance=1e-7)
def run_simplex(self):
    """Minimise the target with a simplex whose last vertex is the origin."""
    # n random vertices in [-1, 1)^n plus the origin.
    vertices = [flex.random_double(self.n) * 2.0 - 1.0
                for _ in range(self.n)]
    vertices.append(flex.double(self.n, 0))
    self.optimizer = simplex.simplex_opt(
        dimension=self.n,
        matrix=vertices,
        evaluator=self,
        tolerance=1e-5
    )
    self.x = self.optimizer.get_solution()
def __init__(self, frames, z, zeta, sweep):
    """Estimate the reflecting range by maximising a likelihood function."""
    from scitbx import simplex
    from scitbx.array_family import flex
    import numpy

    self.L = Likelihood(
        FractionOfObservedIntensity(frames, z, zeta, sweep.get_scan()))

    # Diagnostic plot of the likelihood surface (blocks until closed).
    grid = 0.1 + numpy.arange(1000) / 2000.0
    values = [self.L(point) for point in grid]
    from matplotlib import pylab
    pylab.plot(grid, values)
    pylab.show()

    # Hand-picked vertices for the one-dimensional simplex.
    startA = 0.3
    startB = 0.4
    print("Start: ", startA, startB)
    starting_simplex = [flex.double([startA]), flex.double([startB])]

    self.optimizer = simplex.simplex_opt(1,
                                         matrix=starting_simplex,
                                         evaluator=self,
                                         tolerance=1e-7)
def simplex(self):
    """Run simplex_trial random restarts over n * n_refine parameters.

    Leaves the best solution (by self.target) in self.solution/self.score.
    """
    self.simplex_scores = []
    self.simplex_solutions = []
    dim = self.n * self.n_refine
    for trial in xrange(self.simplex_trial):
        # Random simplex around the current estimate.
        # BUGFIX: the vertex loop previously reused the trial counter name
        # `ii`, shadowing it; use a distinct loop variable.
        self.starting_simplex = []
        for jj in range(dim):
            self.starting_simplex.append(
                (flex.random_double(dim) * 2 - 1.0) * 1.0 + self.x)
        self.starting_simplex.append(self.x)
        self.optimizer = simplex.simplex_opt(dimension=dim,
                                             matrix=self.starting_simplex,
                                             evaluator=self,
                                             max_iter=500,
                                             tolerance=1e-4)
        self.solution = self.optimizer.get_solution()
        self.score = self.target(self.solution)
        self.simplex_scores.append(self.score)
        self.simplex_solutions.append(self.solution)
    # Keep the best trial; re-evaluate the target at the winning solution.
    best_index = flex.min_index(flex.double(self.simplex_scores))
    self.solution = self.simplex_solutions[best_index]
    self.score = self.target(self.solution)
def __init__(self, n_params, d_max, data, bounds, n_int=25, alpha=1.0):
    """Simplex refinement of a p(r) model with externally supplied bounds."""
    self.n = n_params
    self.d_max = d_max
    self.data = data
    self.n_int = n_int
    self.alpha = alpha
    self.gaussian = 0.0
    self.bounds = bounds
    self.step_size = 2
    # With gaussian <= 0 the supplied bounds are used unchanged.
    if (self.gaussian > 0):
        self.bounds.min = self.bounds.mean - self.gaussian * self.bounds.var
        self.bounds.max = self.bounds.mean + self.gaussian * self.bounds.var
    # Build the p(r) model.
    self.pofr = pr_tools.pofr(self.d_max, self.n,
                              n_int=self.n_int, m_int=self.n_int)
    # Orthogonal starting simplex of side step_size about a random point.
    self.starting_simplex = []
    cand = flex.random_double(self.n)
    for axis in range(self.n):
        self.starting_simplex.append(
            flex.double(self.orth(axis, self.n)) * self.step_size + cand)
    self.starting_simplex.append(cand)
    self.optimizer = simplex.simplex_opt(dimension=self.n,
                                         matrix=self.starting_simplex,
                                         evaluator=self,
                                         tolerance=1e-8)
    # NOTE(review): sibling optimisers call get_solution(); confirm this
    # simplex_opt version actually provides GetResult().
    self.solution = self.optimizer.GetResult()
def simplex(self): self.simplex_scores = [] self.simplex_solutions = [] for ii in xrange(self.simplex_trial): #make a random simplex self.starting_simplex = [] for ii in range(self.n): self.starting_simplex.append( random.random() * (flex.random_double(3) * 2 - 1.0) * 2 + self.x) self.starting_simplex.append(self.x) self.optimizer = simplex.simplex_opt(dimension=self.n, matrix=self.starting_simplex, evaluator=self, max_iter=50, tolerance=1e-4) self.solution = self.optimizer.GetResult() self.score = self.target(self.solution) self.simplex_scores.append(self.score) self.simplex_solutions.append(self.solution) best_index = flex.min_index(flex.double(self.simplex_scores)) self.score = self.simplex_scores[best_index] self.solution = self.simplex_solutions[best_index] if (self.translate): self.vector = self.solution.deep_copy() print "translate:", list(self.vector) else: self.angle = self.solution.deep_copy() print "rotate:", list(self.angle) self.target(self.solution)
def __init__(self, crystal, beam, detector, goniometer, scan, reflections):
    '''Initialise the optmization.'''
    from scitbx import simplex
    from scitbx.array_family import flex
    from math import pi, exp
    import random

    # Likelihood target evaluated by the simplex optimiser.
    self._R = FractionOfObservedIntensity(crystal, beam, detector, goniometer,
                                          scan, reflections)

    # Two random vertices below one degree, in radians.
    vertex_a = random.random() * pi / 180
    vertex_b = random.random() * pi / 180
    starting_simplex = [flex.double([vertex_a]), flex.double([vertex_b])]

    # One-dimensional downhill simplex.
    optimizer = simplex.simplex_opt(
        1, matrix=starting_simplex, evaluator=self, tolerance=1e-7)

    # NOTE(review): the solution is exponentiated although the start
    # vertices are not logarithms - confirm the parameterisation.
    self.sigma = exp(optimizer.get_solution()[0])
def optimize(self):
    """Minimise the target from a small random simplex near the origin."""
    # n+1 vertices drawn from [-0.2, 0.0)^n.
    vertices = [(flex.random_double(self.n) - 1.0) * 0.2
                for _ in range(self.n + 1)]
    optimizer = simplex.simplex_opt(dimension=self.n,
                                    matrix=vertices,
                                    evaluator=self,
                                    tolerance=1e-4)
    self.solution = optimizer.get_solution()
    self.score = self.target(self.solution)
def __init__(selfOO):
    """Two-parameter simplex refinement from a random start."""
    selfOO.n = 2
    # n+1 random vertices drawn from [0, 1)^n.
    selfOO.starting_simplex = [
        flex.random_double(selfOO.n) for _ in range(selfOO.n + 1)
    ]
    selfOO.optimizer = simplex_opt(dimension=selfOO.n,
                                   matrix=selfOO.starting_simplex,
                                   evaluator=selfOO,
                                   tolerance=1e-7)
    selfOO.x = selfOO.optimizer.get_solution()
def run_simplex(self, start, max_iter=500):
    """Run a 3D simplex seeded at `start` with +/- self.dx jitter."""
    dim = 3
    # First vertex is the start point; the remaining three are jittered.
    matrix = [start]
    for _ in range(dim):
        matrix.append(start + (flex.random_double(dim) * 2 - 1) * self.dx)
    optimizer = simplex.simplex_opt(
        dimension=dim,
        matrix=matrix,
        evaluator=self,
        max_iter=max_iter,
        tolerance=1e-5
    )
    return optimizer.get_solution()
def __init__(selfOO):
    """One-parameter simplex refinement from a random start."""
    selfOO.n = 1
    # n+1 random vertices drawn from [0, 1)^n.
    selfOO.starting_simplex = [
        flex.random_double(selfOO.n) for _ in range(selfOO.n + 1)
    ]
    selfOO.optimizer = simplex_opt(dimension=selfOO.n,
                                   matrix=selfOO.starting_simplex,
                                   evaluator=selfOO,
                                   tolerance=1e-4)
    selfOO.x = selfOO.optimizer.get_solution()
def __init__(
        self,
        crystal,
        beam,
        detector,
        goniometer,
        scan,
        reflections,
        n_macro_cycles=10,
):
    """Estimate the profile parameter sigma by maximum likelihood.

    Builds per-reflection statistics from the reflection data and runs a
    one-dimensional downhill simplex over log(sigma).  Note that
    n_macro_cycles is currently unused in this body.
    """
    from dials.array_family import flex
    from scitbx import simplex

    # Half of the oscillation width per image, in radians.
    dphi2 = scan.get_oscillation(deg=False)[1] / 2.0

    # Calculate a list of angles and zeta's
    tau, zeta, n, indices = self._calculate_tau_and_zeta(
        crystal, beam, detector, goniometer, scan, reflections)

    # Calculate zeta * (tau +- dphi / 2) / math.sqrt(2)
    self.e1 = (tau + dphi2) * flex.abs(zeta) / math.sqrt(2.0)
    self.e2 = (tau - dphi2) * flex.abs(zeta) / math.sqrt(2.0)
    self.n = n
    self.indices = indices
    if len(self.e1) == 0:
        raise RuntimeError(
            "Something went wrong. Zero pixels selected for estimation of profile parameters."
        )

    # Compute intensity per reflection: sum counts between adjacent
    # boundary indices.
    self.K = flex.double()
    for i0, i1 in zip(self.indices[:-1], self.indices[1:]):
        selection = flex.size_t(range(i0, i1))
        self.K.append(flex.sum(self.n.select(selection)))

    # Starting vertices at 0.1 and 1 degrees of mosaic spread (the vertices
    # are log(sigma in radians); a stale comment previously said 1, 3).
    start = math.log(0.1 * math.pi / 180)
    stop = math.log(1 * math.pi / 180)
    starting_simplex = [flex.double([start]), flex.double([stop])]

    # Initialise the optimizer
    optimizer = simplex.simplex_opt(1,
                                    matrix=starting_simplex,
                                    evaluator=self,
                                    tolerance=1e-3)

    # Get the solution: the parameter is optimised in log-space, so
    # exponentiate to recover sigma.
    sigma = math.exp(optimizer.get_solution()[0])

    # Save the result
    self.sigma = sigma
def run_simplex(self, start, max_iter=500):
    """Three-parameter simplex minimisation seeded at `start`.

    The three extra vertices are the start point jittered by a uniform
    random amount in [-self.dx, self.dx) per coordinate.
    """
    dim = 3
    starting_matrix = [start] + [
        start + (flex.random_double(dim) * 2 - 1) * self.dx
        for _ in range(dim)
    ]
    optimizer = simplex.simplex_opt(dimension=dim,
                                    matrix=starting_matrix,
                                    evaluator=self,
                                    max_iter=max_iter,
                                    tolerance=1e-5)
    return optimizer.get_solution()
def iterate(self):
    # One cycle of normal-mode perturbation refinement: rebuild the model
    # from the previous iteration's PDB file, refine each candidate extra
    # mode with a small simplex, keep the best candidate, write out the
    # perturbed coordinates, then recurse until stopCheck() sets self.stop.
    if (self.Niter > 0):
        # need to build PDB object from the last PDB file
        iter_name = self.root + str(self.Niter) + ".pdb"
        t1 = time.time()
        self.pdb = PDB(iter_name, method=self.method)
        self.nmode = self.pdb.Hessian(self.cutoff, self.nmode_init,
                                      self.scale_factor) - 1
        self.time_nm += (time.time() - t1)
    self.n1 = self.nmodes + 1
    score = []
    candidates = []
    # Try each not-yet-used normal mode as an extra degree of freedom.
    for kk in range(self.nmode_init - self.nmodes):
        self.modes = flex.int(range(self.nmodes)) + 7
        self.modes.append(kk + 7 + self.nmodes)
        # Orthogonal simplex of side step_size about the origin.
        self.starting_simplex = []
        cand = flex.double(self.n1, 0)
        for ii in range(self.n1):
            self.starting_simplex.append(
                flex.double(self.orth(ii, self.n1)) * self.step_size + cand)
        self.starting_simplex.append(cand)
        self.optimizer = simplex.simplex_opt(dimension=self.n1,
                                             matrix=self.starting_simplex,
                                             evaluator=self,
                                             monitor_cycle=4,
                                             tolerance=1e-1)
        self.x = self.optimizer.get_solution()
        candidates.append(self.x.deep_copy())
        score.append(self.optimizer.get_score())
    # NOTE(review): only the first topn scores are considered even though
    # all candidates were refined above - confirm this is intended.
    minscore = min(score[0:self.topn])
    print self.Niter, minscore, self.counter
    if ((self.Niter % self.optNum) > 1):
        self.stopCheck(minscore)
    self.updateScore(minscore)
    minvec = candidates[score.index(minscore)]
    new_coord = flex.vec3_double(self.pdb.NMPerturb(self.modes, minvec))
    self.Niter = self.Niter + 1
    iter_name = self.root + str(self.Niter) + ".pdb"
    self.pdb.writePDB(new_coord, iter_name)
    # Every optNum-th iteration, geometry-optimise the written model.
    if (self.Niter % self.optNum == 0):
        processed_pdb, pdb_inp = self.pdb_processor.process_pdb_files(
            pdb_file_names=[iter_name])
        new_coord = geo_opt(processed_pdb, self.log)
        self.pdb.writePDB(new_coord, iter_name)
    if (not self.stop):
        # if(self.Niter < 50):
        self.iterate()
def __init__(self, name): self.n = 2 self.x = start.deep_copy() self.name = name self.starting_simplex = [] self.fcount = 0 for ii in range(self.n + 1): self.starting_simplex.append((flex.random_double(self.n) / 2 - 1) * 0.0003 + self.x) self.optimizer = simplex.simplex_opt( dimension=self.n, matrix=self.starting_simplex, evaluator=self, tolerance=1e-10 ) self.x = self.optimizer.get_solution() print "SIMPLEX ITRATIONS", self.optimizer.count, self.fcount, "SOLUTION", list(self.x), self.target(self.x)
def __init__(self, wide_search_offset):
    """Refine a two-parameter beam-centre offset with a random-start simplex."""
    self.n = 2
    self.wide_search_offset = wide_search_offset
    vertices = [flex.random_double(self.n) for _ in range(self.n + 1)]
    self.optimizer = simplex_opt(
        dimension=self.n,
        matrix=vertices,
        evaluator=self,
        tolerance=1e-7,
    )
    self.x = self.optimizer.get_solution()
    # Project the solution onto the beam basis vectors.
    self.offset = self.x[0] * 0.2 * beamr1 + self.x[1] * 0.2 * beamr2
    if self.wide_search_offset is not None:
        self.offset += self.wide_search_offset
def __init__(selfOO, wide_search_offset=None):
    """Two-parameter beam-centre refinement via downhill simplex."""
    selfOO.n = 2
    selfOO.wide_search_offset = wide_search_offset
    # n+1 random vertices drawn from [0, 1)^2.
    selfOO.starting_simplex = [
        flex.random_double(selfOO.n) for _ in range(selfOO.n + 1)
    ]
    selfOO.optimizer = simplex_opt(dimension=selfOO.n,
                                   matrix=selfOO.starting_simplex,
                                   evaluator=selfOO,
                                   tolerance=1e-7)
    selfOO.x = selfOO.optimizer.get_solution()
    # Project the solution onto the beam basis vectors.
    selfOO.offset = selfOO.x[0] * 0.2 * beamr1 + selfOO.x[1] * 0.2 * beamr2
    if selfOO.wide_search_offset is not None:
        selfOO.offset += selfOO.wide_search_offset
def __init__(self):
    """Refine the two model parameters by downhill simplex."""
    self.n = 2
    # Initial guess; replaced by the simplex solution below.
    self.x = flex.double([0.5, 0.0])
    self.starting_simplex = [flex.random_double(self.n)
                             for _ in xrange(self.n + 1)]
    self.optimizer = simplex_opt(dimension=self.n,
                                 matrix=self.starting_simplex,
                                 evaluator=self,
                                 tolerance=1e-1)
    self.x = self.optimizer.get_solution()
def __init__(self):
    """Two-parameter refinement from a random starting simplex."""
    self.n = 2
    self.x = flex.double([0.5, 0.0])
    # Build n+1 random vertices, one per loop pass.
    self.starting_simplex = []
    for vertex in xrange(self.n + 1):
        self.starting_simplex.append(flex.random_double(self.n))
    self.optimizer = simplex_opt(dimension=self.n,
                                 matrix=self.starting_simplex,
                                 evaluator=self,
                                 tolerance=1e-1)
    self.x = self.optimizer.get_solution()
def __init__(self, values, offset, evaluator, max_iter, tolerance=1e-10):
    """Build the starting simplex and run the optimisation once.

    The vertices come from self.generate_start; the final solution
    overwrites self.x.
    """
    self.n = len(values)
    self.x = values
    self.starting_simplex = self.generate_start(values, offset)
    opt = simplex.simplex_opt(
        dimension=self.n,
        matrix=self.starting_simplex,
        evaluator=evaluator,
        max_iter=max_iter,
        tolerance=tolerance,
    )
    self.x = opt.get_solution()
def run(self, initial_simplex):
    """Calculate scaling"""
    # Minimise the evaluator target over the supplied starting simplex.
    self.optimised = simplex.simplex_opt(
        dimension=len(initial_simplex[0]),
        matrix=initial_simplex,
        evaluator=self)
    self.optimised_values = self.optimised.get_solution()
    # Apply the optimised transform to the input values.
    self.out_values = self.transform(values=self.scl_values)
    # RMSDs to the reference before and after scaling.
    self.unscaled_rmsd = self.rmsd_to_ref(values=self.scl_values)
    self.scaled_rmsd = self.rmsd_to_ref(values=self.out_values)
    return self.optimised_values
def __init__(self, values, offset, evaluator, max_iter):
    """Run a simplex minimisation from vertices built by generate_start()."""
    self.n = len(values)
    self.x = values
    self.starting_simplex = generate_start(values, offset)
    self.fcount = 0
    opt = simplex.simplex_opt(
        dimension=self.n,
        matrix=self.starting_simplex,
        evaluator=evaluator,
        max_iter=max_iter,
        tolerance=1e-10,
    )
    self.x = opt.get_solution()
def __init__(self, name):
    """Two-parameter simplex refinement around the externally-defined
    `start` vector."""
    self.n = 2
    # NOTE(review): `start` is resolved from an enclosing scope - confirm.
    self.x = start.deep_copy()
    self.name = name
    self.fcount = 0
    jitter = 0.0003
    # Small jittered vertices about the start point.
    self.starting_simplex = [
        (flex.random_double(self.n) / 2 - 1) * jitter + self.x
        for _ in range(self.n + 1)
    ]
    self.optimizer = simplex.simplex_opt(dimension=self.n,
                                         matrix=self.starting_simplex,
                                         evaluator=self,
                                         tolerance=1e-10)
    self.x = self.optimizer.get_solution()
    print("SIMPLEX ITRATIONS", self.optimizer.count, self.fcount,
          "SOLUTION", list(self.x), self.target(self.x))
def optimize_further(self):
    """Polish each candidate with its own simplex and keep the best."""
    self.solutions = []
    self.scores = flex.double()
    for candidate in self.candidates:
        # Random simplex scaled by simplex_scale around the candidate.
        vertices = [
            flex.random_double(self.dimension) * self.simplex_scale
            + candidate
            for _ in range(self.dimension + 1)
        ]
        opt = simplex.simplex_opt(dimension=self.dimension,
                                  matrix=vertices,
                                  evaluator=self.evaluator,
                                  tolerance=self.tolerance)
        self.solutions.append(opt.get_solution())
        self.scores.append(opt.get_score())
    best = flex.min_index(self.scores)
    self.best_solution = self.solutions[best]
    self.best_score = self.scores[best]
def __init__(self, reflections, sweep):
    '''Initialise the optmization.'''
    from scitbx import simplex
    from scitbx.array_family import flex
    from math import pi
    import random

    # Likelihood target evaluated by the optimiser.
    self._R = FractionOfObservedIntensity(reflections, sweep)

    # Two random vertices below 0.1 degree, in radians.
    vertex_a = 0.1 * random.random() * pi / 180
    vertex_b = 0.1 * random.random() * pi / 180
    starting_simplex = [flex.double([vertex_a]), flex.double([vertex_b])]

    # One-dimensional downhill simplex.
    self._optimizer = simplex.simplex_opt(1,
                                          matrix=starting_simplex,
                                          evaluator=self,
                                          tolerance=1e-7)
def __init__(
        self,
        experiments,
        reflections,
        initial_mosaic_parameters,
        wavelength_func,
        refine_bandpass=False,
):
    """Initialize the minimizer and perform the minimization
    @param experiments ExperimentList
    @param reflections flex.reflection_table
    @param initial_mosaic_parameters Tuple of domain size (angstroms) and
    half mosaic angle (degrees)
    @param wavelength_func Function to compute wavelengths
    @param refine_bandpass If True, refine band pass for each experiment.
    """
    self.experiments = experiments
    self.reflections = reflections
    self.wavelength_func = wavelength_func
    self.refine_bandpass = refine_bandpass
    self.x = flex.double(initial_mosaic_parameters)
    self.n = 2
    if refine_bandpass:
        for expt_id, expt in enumerate(experiments):
            refls = reflections.select(reflections["id"] == expt_id)
            wavelength_min, wavelength_max = estimate_bandpass(refls)
            expt.crystal.bandpass = wavelength_min, wavelength_max
            # Refine width and midpoint rather than min/max so the two
            # band-pass edges cannot cross over during refinement.
            self.x.append(wavelength_max - wavelength_min)
            self.x.append((wavelength_min + wavelength_max) / 2)
            self.n += 2
    # Vertices are the start point scaled per-coordinate by random
    # factors drawn from [0.9, 1.0).
    self.starting_simplex = [
        (0.9 + (flex.random_double(self.n) / 10)) * self.x
        for _ in xrange(self.n + 1)
    ]
    self.optimizer = simplex_opt(
        dimension=self.n,
        matrix=self.starting_simplex,
        evaluator=self,
        tolerance=1e-1,
    )
    self.x = self.optimizer.get_solution()
def __init__(self, r, x, seed=None, plot=False):
    """Simplex fit of 2 * n_datapoints parameters from a seeded random start."""
    from dials.array_family import flex
    self.plot = plot
    assert seed is not None, 'seed should not be None'
    self.n = r.focus()[0] * 2  # Number of parameters
    self.x = x
    self.r = r
    self.n_datapoints = r.focus()[0]
    self.data = r
    # Reproducible jitter about the start point via a seeded twister.
    mt = flex.mersenne_twister(seed)
    random_scale = .5
    self.starting_simplex = []
    for _ in range(self.n + 1):
        jitter = random_scale * (((mt.random_double(self.n)) / 2.0) - 1.0)
        self.starting_simplex.append(jitter + self.x)
    self.optimizer = simplex_opt(dimension=self.n,
                                 matrix=self.starting_simplex,
                                 evaluator=self,
                                 tolerance=1e-3)
    self.x = self.optimizer.get_solution()
def __init__(selfOO, alt_crystal, Ncells_abc, host_runner, PP, n_cycles,
             s_cycles):
    # Build a simplex over every refinable parameter registered with the
    # host runner and minimise for at most s_cycles iterations.
    selfOO.n_cycles = n_cycles
    selfOO.PP = PP
    selfOO.crnm = host_runner
    # count the full number of parameters
    selfOO.n = 0
    selfOO.iteration = 0
    selfOO.alt_crystal = alt_crystal
    selfOO.Ncells_abc = Ncells_abc
    initial_values = flex.double()
    selfOO.starting_simplex = [initial_values]
    bounding_values = flex.double()
    for key in selfOO.crnm.ref_params:
        selfOO.crnm.ref_params[key].accept()
        # The last accepted chain value of each labelled parameter forms
        # the first simplex vertex.
        for label in selfOO.crnm.ref_params[key].display_labels:
            selfOO.n += 1
            print(label, selfOO.crnm.ref_params[key].chain[label][-1])
            initial_values.append(
                selfOO.crnm.ref_params[key].chain[label][-1])
        # Per-parameter simplex intervals give the bounding vertex values.
        vals = selfOO.crnm.ref_params[key].generate_simplex_interval()
        for il, label in enumerate(
                selfOO.crnm.ref_params[key].display_labels):
            print(label, vals[il])
            bounding_values.append(vals[il])
    #print("there are %d parameters"%selfOO.n)
    selfOO.crnm.plot_all(selfOO.iteration + 1, of=n_cycles)
    # Each extra vertex replaces one coordinate with its bounding value.
    for ii in range(selfOO.n):
        vertex = copy.deepcopy(initial_values)
        vertex[ii] = bounding_values[ii]
        selfOO.starting_simplex.append(vertex)
    print(selfOO.starting_simplex)
    selfOO.optimizer = simplex_opt(dimension=selfOO.n,
                                   matrix=selfOO.starting_simplex,
                                   evaluator=selfOO,
                                   monitor_cycle=20,
                                   max_iter=s_cycles - 1,
                                   tolerance=1e-7)
    selfOO.x = selfOO.optimizer.get_solution()
def __init__(self, crystal, beam, detector, goniometer, scan, reflections):
    '''Initialise the optmization.'''
    from scitbx import simplex
    from scitbx.array_family import flex
    from math import pi, exp

    # FIXME this is unstable / broken when passed a few lone images (e.g.
    # screening shots); a fallback such as
    #   if scan.get_num_images() == 1:
    #       self.sigma = 0.5 * scan.get_oscillation_range()[1] * pi / 180.0
    #       return
    # would avoid code death - @JMP to discuss; the best method there may
    # be present/absent reflections as per Mosflm.

    # Likelihood target evaluated by the simplex optimiser.
    self._R = FractionOfObservedIntensity(crystal, beam, detector, goniometer,
                                          scan, reflections)

    # Vertices at 1 and 3 degrees of mosaic spread, in radians.
    vertex_lo = 1 * pi / 180
    vertex_hi = 3 * pi / 180
    starting_simplex = [flex.double([vertex_lo]), flex.double([vertex_hi])]

    # One-dimensional downhill simplex.
    optimizer = simplex.simplex_opt(
        1, matrix=starting_simplex, evaluator=self, tolerance=1e-7)

    # NOTE(review): exponentiated although the vertices are not
    # logarithms - confirm the parameterisation is intended.
    self.sigma = exp(optimizer.get_solution()[0])
def scale(self):
    '''Find scale factors s, b that minimise target function.

    Refines per-frame scale factors by downhill simplex, rescales the raw
    intensities, and returns the resulting Rmerge over multiply-observed
    reflections.
    '''
    from scitbx.direct_search_simulated_annealing import dssa
    from scitbx.simplex import simplex_opt
    from scitbx.array_family import flex

    # only scale second and subsequent scale factors - first ones are
    # constrained to 0.0
    self.n = 2 * len(self._frame_sizes) - 2
    self.x = flex.double(self._scales_s[1:] + self._scales_b[1:])

    # Random simplex of n+1 vertices about the current scales.
    self.starting_matrix = [self.x + flex.random_double(self.n) \
                            for j in range(self.n + 1)]

    # DSSA branch kept for experimentation; plain simplex is what runs.
    if False:
        self.optimizer = dssa(dimension = self.n,
                              matrix = self.starting_matrix,
                              evaluator = self,
                              tolerance = 1.e-6,
                              further_opt = True)
    else:
        self.optimizer = simplex_opt(dimension = self.n,
                                     matrix = self.starting_matrix,
                                     evaluator = self,
                                     tolerance = 1.e-3)

    # save the best scale factors (first frame pinned to 0.0)
    self.x = self.optimizer.get_solution()
    self._scales_s = flex.double(1, 0.0)
    self._scales_s.extend(self.x[:len(self._frame_sizes) - 1])
    self._scales_b = flex.double(1, 0.0)
    self._scales_b.extend(self.x[len(self._frame_sizes) - 1:])

    # scale the raw intensity data for later reference
    scaled_intensities = []
    j = 0
    for f, fs in enumerate(self._frame_sizes):
        for k in range(fs):
            hkl = self._indices[j]
            g_hl = self.scale_factor(f, hkl)
            scaled_intensities.append((self._intensities[j] / g_hl))
            j += 1
    self._scaled_intensities = scaled_intensities

    # now compute reflections with > 1 observation as starting point for
    # computing the Rmerge
    from collections import defaultdict
    multiplicity = defaultdict(list)
    for j, i in enumerate(self._indices):
        multiplicity[i].append(j)

    rmerge_n = 0.0
    rmerge_d = 0.0

    import math
    for i in multiplicity:
        if len(multiplicity[i]) == 1:
            continue
        imean = self._imean[i]
        for j in multiplicity[i]:
            rmerge_n += math.fabs(scaled_intensities[j] - imean)
            rmerge_d += imean

    return rmerge_n / rmerge_d
def __init__(self, miller_obs, miller_calc, min_d_star_sq=0.0, max_d_star_sq=2.0, n_points=2000, level=6.0): assert miller_obs.indices().all_eq(miller_calc.indices()) if (miller_obs.is_xray_amplitude_array()): miller_obs = miller_obs.f_as_f_sq() if (miller_calc.is_xray_amplitude_array()): miller_calc = miller_calc.f_as_f_sq() self.obs = miller_obs.deep_copy() self.calc = miller_calc.deep_copy() self.mind = min_d_star_sq self.maxd = max_d_star_sq self.m = n_points self.n = 2 self.level = level norma_obs = absolute_scaling.kernel_normalisation( miller_array=self.obs, auto_kernel=True, n_bins=45, n_term=17) norma_calc = absolute_scaling.kernel_normalisation( miller_array=self.calc, auto_kernel=True, n_bins=45, n_term=17) obs_d_star_sq = norma_obs.d_star_sq_array calc_d_star_sq = norma_calc.d_star_sq_array sel_calc_obs = norma_calc.bin_selection.select(norma_obs.bin_selection) sel_obs_calc = norma_obs.bin_selection.select(norma_calc.bin_selection) sel = ((obs_d_star_sq > low_lim) & (obs_d_star_sq < high_lim) & (norma_obs.mean_I_array > 0)) sel = sel.select(sel_calc_obs) self.obs_d_star_sq = obs_d_star_sq.select(sel) self.calc_d_star_sq = calc_d_star_sq.select(sel_obs_calc).select(sel) self.mean_obs = norma_obs.mean_I_array.select(sel) self.mean_calc = norma_calc.mean_I_array.select(sel_obs_calc).select( sel) self.var_obs = norma_obs.var_I_array.select(sel) self.var_calc = norma_calc.var_I_array.select(sel_obs_calc).select(sel) # make an interpolator object please self.interpol = scale_curves.curve_interpolator( self.mind, self.maxd, self.m) # do the interpolation tmp_obs_d_star_sq , self.mean_obs,self.obs_a , self.obs_b = \ self.interpol.interpolate(self.obs_d_star_sq,self.mean_obs) self.obs_d_star_sq , self.var_obs,self.obs_a , self.obs_b = \ self.interpol.interpolate(self.obs_d_star_sq, self.var_obs) tmp_calc_d_star_sq , self.mean_calc,self.calc_a, self.calc_b = \ self.interpol.interpolate(self.calc_d_star_sq,self.mean_calc) self.calc_d_star_sq, 
self.var_calc,self.calc_a , self.calc_b = \ self.interpol.interpolate(self.calc_d_star_sq,self.var_calc) self.mean_ratio_engine = chebyshev_polynome(mean_coefs.size(), low_lim - 1e-3, high_lim + 1e-3, mean_coefs) self.std_ratio_engine = chebyshev_polynome(std_coefs.size(), low_lim - 1e-3, high_lim + 1e-3, std_coefs) self.x = flex.double([0, 0]) self.low_lim_for_scaling = 1.0 / (4.0 * 4.0) #0.0625 selection = (self.calc_d_star_sq > self.low_lim_for_scaling) if (selection.count(True) == 0): raise Sorry( "No reflections within required resolution range after " + "filtering.") self.weight_array = selection.as_double() / (2.0 * self.var_obs) assert (not self.weight_array.all_eq(0.0)) self.mean = flex.double( [1.0 / (flex.sum(self.mean_calc) / flex.sum(self.mean_obs)), 0.0]) self.sigmas = flex.double([0.5, 0.5]) s = 1.0 / (flex.sum(self.weight_array * self.mean_calc) / flex.sum(self.weight_array * self.mean_obs)) b = 0.0 self.sart_simplex = [ flex.double([s, b]), flex.double([s + 0.1, b + 1.1]), flex.double([s - 0.1, b - 1.1]) ] self.opti = simplex.simplex_opt(2, self.sart_simplex, self) sol = self.opti.get_solution() self.scale = abs(sol[0]) self.b_value = sol[1] self.modify_weights() self.all_bad_z_scores = self.weight_array.all_eq(0.0) if (not self.all_bad_z_scores): s = 1.0 / (flex.sum(self.weight_array * self.mean_calc) / flex.sum(self.weight_array * self.mean_obs)) b = 0.0 self.sart_simplex = [ flex.double([s, b]), flex.double([s + 0.1, b + 1.1]), flex.double([s - 0.1, b - 1.1]) ] self.opti = simplex.simplex_opt(2, self.sart_simplex, self)
def __init__(self, miller_obs, miller_calc, min_d_star_sq=0.0, max_d_star_sq=2.0, n_points=2000, level=6.0): assert miller_obs.indices().all_eq(miller_calc.indices()) if (miller_obs.is_xray_amplitude_array()) : miller_obs = miller_obs.f_as_f_sq() if (miller_calc.is_xray_amplitude_array()) : miller_calc = miller_calc.f_as_f_sq() self.obs = miller_obs.deep_copy() self.calc = miller_calc.deep_copy() self.mind = min_d_star_sq self.maxd = max_d_star_sq self.m = n_points self.n = 2 self.level = level norma_obs = absolute_scaling.kernel_normalisation( miller_array=self.obs, auto_kernel=True, n_bins=45, n_term=17) norma_calc = absolute_scaling.kernel_normalisation( miller_array=self.calc, auto_kernel=True, n_bins=45, n_term=17) obs_d_star_sq = norma_obs.d_star_sq_array calc_d_star_sq = norma_calc.d_star_sq_array sel_calc_obs = norma_calc.bin_selection.select(norma_obs.bin_selection) sel_obs_calc = norma_obs.bin_selection.select(norma_calc.bin_selection) sel = ((obs_d_star_sq > low_lim) & (obs_d_star_sq < high_lim) & (norma_obs.mean_I_array > 0)) sel = sel.select(sel_calc_obs) self.obs_d_star_sq = obs_d_star_sq.select( sel ) self.calc_d_star_sq = calc_d_star_sq.select( sel_obs_calc ).select(sel) self.mean_obs = norma_obs.mean_I_array.select(sel) self.mean_calc = norma_calc.mean_I_array.select( sel_obs_calc).select(sel) self.var_obs = norma_obs.var_I_array.select(sel) self.var_calc = norma_calc.var_I_array.select( sel_obs_calc).select(sel) # make an interpolator object please self.interpol = scale_curves.curve_interpolator( self.mind, self.maxd, self.m) # do the interpolation tmp_obs_d_star_sq , self.mean_obs,self.obs_a , self.obs_b = \ self.interpol.interpolate(self.obs_d_star_sq,self.mean_obs) self.obs_d_star_sq , self.var_obs,self.obs_a , self.obs_b = \ self.interpol.interpolate(self.obs_d_star_sq, self.var_obs) tmp_calc_d_star_sq , self.mean_calc,self.calc_a, self.calc_b = \ self.interpol.interpolate(self.calc_d_star_sq,self.mean_calc) self.calc_d_star_sq, 
self.var_calc,self.calc_a , self.calc_b = \ self.interpol.interpolate(self.calc_d_star_sq,self.var_calc) self.mean_ratio_engine = chebyshev_polynome( mean_coefs.size(), low_lim-1e-3, high_lim+1e-3,mean_coefs) self.std_ratio_engine = chebyshev_polynome( std_coefs.size(), low_lim-1e-3, high_lim+1e-3,std_coefs) self.x = flex.double([0,0]) self.low_lim_for_scaling = 1.0/(4.0*4.0) #0.0625 selection = (self.calc_d_star_sq > self.low_lim_for_scaling) if (selection.count(True) == 0) : raise Sorry("No reflections within required resolution range after "+ "filtering.") self.weight_array = selection.as_double() / (2.0 * self.var_obs) assert (not self.weight_array.all_eq(0.0)) self.mean = flex.double( [1.0/(flex.sum(self.mean_calc) / flex.sum(self.mean_obs)), 0.0 ] ) self.sigmas = flex.double( [0.5, 0.5] ) s = 1.0/(flex.sum(self.weight_array*self.mean_calc)/ flex.sum(self.weight_array*self.mean_obs)) b = 0.0 self.sart_simplex = [ flex.double([s,b]), flex.double([s+0.1,b+1.1]), flex.double([s-0.1,b-1.1]) ] self.opti = simplex.simplex_opt( 2, self.sart_simplex, self) sol = self.opti.get_solution() self.scale = abs(sol[0]) self.b_value = sol[1] self.modify_weights() self.all_bad_z_scores = self.weight_array.all_eq(0.0) if (not self.all_bad_z_scores) : s = 1.0/(flex.sum(self.weight_array*self.mean_calc) / flex.sum(self.weight_array*self.mean_obs)) b = 0.0 self.sart_simplex = [ flex.double([s,b]), flex.double([s+0.1,b+1.1]), flex.double([s-0.1,b-1.1]) ] self.opti = simplex.simplex_opt( 2, self.sart_simplex, self)
def iterate(self):
    """Run one normal-mode refinement cycle and recurse to the next.

    Each cycle: (re)build the PDB/Hessian from the previous cycle's file,
    score random mode-amplitude vectors, polish the best `topn` with a
    simplex search, write the perturbed model to disk and (periodically)
    rebuild it with pulchra. Recurses until 40 cycles have run.
    """
    self.n1 = self.nmodes
    if (self.Niter > 0):
        # need to build PDB object from the last PDB file
        iter_name = self.root + str(self.Niter) + ".pdb"
        self.pdb = PDB(iter_name, method=self.method)
        self.pdb.Hessian(self.cutoff, self.nmode_init, self.scale_factor)
    # Generate random normal modes: the first (nmodes-1) low-frequency
    # modes (offset 7 skips the six rigid-body modes), plus one randomly
    # chosen higher-frequency mode.
    self.modes = flex.int(range(self.nmodes - 1)) + 7
    self.modes.append(
        int(random.random() * (self.nmode_init - self.nmodes)) + 7 +
        self.nmodes)
    self.scale = 0
    # Score topn*10 random candidate amplitude vectors, kept sorted by
    # ascending score via insertion.
    candidates = []
    score = []
    for kk in range(self.topn * 10):
        if (kk == 0):
            # First candidate is the zero vector (unperturbed model).
            vec = flex.random_double(self.nmodes) * 0
        else:
            # Uniform random amplitudes in [-step_size, step_size].
            vec = (flex.random_double(self.nmodes) - 0.5) * 2 * self.step_size
        result = self.target(vec)
        insert = 0
        for ii in range(len(score)):
            if (score[ii] > result):
                score.insert(ii, result)
                candidates.insert(ii, vec)
                insert = 1
                break
        if (insert == 0):
            score.append(result)
            candidates.append(vec)
    # Local simplex refinement of the best topn candidates; each simplex
    # is spanned by orthogonal basis vectors scaled by step_size.
    for kk in range(self.topn):
        self.starting_simplex = []
        cand = candidates[kk]
        for ii in range(self.n1):
            self.starting_simplex.append(
                flex.double(self.orth(ii, self.n1)) * self.step_size + cand)
        self.starting_simplex.append(cand)
        self.optimizer = simplex.simplex_opt(dimension=self.n1,
                                             matrix=self.starting_simplex,
                                             evaluator=self,
                                             monitor_cycle=5,
                                             tolerance=1e-2)
        self.x = self.optimizer.get_solution()
        candidates[kk] = self.x.deep_copy()
        score[kk] = self.optimizer.get_score()
    minscore = min(score[0:self.topn])
    # Progress to stdout and to the chi log file.
    print self.Niter, minscore, self.counter
    print >> self.chi, self.Niter, minscore
    # Convergence bookkeeping, skipped just after each pulchra rebuild
    # (the rebuild changes the model, so scores are not comparable).
    if ((self.Niter % self.optNum) > 1):
        self.stopCheck(minscore)
        self.updateScore(minscore)
    minvec = candidates[score.index(minscore)]
    # Apply the winning amplitudes along the chosen modes.
    new_coord = self.pdb.NMPerturb(self.modes, minvec)
    print list(minvec)
    self.Niter = self.Niter + 1
    iter_base = self.root + str(self.Niter)
    iter_name = iter_base + ".pdb"
    self.pdb.writePDB(new_coord, iter_name)
    # Every optNum cycles, rebuild full-atom model with pulchra in place.
    if (self.Niter % self.optNum == 0):
        print self.pulchra
        run_command(command=self.pulchra + ' ' + iter_name)
        run_command(command='mv ' + iter_base + '.rebuilt.pdb ' + iter_name)
        #processed_pdb, pdb_inp = self.pdb_processor.process_pdb_files( pdb_file_names=[iter_name] )
        #new_coord = geo_opt(processed_pdb, self.log)
        #self.pdb.writePDB(new_coord,iter_name)
    # if(not self.stop):
    # NOTE(review): the iteration cap of 40 is hard-coded; the commented
    # self.stop check suggests convergence-based termination was intended.
    if (self.Niter < 40):
        self.iterate()
def optimize(self): self.candidate = flex.random_double(self.Nb) candidates = [] score = [] for kk in range(self.topn * 10): if (kk == 0): ab_s = self.candidate else: ab_s = (flex.random_double(self.Nb) * self.step_size + self.candidate) result = self.target(ab_s) insert = 0 for ii in range(len(score)): if (score[ii] > result): score.insert(ii, result) candidates.insert(ii, ab_s) insert = 1 break if (insert == 0): score.append(result) candidates.append(ab_s) for kk in range(self.topn): self.starting_simplex = [] cand = candidates[kk] for ii in range(self.Nb): self.starting_simplex.append( flex.double(self.orth(ii, self.Nb)) * self.step_size + cand) self.starting_simplex.append(cand) self.optimizer = simplex.simplex_opt(dimension=self.Nb, matrix=self.starting_simplex, evaluator=self, tolerance=1e-10) self.x = self.optimizer.GetResult() candidates[kk] = self.x.deep_copy() score[kk] = self.target(self.x) minscore = min(score[0:self.topn]) minvec = candidates[score.index(minscore)] self.fpr.load_coefs(minvec[0:]) k1 = minvec[0] new_I = pr2I(self.N_pr, self.fpr, self.q, k1) print '&', list(minvec), "coef", minscore for r, n in zip(self.q, new_I): #,self.calc_I): print r, n self.candidate = minvec.deep_copy() self.calc_I = new_I.deep_copy() self.Niter = self.Niter + 1 print '&' for r, n in zip(self.q, self.expt_I): #,self.calc_I): print r, n r = flex.double(range(-50, 50)) / 50.0 + 1e-7 pr = self.fpr.get_p_of_r(r) sum_pr = flex.sum(pr) * self.dMax pr = pr / sum_pr * 100 r = (r + 1) * self.dMax / 2.0 print '&' for ri, pri in zip(r, pr): print ri, pri self.prior = pr.deep_copy() self.Nb = self.Nb + self.Nb self.fpr = function(self.Nb, self.N_pr, 1.0, self.dMax) if (self.Niter < 2): self.optimize()
def estimate(self, num_reflections):
    """Fit the profile-model parameters by maximum likelihood.

    Runs a simplex search over log-parameters on a subset of the stored
    reflections, recording every evaluation in self.history. Updates
    self.parameters (fitted values), self.simplex (final simplex, so a
    subsequent call can resume) and self.log_likelihood (per-reflection
    log likelihood of the last evaluation).
    """
    from scitbx import simplex
    from copy import deepcopy
    from dials.array_family import flex
    from random import uniform

    # Work on a random subset of the stored reflections
    subset = self._select_reflections(self.reflections, num_reflections)

    # One search dimension per named model parameter
    n_param = len(self.history.names)

    # Seed the simplex with random points in log space unless resuming
    # from a previous call's final simplex
    if self.simplex is None:
        self.simplex = [
            flex.log(flex.double(
                [uniform(0.0001, 0.01) for _ in range(n_param)]))
            for _ in range(n_param + 1)
        ]

    class _SimplexTarget(object):
        """ Evaluator to simplex """

        def __init__(
            self,
            history,
            experiment,
            reflections,
            num_integral,
            use_mosaic_block_angular_spread,
            use_wavelength_spread,
        ):
            """ Initialise """
            from dials_scratch.jmp.profile_modelling import MLTarget3D

            self.func = MLTarget3D(
                experiment,
                reflections,
                num_integral=num_integral,
                use_mosaic_block_angular_spread=use_mosaic_block_angular_spread,
                use_wavelength_spread=use_wavelength_spread,
            )
            self.count = 1
            self.history = history
            self.logL = None

        def target(self, log_parameters):
            """ Compute the negative log likelihood """
            from dials.array_family import flex

            # Search is in log space to keep parameters positive
            parameters = flex.exp(log_parameters)
            self.logL = self.func.log_likelihood(parameters)
            logL = flex.sum(self.logL)
            self.count += 1
            print(self.count, list(parameters), logL)
            self.history.append(parameters, logL)
            # Simplex minimises, so negate the log likelihood
            return -logL

    evaluator = _SimplexTarget(
        self.history,
        self.experiments[0],
        subset,
        num_integral=self.num_integral,
        use_mosaic_block_angular_spread=self.use_mosaic_block_angular_spread,
        use_wavelength_spread=self.use_wavelength_spread,
    )

    optimizer = simplex.simplex_opt(
        n_param,
        matrix=self.simplex,
        evaluator=evaluator,
        tolerance=1e-7,
    )

    # Solution comes back in log space; convert to real parameters
    self.parameters = flex.exp(optimizer.get_solution())

    # Keep the final simplex so a later call can resume from it
    self.simplex = optimizer.matrix

    # Per-reflection log likelihood from the last evaluation
    self.log_likelihood = optimizer.evaluator.logL
def __init__(self, n_params, n_fst_pass, d_max, data, n_int=35, simplex_trial=5):
    """Two-stage fit of p(r) expansion coefficients to scattering data.

    Stage 1: differential-evolution search over the first n_fst_pass
    coefficients using only low-q data. Stage 2: the coefficient vector is
    padded to n_params and refined with simplex_trial independent simplex
    runs over all q; the best solution is kept.

    :param n_params: total number of expansion coefficients after padding
    :param n_fst_pass: number of coefficients fitted in the first pass
    :param d_max: maximum particle dimension
        NOTE(review): this argument is never used — self.d_max is taken
        from max(self.data.q) below; confirm that is intentional.
    :param data: object exposing a .q array of momentum-transfer values
    :param n_int: number of integration points for the p(r) transforms
    :param simplex_trial: number of independent simplex restarts
    """
    self.n = n_fst_pass
    self.n_coeff = n_params
    # Number of extra coefficients added for the second pass.
    self.delta = self.n_coeff - self.n
    self.data = data
    self.d_max = max(self.data.q)
    self.n_int = n_int
    self.x = None  # current coefficient vector; set by the DE optimiser
    self.ent = entropic_restraints.entropy_restraint()
    # make a pofr please
    self.pofr = pr_tools.pofr(self.d_max, self.n, n_int=self.n_int,
                              m_int=self.n_int)
    # First pass weights: only q < 0.1 contributes (1.0 / 0.0 mask).
    self.q_weight = flex.bool(self.data.q < 0.1).as_double()
    self.weight = 1.0
    # first we do a global optimisation using a differential evolution search
    self.domain = []
    for ii in range(self.n):
        self.domain.append((-0.1, 0.1))
    self.optimizer = de.differential_evolution_optimizer(
        self,
        population_size=self.n,
        n_cross=1,
        f=0.85,
        eps=1e-5,
        monitor_cycle=50,
        show_progress=False)
    # Second pass: all q points weighted equally.
    self.q_weight = self.q_weight * 0.0 + 1.0
    # Pad the DE solution with zeros for the extra coefficients and
    # rebuild p(r) with the full basis.
    self.x = self.x.concatenate(flex.double([0] * self.delta))
    self.n = self.n + self.delta
    self.pofr = pr_tools.pofr(self.d_max, self.n, n_int=self.n_int,
                              m_int=self.n_int)
    self.simplex_trial = simplex_trial
    self.simplex_scores = []
    self.simplex_solutions = []
    # NOTE(review): the inner loop reuses `ii`, shadowing the trial index;
    # harmless here since the outer index is not read afterwards.
    for ii in xrange(self.simplex_trial):
        #make a random simplex please
        self.weight = 1.0
        self.starting_simplex = []
        # n+1 random vertices within +/-0.10 of the current solution.
        for ii in range(self.n + 1):
            self.starting_simplex.append(
                0.10 * (flex.random_double(self.n) * 2 - 1.0) + self.x)
        self.optimizer = simplex.simplex_opt(dimension=self.n,
                                             matrix=self.starting_simplex,
                                             evaluator=self,
                                             tolerance=1e-3)
        self.solution = self.optimizer.get_solution()
        self.score = self.target(self.solution)
        self.simplex_scores.append(self.score)
        self.simplex_solutions.append(self.solution)
    # Keep the best-scoring simplex trial.
    best_simplex_score = self.simplex_scores[0]
    this_simplex = 0
    for ii in xrange(self.simplex_trial):
        if self.simplex_scores[ii] < best_simplex_score:
            best_simplex_score = self.simplex_scores[ii]
            this_simplex = ii
    self.solution = self.simplex_solutions[this_simplex]
    #self.optimizer = simulated_annealing.sa_optimizer( self, self.solution, flex.double( self.n*[0.0051] ), start_t=2.1, end_t=0.001, burn_in=100, burn_out=50000, steps=5000 , show_progress=True)
    #self.solution, self.score = self.optimizer.get_solution()
    self.pofr.update(self.solution)
    # Model curve evaluated at the experimental q values.
    self.calc_data = self.pofr.f(self.data.q)