def run(self, experiments, reflections):
    result = flex.reflection_table()
    for expt_id, experiment in enumerate(experiments):
        refls = reflections.select(reflections['id'] == expt_id)
        A = flex.mat3_double(len(refls), experiment.crystal.get_A())
        s0 = flex.vec3_double(len(refls), experiment.beam.get_s0())
        q = A * refls['miller_index'].as_vec3_double()
        rh = (q + s0).norms() - 1 / experiment.beam.get_wavelength()
        eta = 2 * math.pi / 180 * experiment.crystal.get_half_mosaicity_deg()
        rs = (1 / experiment.crystal.get_domain_size_ang()) + (eta / 2 / refls['d'])
        p_G = flex.exp(-2 * ln2 * rh**2 / rs**2)
        dp_G_drs = p_G * 4 * ln2 * rh**2 / rs**2
        refls['rh'] = rh
        refls['rs'] = rs
        refls['p_G'] = p_G
        refls['dp_G_drs'] = dp_G_drs
        result.extend(refls)
    return experiments, result
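# Hedged illustration (not part of the original source): the Gaussian partiality
# model used in run() above, written with plain scalars for a single hypothetical
# reflection.  ln2 is assumed to be math.log(2), as the surrounding code implies;
# all numbers below are invented for demonstration only.
import math

ln2 = math.log(2)
domain_size_ang = 5000.0    # hypothetical effective domain size (Angstrom)
half_mosaicity_deg = 0.05   # hypothetical half mosaic angle (degrees)
d_spacing = 2.0             # hypothetical resolution of the reflection (Angstrom)
rh_example = 2.0e-4         # hypothetical distance of the RLP from the Ewald sphere (1/Angstrom)

eta_example = 2 * math.pi / 180 * half_mosaicity_deg
rs_example = 1 / domain_size_ang + eta_example / 2 / d_spacing
p_G_example = math.exp(-2 * ln2 * rh_example ** 2 / rs_example ** 2)
print(rs_example, p_G_example)  # p_G is 1 when rh == 0 and falls off as a Gaussian in rh/rs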
def jacobian_callable(self, values):
    PB = self.get_partiality_array(values)
    EXP = flex.exp(-2. * values.BFACTOR * self.DSSQ)
    G_terms = (EXP * PB * self.ICALCVEC)
    B_terms = (values.G * EXP * PB * self.ICALCVEC) * (-2. * self.DSSQ)
    P_terms = (values.G * EXP * self.ICALCVEC)
    thetax = values.thetax
    thetay = values.thetay
    Rx = matrix.col((1, 0, 0)).axis_and_angle_as_r3_rotation_matrix(thetax)
    dRx_dthetax = matrix.col((1, 0, 0)).axis_and_angle_as_r3_derivative_wrt_angle(thetax)
    Ry = matrix.col((0, 1, 0)).axis_and_angle_as_r3_rotation_matrix(thetay)
    dRy_dthetay = matrix.col((0, 1, 0)).axis_and_angle_as_r3_derivative_wrt_angle(thetay)
    ref_ori = matrix.sqr(self.ORI.reciprocal_matrix())
    miller_vec = self.MILLER.as_vec3_double()
    ds1_dthetax = flex.mat3_double(len(self.MILLER), Ry * dRx_dthetax * ref_ori) * miller_vec
    ds1_dthetay = flex.mat3_double(len(self.MILLER), dRy_dthetay * Rx * ref_ori) * miller_vec
    s1vec = self.get_s1_array(values)
    s1lenvec = flex.sqrt(s1vec.dot(s1vec))
    dRh_dthetax = s1vec.dot(ds1_dthetax) / s1lenvec
    dRh_dthetay = s1vec.dot(ds1_dthetay) / s1lenvec
    rs = values.RS
    Rh = self.get_Rh_array(values)
    rs_sq = rs * rs
    denomin = (2. * Rh * Rh + rs_sq)
    dPB_dRh = {
        "lorentzian": -PB * 4. * Rh / denomin,
        "gaussian": -PB * 4. * math.log(2) * Rh / rs_sq,
    }[self.profile_shape]
    dPB_dthetax = dPB_dRh * dRh_dthetax
    dPB_dthetay = dPB_dRh * dRh_dthetay
    Px_terms = P_terms * dPB_dthetax
    Py_terms = P_terms * dPB_dthetay
    return [G_terms, B_terms, 0, Px_terms, Py_terms]
def calculate_scales(self, block_id=0):
    """Calculate and return inverse scales for a given block."""
    scales = flex.exp(
        flex.double(self._n_refl[block_id], self._parameters[0])
        / (2.0 * (self._d_values[block_id] * self._d_values[block_id]))
    )
    return scales
def jacobian_callable(self, values):
    PB = self.get_partiality_array(values)
    EXP = flex.exp(-2. * values.BFACTOR * self.DSSQ)
    G_terms = (EXP * PB * self.ICALCVEC)
    B_terms = (values.G * EXP * PB * self.ICALCVEC) * (-2. * self.DSSQ)
    P_terms = (values.G * EXP * self.ICALCVEC)
    thetax = values.thetax
    thetay = values.thetay
    Rx = matrix.col((1, 0, 0)).axis_and_angle_as_r3_rotation_matrix(thetax)
    dRx_dthetax = matrix.col((1, 0, 0)).axis_and_angle_as_r3_derivative_wrt_angle(thetax)
    Ry = matrix.col((0, 1, 0)).axis_and_angle_as_r3_rotation_matrix(thetay)
    dRy_dthetay = matrix.col((0, 1, 0)).axis_and_angle_as_r3_derivative_wrt_angle(thetay)
    ref_ori = matrix.sqr(self.ORI.reciprocal_matrix())
    miller_vec = self.MILLER.as_vec3_double()
    ds1_dthetax = flex.mat3_double(len(self.MILLER), Ry * dRx_dthetax * ref_ori) * miller_vec
    ds1_dthetay = flex.mat3_double(len(self.MILLER), dRy_dthetay * Rx * ref_ori) * miller_vec
    s1vec = self.get_s1_array(values)
    s1lenvec = flex.sqrt(s1vec.dot(s1vec))
    dRh_dthetax = s1vec.dot(ds1_dthetax) / s1lenvec
    dRh_dthetay = s1vec.dot(ds1_dthetay) / s1lenvec
    rs = values.RS
    Rh = self.get_Rh_array(values)
    rs_sq = rs * rs
    denomin = (2. * Rh * Rh + rs_sq)
    dPB_dRh = -PB * 4. * Rh / denomin
    dPB_dthetax = dPB_dRh * dRh_dthetax
    dPB_dthetay = dPB_dRh * dRh_dthetay
    Px_terms = P_terms * dPB_dthetax
    Py_terms = P_terms * dPB_dthetay
    dPB_drs = 4 * rs * Rh * Rh / (denomin * denomin)
    Prs_terms = P_terms * dPB_drs
    return [G_terms, B_terms, Prs_terms, Px_terms, Py_terms]
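# Hedged sanity check (not part of the original source): verify the closed-form
# Lorentzian partiality derivatives used above against central finite differences,
# using plain scalars.  The Lorentzian form PB = rs^2 / (2*Rh^2 + rs^2) is taken
# from the partiality expressions elsewhere in this collection; the test values
# of Rh, rs and the step size h are invented for illustration.
def lorentzian_pb(rh, rs):
    return rs ** 2 / (2 * rh ** 2 + rs ** 2)

rh_t, rs_t, h = 3.0e-4, 2.0e-3, 1.0e-9
denom_t = 2 * rh_t ** 2 + rs_t ** 2
dpb_drh_analytic = -lorentzian_pb(rh_t, rs_t) * 4 * rh_t / denom_t
dpb_drs_analytic = 4 * rs_t * rh_t ** 2 / denom_t ** 2
dpb_drh_numeric = (lorentzian_pb(rh_t + h, rs_t) - lorentzian_pb(rh_t - h, rs_t)) / (2 * h)
dpb_drs_numeric = (lorentzian_pb(rh_t, rs_t + h) - lorentzian_pb(rh_t, rs_t - h)) / (2 * h)
assert abs(dpb_drh_analytic - dpb_drh_numeric) < 1e-3 * abs(dpb_drh_analytic)
assert abs(dpb_drs_analytic - dpb_drs_numeric) < 1e-3 * abs(dpb_drs_analytic)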
def abs_correction_flex(self, s1_flex):
    s1_flex_length = len(s1_flex)
    surface_normal = flex.vec3_double(s1_flex_length, self.surface_normal)
    edge_of_tape_normal = flex.vec3_double(s1_flex_length, self.edge_of_tape_normal)
    dsurf1 = flex.double(len(s1_flex), 0)
    dsurf2 = flex.double(len(s1_flex), 0)
    dot_product = s1_flex.dot(surface_normal)
    sel = dot_product != 0
    dsurf1.set_selected(sel, self.sn1 / dot_product.select(sel))
    dsurf2.set_selected(sel, self.sn2 / dot_product.select(sel))
    dsurf3 = self.sn3 / (s1_flex.dot(edge_of_tape_normal))

    # determine path length through kapton tape
    kapton_path_mm = flex.double(s1_flex_length, 0)
    unshadowed_sel = (dsurf3 < dsurf1) | (dsurf1 < 0)
    nearsel = ~unshadowed_sel & (dsurf3 < dsurf2)
    kapton_path_mm.set_selected(nearsel, (dsurf3 - dsurf1).select(nearsel))
    farsel = ~unshadowed_sel & (dsurf3 >= dsurf2)
    kapton_path_mm.set_selected(farsel, (dsurf2 - dsurf1).select(farsel))

    # determine absorption correction
    absorption_correction = 1 / flex.exp(
        -self.abs_coeff * kapton_path_mm
    )  # unitless, >=1
    return absorption_correction
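# Hedged scalar example (not part of the original source): the correction applied
# above is inverted Beer-Lambert attenuation, so a photon path of length t through
# an absorber with linear absorption coefficient mu is boosted by exp(mu * t) >= 1.
# The coefficient and path length below are invented for illustration.
import math

abs_coeff_example = 0.35        # hypothetical linear absorption coefficient (1/mm)
kapton_path_example = 0.08      # hypothetical path length through the tape (mm)
transmission = math.exp(-abs_coeff_example * kapton_path_example)  # fraction of intensity surviving
correction = 1 / transmission                                      # multiply observed intensity by this
assert correction >= 1.0
print(transmission, correction)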
def fvec_callable(self, values):
    PB = self.get_partiality_array(values)
    EXP = flex.exp(-2. * values.BFACTOR * self.DSSQ)
    terms = (values.G * EXP * PB * self.ICALCVEC - self.IOBSVEC)
    # Ideas for improvement
    #   straightforward to also include sigma weighting
    #   add extra terms representing rotational excursion: terms.concatenate(1.e7*Rh)
    return terms
def calculate_scales_and_derivatives(self, block_id=0):
    """Calculate and return inverse scales and derivatives for a given block."""
    scales = flex.exp(
        self._parameters[0] * self._x[block_id] / (self._d_values[block_id] ** 2)
    )
    derivatives = sparse.matrix(self._n_refl[block_id], 1)
    for i in range(self._n_refl[block_id]):
        derivatives[i, 0] = scales[i] * (
            self._x[block_id][i] / (self._d_values[block_id][i] ** 2)
        )
    return scales, derivatives
def calculate_scales_and_derivatives(self, block_id=0):
    scales, derivatives = super(
        SmoothBScaleComponent1D, self
    ).calculate_scales_and_derivatives(block_id)
    if self._n_refl[block_id] == 0:
        return flex.double([]), sparse.matrix(0, 0)
    prefac = 1.0 / (2.0 * (self._d_values[block_id] * self._d_values[block_id]))
    s = flex.exp(scales * prefac)
    d = row_multiply(derivatives, s * prefac)
    return s, d
def calculate_scales_and_derivatives(self, block_id=0):
    """Calculate and return inverse scales and derivatives for a given block."""
    d_squared = self._d_values[block_id] * self._d_values[block_id]
    scales = flex.exp(
        flex.double(self._n_refl[block_id], self._parameters[0]) / (2.0 * d_squared)
    )
    derivatives = sparse.matrix(self._n_refl[block_id], 1)
    for i in range(self._n_refl[block_id]):
        derivatives[i, 0] = scales[i] / (2.0 * d_squared[i])
    return scales, derivatives
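# Hedged check (not part of the original source): the derivative entries above
# follow from the chain rule, d/dB exp(B / (2*d^2)) = exp(B / (2*d^2)) / (2*d^2).
# Verified here with plain scalars and a central finite difference; the values of
# B, d and the step size are invented for illustration.
import math

B_t, d_t, h_t = 0.8, 2.5, 1.0e-6
scale_t = math.exp(B_t / (2.0 * d_t * d_t))
deriv_analytic = scale_t / (2.0 * d_t * d_t)
deriv_numeric = (
    math.exp((B_t + h_t) / (2.0 * d_t * d_t)) - math.exp((B_t - h_t) / (2.0 * d_t * d_t))
) / (2.0 * h_t)
assert abs(deriv_analytic - deriv_numeric) < 1e-6 * deriv_analytic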
def abs_correction_flex(self, s1_flex):
    """Compute the absorption correction using Beer's law. Takes in a flex
    array of s1 vectors, determines the Kapton path length for each, and then
    determines the absorption correction for each s1 vector."""
    kapton_faces = self.faces
    from dials.algorithms.integration import get_kapton_path_cpp

    # new style, much faster
    kapton_path_mm = get_kapton_path_cpp(kapton_faces, s1_flex)
    # old style, really slow
    # for s1 in s1_flex:
    #     kapton_path_mm.append(self.get_kapton_path_mm(s1))

    # determine absorption correction
    if kapton_path_mm is not None:
        absorption_correction = 1 / flex.exp(
            -self.abs_coeff * kapton_path_mm
        )  # unitless, >=1
    return absorption_correction
def target(self, log_parameters):
    """Compute the negative log likelihood."""
    from dials.array_family import flex

    parameters = flex.exp(log_parameters)
    self.logL = self.func.log_likelihood(parameters)
    logL = flex.sum(self.logL)
    self.count += 1
    print(self.count, list(parameters), logL)
    self.history.append(parameters, logL)

    # Return negative log likelihood
    return -logL
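# Hedged note (not part of the original source): target() works in log-parameter
# space, so the optimiser can step anywhere in R^n while the physical parameters
# exp(log_parameters) stay strictly positive.  A scalar illustration with an
# invented trial value:
import math

log_p = -4.0            # an unconstrained trial value from the optimiser
p = math.exp(log_p)     # the corresponding physical parameter, always > 0
assert p > 0
print(log_p, p)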
def plot_prob_for_zero(c, b, s):
    from math import log, exp, factorial
    from dials.array_family import flex

    L = flex.double(flex.grid(100, 100))
    MASK = flex.bool(flex.grid(100, 100))
    c = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
    b = [bb / sum(b) for bb in b]
    s = [ss / sum(s) for ss in s]
    for BB in range(0, 100):
        for SS in range(0, 100):
            B = 0 + BB / 10000.0
            S = 0 + SS / 40.0
            LL = 0
            for i in range(len(b)):
                if B * b[i] + S * s[i] <= 0:
                    MASK[BB, SS] = True
                    LL = -999999
                    break
                else:
                    LL += (
                        c[i] * log(B * b[i] + S * s[i])
                        - log(factorial(c[i]))
                        - B * b[i]
                        - S * s[i]
                    )
            L[BB, SS] = LL
    index = flex.max_index(L)
    i = index % 100
    j = index // 100
    B = 0 + j / 10000.0
    S = 0 + i / 40.0
    print(flex.max(L), B, S)

    from matplotlib import pylab
    import numpy

    im = numpy.ma.masked_array(
        flex.exp(L).as_numpy_array(), mask=MASK.as_numpy_array()
    )
    pylab.imshow(im)
    pylab.show()
    exit(0)
def show_image(c, b, s, BB=None, SS=None):
    import numpy.ma
    from dials.array_family import flex

    N = 100
    im = flex.double(flex.grid(N, N))
    mask = flex.bool(flex.grid(N, N))
    for j in range(N):
        for i in range(N):
            B = -1.0 + j * 10.0 / N
            S = -1.0 + i * 10.0 / N
            # func is assumed to be defined elsewhere in the module
            im[j, i], mask[j, i] = func(c, b, s, B, S)
            im[j, i] = -im[j, i]
    masked_im = numpy.ma.array(
        # im.as_numpy_array(),
        flex.exp(im).as_numpy_array(),
        mask=mask.as_numpy_array(),
    )
    mask2 = flex.bool(flex.grid(N, N))
    indices = []
    for i in range(len(mask)):
        if not mask[i]:
            indices.append(i)
    indices = flex.size_t(indices)
    ind = flex.max_index(im.select(indices))
    ind = indices[ind]
    maxy = -1.0 + (ind % N) * 10.0 / N
    maxx = -1.0 + (ind // N) * 10.0 / N

    from matplotlib import pylab

    pylab.imshow(masked_im, origin="bottom", extent=[-1.0, 9.0, -1.0, 9.0])
    # The original tested the undefined names YY/XX here; the optional BB/SS
    # arguments appear to be what was intended.
    if BB is not None and SS is not None:
        pylab.plot(BB, SS)
    pylab.scatter([maxy], [maxx])
    pylab.show()
def calculate_scales(self, block_id=0):
    """Calculate and return inverse scales for a given block."""
    scales = flex.exp(
        self._parameters[0] * self._x[block_id] / (self._d_values[block_id] ** 2)
    )
    return scales
def scaler_callable(self, values):
    PB = self.get_partiality_array(values)
    EXP = flex.exp(-2. * values.BFACTOR * self.DSSQ)
    terms = values.G * EXP * PB
    return terms
def get_gaussian_partiality_array(self, values):
    rs = values.RS
    Rh = self.get_Rh_array(values)
    immersion = Rh / rs
    gaussian = flex.exp(-2. * math.log(2) * (immersion * immersion))
    return gaussian
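# Hedged check (not part of the original source): with the Gaussian form
# exp(-2*ln2*(Rh/rs)^2) used above, the partiality is exactly 1 on the Ewald
# sphere (Rh = 0) and falls to 1/2 at |Rh| = rs/sqrt(2).  The value of rs below
# is invented for illustration.
import math

rs_example = 1.0e-3  # hypothetical reciprocal-space radius (1/Angstrom)

def gaussian_partiality(rh, rs):
    return math.exp(-2.0 * math.log(2) * (rh / rs) ** 2)

assert gaussian_partiality(0.0, rs_example) == 1.0
assert abs(gaussian_partiality(rs_example / math.sqrt(2), rs_example) - 0.5) < 1e-12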
def calculate_scales(self, block_id=0):
    s = super(SmoothBScaleComponent1D, self).calculate_scales(block_id)
    return flex.exp(s / (2.0 * flex.pow2(self._d_values[block_id])))
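# Hedged scalar sketch (not part of the original source): the decay term above is
# exp(B / (2*d^2)), a resolution-dependent factor that grows as the d-spacing gets
# smaller when B > 0.  The B-factor and d-spacings below are invented for
# illustration only.
import math

B_example = 1.5                     # hypothetical B-factor parameter
for d_example in (4.0, 2.0, 1.0):   # d-spacing in Angstrom
    print(d_example, math.exp(B_example / (2.0 * d_example * d_example)))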
def compute_intensity_parameters(self):
    """Create a new reflection table with all the derived parameters needed
    to apply corrections from RS postrefinement"""
    refls = self.scaler.ISIGI
    ct = self.scaler.crystal_table

    rx = flex.mat3_double()   # crystal rotation around x
    ry = flex.mat3_double()   # crystal rotation around y
    u = flex.mat3_double()    # U matrix (orientation)
    b = flex.mat3_double()    # B matrix (cell parameters)
    wavelength = flex.double()
    G = flex.double()         # scaling gfactor
    B = flex.double()         # wilson B factor
    s0 = flex.vec3_double()   # beam vector
    deff = flex.double()      # effective domain size
    eta = flex.double()       # effective mosaic domain misorientation angle

    ex = col((1, 0, 0))       # crystal rotation x axis
    ey = col((0, 1, 0))       # crystal rotation y axis

    for i in range(len(ct)):
        # Need to copy crystal specific terms for each reflection. Equivalent to a JOIN in SQL.
        n_refl = ct['n_refl'][i]
        rx.extend(flex.mat3_double(n_refl, ex.axis_and_angle_as_r3_rotation_matrix(ct['thetax'][i])))
        ry.extend(flex.mat3_double(n_refl, ey.axis_and_angle_as_r3_rotation_matrix(ct['thetay'][i])))
        u.extend(flex.mat3_double(n_refl, ct['u_matrix'][i]))
        b.extend(flex.mat3_double(n_refl, ct['b_matrix'][i]))
        wavelength.extend(flex.double(n_refl, ct['wavelength'][i]))
        G.extend(flex.double(n_refl, ct['G'][i]))
        B.extend(flex.double(n_refl, ct['B'][i]))
        s0.extend(flex.vec3_double(n_refl, (0, 0, -1)) * (1 / ct['wavelength'][i]))
        deff.extend(flex.double(n_refl, ct['deff'][i]))
        eta.extend(flex.double(n_refl, ct['eta'][i]))

    iobs = refls['iobs']
    h = refls['miller_index_original'].as_vec3_double()
    q = ry * rx * u * b * h          # vector pointing from origin of reciprocal space to RLP
    qlen = q.norms()                 # length of q
    d = 1 / q.norms()                # resolution
    # rs = (1/deff)+(eta/(2*d))      # proper formulation of RS
    rs = 1 / deff                    # assumes eta is zero
    rs_sq = rs * rs                  # square of rs
    s = (s0 + q)                     # vector from center of Ewald sphere to RLP
    slen = s.norms()                 # length of s
    rh = slen - (1 / wavelength)     # distance from RLP to Ewald sphere
    p_n = rs_sq                      # numerator of partiality lorenzian expression
    p_d = (2. * (rh * rh)) + rs_sq   # denominator of partiality lorenzian expression
    partiality = p_n / p_d
    theta = flex.asin(wavelength / (2 * d))
    epsilon = -8 * B * (flex.sin(theta) / wavelength) ** 2  # exponential term in partiality
    eepsilon = flex.exp(epsilon)     # e^epsilon
    D = partiality * G * eepsilon    # combined correction factor: partiality x scale x B-factor term
    thetah = flex.asin(wavelength / (2 * d))  # reflecting angle
    sinthetah = flex.sin(thetah)
    er = sinthetah / wavelength      # ratio term in epsilon

    # save all the columns
    r = flex.reflection_table()
    r['rx'] = rx
    r['ry'] = ry
    r['u'] = u
    r['b'] = b
    r['h'] = h
    r['q'] = q
    r['qlen'] = qlen
    r['D'] = D
    r['rs'] = rs
    r['eta'] = eta
    r['deff'] = deff
    r['d'] = d
    r['s'] = s
    r['slen'] = slen
    r['wavelength'] = wavelength
    r['p_n'] = p_n
    r['p_d'] = p_d
    r['partiality'] = partiality
    r['G'] = G
    r['B'] = B
    r['eepsilon'] = eepsilon
    r['thetah'] = thetah
    r['sinthetah'] = sinthetah
    r['er'] = er
    return r
def paper_test(B, S):
    from numpy.random import poisson
    from math import exp

    background_shape = [1 for i in range(20)]
    signal_shape = [1 if i >= 6 and i < 15 else 0 for i in range(20)]

    background = [poisson(bb * B, 1)[0] for bb in background_shape]
    signal = [poisson(ss * S, 1)[0] for ss in signal_shape]
    # background = [bb * B for bb in background_shape]
    # signal = [ss * S for ss in signal_shape]
    total = [b + s for b, s in zip(background, signal)]

    # from matplotlib import pylab
    # pylab.plot(total)
    # pylab.plot(signal)
    # pylab.plot(background)
    # pylab.show()

    total = [0, 1, 0, 0, 0, 0, 3, 1, 3, 3, 6, 6, 4, 1, 4, 0, 2, 0, 1, 1]
    total = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]

    # plot_prob_for_zero(total, background_shape, signal_shape)
    # signal_shape = [exp(-(x - 10.0)**2 / (2*3.0**2)) for x in range(20)]
    # signal_shape = [ss / sum(signal_shape) for ss in signal_shape]
    # print signal_shape
    # plot_valid(background_shape, signal_shape)

    B = 155.0 / 296.0

    from dials.array_family import flex
    from math import log, factorial

    V = flex.double(flex.grid(100, 100))
    L = flex.double(flex.grid(100, 100))
    DB = flex.double(flex.grid(100, 100))
    DS = flex.double(flex.grid(100, 100))
    P = flex.double(flex.grid(100, 100))
    Fb = sum(background_shape)
    Fs = sum(signal_shape)
    SV = []
    MASK = flex.bool(flex.grid(100, 100), False)
    for BB in range(100):
        for SS in range(100):
            B = -5.0 + (BB) / 10.0
            S = -5.0 + (SS) / 10.0
            # SV.append(S)
            VV = 0
            LL = 0
            DDB = 0
            DDS = 0
            for i in range(20):
                s = signal_shape[i]
                b = background_shape[i]
                c = total[i]
                if B * b + S * s <= 0:
                    # MASK[BB, SS] = True
                    LL = 0
                    if b == 0:
                        DDB += 0
                    else:
                        DDB += 1e7
                    if s == 0:
                        DDS += 0
                    else:
                        DDS += 1e7
                    # break
                else:
                    # VV += (b + s)*c / (B*b + S*s)
                    # LL += c*log(B*b+S*s) - log(factorial(c)) - B*b - S*s
                    DDB += c * b / (B * b + S * s) - b
                    DDS += c * s / (B * b + S * s) - s
            VV -= Fb + Fs
            # print B, S, VV
            # V[BB, SS] = abs(VV)
            L[BB, SS] = LL
            DB[BB, SS] = DDB
            DS[BB, SS] = DDS

    max_ind = flex.max_index(L)
    j = max_ind // 100
    i = max_ind % 100
    print("Approx: ", (j + 1) / 20.0, (i + 1) / 20.0)
    print("Min/Max DB: ", flex.min(DB), flex.max(DB))
    print("Min/Max DS: ", flex.min(DS), flex.max(DS))

    from matplotlib import pylab

    # pylab.imshow(flex.log(V).as_numpy_array(), extent=[0.05, 5.05, 5.05, 0.05])
    # pylab.plot(SV, V)
    # pylab.plot(SV, [0] * 100)
    # pylab.show()
    im = flex.exp(L).as_numpy_array()
    import numpy

    # im = numpy.ma.masked_array(im, mask=MASK.as_numpy_array())
    # pylab.imshow(im)  # , extent=[-5.0, 5.0, 5.0, -5.0], origin='lower')
    pylab.imshow(
        DB.as_numpy_array(), vmin=-100, vmax=100
    )  # , extent=[-5.0, 5.0, 5.0, -5.0], origin='lower')
    pylab.contour(DB.as_numpy_array(), levels=[0], colors=["red"])
    pylab.contour(DS.as_numpy_array(), levels=[0], colors=["black"])
    pylab.show()
    # im = numpy.ma.masked_array(DB.as_numpy_array(), mask=MASK.as_numpy_array())
    # pylab.imshow(im, extent=[-5.0, 5.0, 5.0, -5.0], vmin=-20, vmax=100)
    # pylab.show()
    # im = numpy.ma.masked_array(DS.as_numpy_array(), mask=MASK.as_numpy_array())
    # pylab.imshow(im, extent=[-5.0, 5.0, 5.0, -5.0], vmin=-20, vmax=100)
    # pylab.show()
    # exit(0)

    S1, B1 = value(total, background_shape, signal_shape)
    exit(0)

    try:
        S1, B1 = value(total, background_shape, signal_shape)
        print("Result:")
        print(S1, B1)
        exit(0)
    except Exception as e:
        raise e
        import sys
        import traceback
        traceback.print_exc()

    from dials.array_family import flex

    Fs = sum(signal_shape)
    Fb = sum(background_shape)
    Rs = flex.double(flex.grid(100, 100))

    print("-----")
    print(B, S)
    # from matplotlib import pylab
    # pylab.plot(total)
    # pylab.show()
    print(background_shape)
    print(signal_shape)
    print(total)

    from math import exp, factorial, log

    minx = -1
    miny = -1
    minr = 9999
    for BB in range(0, 100):
        for SS in range(0, 100):
            B = -10 + (BB) / 5.0
            S = -10 + (SS) / 5.0
            L = 0
            Fb2 = 0
            Fs2 = 0
            for i in range(len(total)):
                c = total[i]
                b = background_shape[i]
                s = signal_shape[i]
                # P = exp(-(B*b + S*s)) * (B*b+S*s)**c / factorial(c)
                # print P
                # if P > 0:
                #     L += log(P)
                den = B * b + S * s
                num1 = b * c
                num2 = s * c
                if den != 0:
                    Fb2 += num1 / den
                    Fs2 += num2 / den
            R = (Fb2 - Fb) ** 2 + (Fs2 - Fs) ** 2
            if R > 1000:
                R = 0
            # Rs[BB, SS] = L  # R  # Fs2 - Fs
            Rs[BB, SS] = R  # Fs2 - Fs

    from matplotlib import pylab

    pylab.imshow(flex.log(Rs).as_numpy_array(), extent=[-5, 5, 5, -5])
    pylab.show()
    exit(0)
    exit(0)

    # print S, B, sum(signal), sum(background), S1, B1
    return S, B, sum(signal), sum(background), S1, B1
def test_RefinerCalculator(small_reflection_table):
    """Test for the RefinerCalculator class. This calculates scale factors and
    derivatives for reflections based on the model components."""

    # To test the basis function, need a scaling active parameter manager - to set
    # this up we need a components dictionary with some reflection data.
    # Let's use KB model components for simplicity - and have an extra fake 'abs'
    # component.
    rt = small_reflection_table
    components = {
        "scale": SingleScaleFactor(flex.double([1.0])),
        "decay": SingleBScaleFactor(flex.double([0.0])),
        "abs": SingleScaleFactor(flex.double([1.0])),
    }  # Create empty components.
    components["scale"].data = {"id": rt["id"]}
    components["decay"].data = {"d": rt["d"]}
    components["abs"].data = {"id": rt["id"]}
    for component in components.values():
        component.update_reflection_data()  # Add some data to components.

    apm = scaling_active_parameter_manager(components, ["decay", "scale"])

    # First test that scale factors can be successfully updated.
    # Manually change the parameters in the apm.
    decay = components["decay"]  # Define alias
    _ = components["scale"]  # Define alias
    # Note, order of params in apm.x depends on order in scaling model components.
    new_B = 1.0
    new_S = 2.0
    apm.set_param_vals(flex.double([new_S, new_B]))
    s, d = RefinerCalculator.calculate_scales_and_derivatives(apm, 0)
    slist, dlist = RefinerCalculator._calc_component_scales_derivatives(apm, 0)

    # Now test that the inverse scale factor is correctly calculated.
    calculated_sfs = s
    assert list(calculated_sfs) == pytest.approx(
        list(new_S * flex.exp(new_B / (2.0 * flex.pow2(decay.d_values[0]))))
    )

    # Now check that the derivative matrix is correctly calculated.
    calc_derivs = d
    assert calc_derivs[0, 0] == dlist[0][0, 0] * slist[1][0]
    assert calc_derivs[1, 0] == dlist[0][1, 0] * slist[1][1]
    assert calc_derivs[2, 0] == dlist[0][2, 0] * slist[1][2]
    assert calc_derivs[0, 1] == dlist[1][0, 0] * slist[0][0]
    assert calc_derivs[1, 1] == dlist[1][1, 0] * slist[0][1]
    assert calc_derivs[2, 1] == dlist[1][2, 0] * slist[0][2]

    # Repeat the test when there is only one active parameter.
    # First reset the parameters.
    components["decay"].parameters = flex.double([0.0])
    components["scale"].parameters = flex.double([1.0])
    components["abs"].parameters = flex.double([1.0])
    components["decay"].calculate_scales_and_derivatives()
    components["scale"].calculate_scales_and_derivatives()
    components["abs"].calculate_scales_and_derivatives()

    # Now generate a parameter manager for a single component.
    apm = scaling_active_parameter_manager(components, ["scale"])
    new_S = 2.0
    apm.set_param_vals(flex.double(components["scale"].n_params, new_S))
    s, d = RefinerCalculator.calculate_scales_and_derivatives(apm, 0)
    slist, dlist = RefinerCalculator._calc_component_scales_derivatives(apm, 0)

    # Test that the scales and derivatives were correctly calculated.
    assert list(s) == list([new_S] * slist[0].size())
    assert d[0, 0] == dlist[0][0, 0]
    assert d[1, 0] == dlist[0][1, 0]
    assert d[2, 0] == dlist[0][2, 0]

    # Test again for two components.
    components["decay"].parameters = flex.double([0.0])
    components["scale"].parameters = flex.double([1.0])
    components["abs"].parameters = flex.double([1.0])
    components["decay"].calculate_scales_and_derivatives()
    components["scale"].calculate_scales_and_derivatives()
    components["abs"].calculate_scales_and_derivatives()
    apm = scaling_active_parameter_manager(components, ["scale", "decay"])
    _, __ = RefinerCalculator.calculate_scales_and_derivatives(apm, 0)

    # Test for no components.
    apm = scaling_active_parameter_manager(components, [])
    _, d = RefinerCalculator.calculate_scales_and_derivatives(apm, 0)
    assert d.n_cols == 0 and d.n_rows == 0
def glm2(y):
    from math import sqrt, exp, floor, log
    from scipy.stats import poisson
    from scitbx import matrix
    from dials.array_family import flex

    y = flex.double(y)
    x = flex.double([1.0 for yy in y])
    w = flex.double([1.0 for yy in y])

    X = matrix.rec(x, (len(x), 1))

    c = 1.345
    beta = matrix.col([0])

    maxiter = 10
    accuracy = 1e-3

    for iter in range(maxiter):
        ni = flex.double([1.0 for xx in x])
        sni = flex.sqrt(ni)
        eta = flex.double(X * beta)
        mu = flex.exp(eta)
        dmu_deta = flex.exp(eta)
        Vmu = mu
        sVF = flex.sqrt(Vmu)
        residP = (y - mu) * sni / sVF
        phi = 1
        sV = sVF * sqrt(phi)
        residPS = residP / sqrt(phi)

        H = flex.floor(mu * ni - c * sni * sV)
        K = flex.floor(mu * ni + c * sni * sV)
        # print min(H)
        dpH = flex.double([poisson(mui).pmf(Hi) for mui, Hi in zip(mu, H)])
        dpH1 = flex.double([poisson(mui).pmf(Hi - 1) for mui, Hi in zip(mu, H)])
        dpK = flex.double([poisson(mui).pmf(Ki) for mui, Ki in zip(mu, K)])
        dpK1 = flex.double([poisson(mui).pmf(Ki - 1) for mui, Ki in zip(mu, K)])
        pHm1 = flex.double([poisson(mui).cdf(Hi - 1) for mui, Hi in zip(mu, H)])
        pKm1 = flex.double([poisson(mui).cdf(Ki - 1) for mui, Ki in zip(mu, K)])
        pH = pHm1 + dpH  # = ppois(H,*)
        pK = pKm1 + dpK  # = ppois(K,*)
        E2f = mu * (dpH1 - dpH - dpK1 + dpK) + pKm1 - pHm1
        Epsi = c * (1.0 - pK - pH) + (mu / sV) * (dpH - dpK)
        Epsi2 = c * c * (pH + 1.0 - pK) + E2f
        EpsiS = c * (dpH + dpK) + E2f / sV

        psi = flex.double([huber(rr, c) for rr in residPS])
        cpsi = psi - Epsi
        temp = cpsi * w * sni / sV * dmu_deta
        EEqMat = [0] * len(X)
        for j in range(X.n_rows()):
            for i in range(X.n_columns()):
                k = i + j * X.n_columns()
                EEqMat[k] = X[k] * temp[j]
        EEqMat = matrix.rec(EEqMat, (X.n_rows(), X.n_columns()))
        EEq = []
        for i in range(EEqMat.n_columns()):
            col = []
            for j in range(EEqMat.n_rows()):
                k = i + j * EEqMat.n_columns()
                col.append(EEqMat[k])
            EEq.append(sum(col) / len(col))
        EEq = matrix.col(EEq)
        DiagB = EpsiS / (sni * sV) * w * (ni * dmu_deta) ** 2
        B = matrix.diag(DiagB)
        H = (X.transpose() * B * X) / len(y)
        dbeta = H.inverse() * EEq
        beta_new = beta + dbeta

        relE = sqrt(
            sum([d * d for d in dbeta]) / max(1e-10, sum([d * d for d in beta]))
        )
        beta = beta_new
        # print relE
        if relE < accuracy:
            break

    weights = [min(1, c / abs(r)) for r in residPS]
    eta = flex.double(X * beta)
    mu = flex.exp(eta)

    return beta
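# Hedged sketch (not part of the original source): glm2() above calls an external
# helper huber(r, c) that is not shown in this collection.  In robust GLM fitting
# this is normally the Huber psi function, which clips the standardised residual
# at +/- c.  A minimal version consistent with that reading is given here as an
# assumption, not as the original implementation.
def huber(r, c):
    """Huber psi: identity inside [-c, c], clipped to +/- c outside."""
    if r > c:
        return c
    if r < -c:
        return -c
    return r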
def estimate(self, num_reflections):
    """Estimate the model parameters"""
    from scitbx import simplex
    from copy import deepcopy
    from dials.array_family import flex
    from random import uniform

    # Select the reflections
    reflections = self._select_reflections(self.reflections, num_reflections)

    # The number of parameters
    num_parameters = len(self.history.names)

    # Setup the starting simplex
    if self.simplex is None:
        self.simplex = []
        for i in range(num_parameters + 1):
            self.simplex.append(
                flex.log(
                    flex.double(
                        [uniform(0.0001, 0.01) for j in range(num_parameters)]
                    )
                )
            )

    class Evaluator(object):
        """Evaluator to simplex"""

        def __init__(
            self,
            history,
            experiment,
            reflections,
            num_integral,
            use_mosaic_block_angular_spread,
            use_wavelength_spread,
        ):
            """Initialise"""
            from dials_scratch.jmp.profile_modelling import MLTarget3D

            self.func = MLTarget3D(
                experiment,
                reflections,
                num_integral=num_integral,
                use_mosaic_block_angular_spread=use_mosaic_block_angular_spread,
                use_wavelength_spread=use_wavelength_spread,
            )
            self.count = 1
            self.history = history
            self.logL = None

        def target(self, log_parameters):
            """Compute the negative log likelihood"""
            from dials.array_family import flex

            parameters = flex.exp(log_parameters)
            self.logL = self.func.log_likelihood(parameters)
            logL = flex.sum(self.logL)
            self.count += 1
            print(self.count, list(parameters), logL)
            self.history.append(parameters, logL)

            # Return negative log likelihood
            return -logL

    # Setup the simplex optimizer
    optimizer = simplex.simplex_opt(
        num_parameters,
        matrix=self.simplex,
        evaluator=Evaluator(
            self.history,
            self.experiments[0],
            reflections,
            num_integral=self.num_integral,
            use_mosaic_block_angular_spread=self.use_mosaic_block_angular_spread,
            use_wavelength_spread=self.use_wavelength_spread,
        ),
        tolerance=1e-7,
    )

    # Get the solution
    self.parameters = flex.exp(optimizer.get_solution())

    # Get the final simplex
    self.simplex = optimizer.matrix

    # Save the likelihood for each reflection
    self.log_likelihood = optimizer.evaluator.logL
def calculate_scales(self, block_id=0):
    s = super().calculate_scales(block_id)
    return flex.exp(s / (2.0 * flex.pow2(self._d_values[block_id])))