def testspline(fltfile, parfile, splinefile):
    """
    Apply a candidate spline file to the filtered peaks and refit.

    Runs the external fix_spline.py script to rewrite *fltfile* as
    test.flt using *splinefile*, then loads test.flt together with
    *parfile* into a transformer and calls fit().
    """
    sys.stdout.flush()
    # remove any stale test.flt before regenerating it
    os.system("rm test.flt")
    os.system("fix_spline.py %s test.flt %s" % (fltfile, splinefile))
    # NOTE(review): os.system with interpolated filenames breaks on spaces
    # or shell metacharacters; subprocess.run with an argument list is safer.
    fitter = transformer()
    fitter.loadfiltered("test.flt")
    fitter.loadfileparameters(parfile)
    fitter.fit()
def intorduce_gvectors(flt_file, par_file):
    """
    Compute g-vectors for a filtered peaks file and save an augmented copy.

    Loads *flt_file* and *par_file*, computes two-theta/eta, assigns the
    cell peaks and the g-vectors, then writes "<stem>_with_gvec.flt".
    (The function name is misspelled, but it is the public interface and
    is therefore kept as-is.)
    """
    tr = transformer.transformer()
    tr.loadfiltered(flt_file)
    tr.loadfileparameters(par_file)
    # geometry first; the g-vector computation depends on tth/eta
    tr.compute_tth_eta()
    tr.addcellpeaks()
    tr.computegv()
    out_name = str(flt_file.split(".")[0]) + "_with_gvec.flt"
    tr.write_colfile(out_name)
def __init__(self):
    """Create the worker objects that hold the data for each task."""
    # one helper object per task; each holds its own loaded data
    self.objects = {
        "peakmerger": peakmerge.peakmerger(),
        "transformer": transformer.transformer(),
        "indexer": indexing.indexer(),
        "solver": eps_sig_solver.solver(),
    }
    # presumably a replayable log of the commands issued - confirm with the
    # rest of the class. NOTE(review): the triple-quoted string continues
    # past this chunk and is intentionally left unterminated here.
    self.commandscript = \
"""# Create objects to manipulate - they hold your data
def gridgrains(
    ul,
    flt,
    pars,
    minx=-750,
    maxx=750,
    stepx=25,
    miny=-750,
    maxy=750,
    stepy=25,
    tol=0.05,
):
    """
    Score each UBI matrix in *ul* against the peaks in *flt* over a grid
    of sample translations (t_x, t_y).

    ul   : list of UBI (orientation) matrices
    flt  : filtered peaks file to load
    pars : dict of geometry parameters for transformer.parameters
    tol  : hkl tolerance passed to score()

    Prints one line per grid point (tx, ty followed by one score per ubi)
    and returns the accumulated score() totals over all points and ubis.
    """
    trn = transformer.transformer()
    trn.loadfiltered(flt)
    trn.parameterobj = parameters.parameters(**pars)
    peaks = [trn.getcolumn(trn.xname), trn.getcolumn(trn.yname)]
    # lab-frame xyz is computed once; only tth/eta depend on (t_x, t_y)
    peaks_xyz = transform.compute_xyz_lab(peaks, **trn.parameterobj.get_parameters())
    omega = trn.getcolumn(trn.omeganame)
    trn.updateparameters()
    tx = minx - stepx
    n = 0
    sys.stderr.write("Using tol = %f\n" % (tol))
    # NOTE(review): both loops increment before use, so the last evaluated
    # position is maxx+stepx / maxy+stepy, not maxx/maxy - confirm intended.
    while tx <= maxx:
        tx = tx + stepx
        ty = miny - stepy
        while ty <= maxy:
            ty = ty + stepy
            trn.parameterobj.set_parameters({'t_x': tx, 't_y': ty})
            # NOTE(review): rebinds the 'pars' argument to the full
            # parameter dict from here on (the original dict is lost)
            pars = trn.parameterobj.get_parameters()
            tth, eta = transform.compute_tth_eta_from_xyz(
                peaks_xyz, omega, **pars)
            if 'omegasign' in pars:
                om_sgn = float(pars["omegasign"])
            else:
                om_sgn = 1.0
            gv = transform.compute_g_vectors(tth, eta, omega * om_sgn, **pars)
            print(tx, ty, end=' ')
            for ubi in ul:
                ns = score(ubi, gv.T, tol)
                print(ns, end=' ')
                n += ns
            print()
    return n
def test_many_points(args):
    """
    Grid index - loop over points
    Places the results in a multiprocessing Queue

    args is a single tuple (colfile, parameters, translations, gridpars)
    so the function can be used with Pool.map-style dispatch:
      colfile      : filtered peaks file to load
      parameters   : geometry parameter file
      translations : iterable of (t_x, t_y, t_z) sample positions to try
      gridpars     : dict of grid-indexing options passed to doindex/domap
    """
    colfile, parameters, translations, gridpars = args
    # announce which worker process got this chunk of work
    s = "Hello from %s %d" % (multiprocessing.current_process().name, os.getpid())
    s += " %d to do" % (len(translations))
    s += "%s %s" % (colfile, parameters)
    print(s)
    mytransformer = transformer.transformer()
    mytransformer.loadfiltered(colfile)
    mytransformer.loadfileparameters(parameters)
    w = mytransformer.parameterobj.get("wavelength")
    first = True
    # scale factor so i/ni below prints percent complete
    # NOTE(review): divides by zero later if translations is empty
    ni = len(translations) / 100.0
    for i, (t_x, t_y, t_z) in enumerate(translations):
        mytransformer.updateparameters()
        mytransformer.parameterobj.set_parameters({
            't_x': t_x,
            't_y': t_y,
            't_z': t_z
        })
        # recompute geometry and g-vectors for this trial position
        mytransformer.compute_tth_eta()
        mytransformer.computegv()
        # mytransformer.savegv( tmp+".gve" )
        gve = np.vstack((mytransformer.colfile.gx,
                         mytransformer.colfile.gy,
                         mytransformer.colfile.gz))
        if first:
            first = False
        grains = doindex(gve, t_x, t_y, t_z, w, gridpars)
        ng = len(grains)
        if ng > 0:
            # refine the indexed grains; only keep what survives mapping
            grains = domap(mytransformer.parameterobj,
                           mytransformer.colfile,
                           grains,
                           gridpars)
            nk = len(grains)
            if len(grains) > 0:
                # NOTE(review): .q is presumably a Queue attached to this
                # function object by the pool initializer - confirm caller
                test_many_points.q.put(grains, False)  # do not wait
        else:
            nk = 0
        sys.stderr.write(" % 6.2f%% Position %d %d %d" % (i / ni, t_x, t_y, t_z) +
                         " grains found %d kept %d\n" % (ng, nk))
def initgrid(fltfile, parfile, tmp, gridpars):
    """
    Sets up a grid indexing by preparing the unitcell for indexing
    and checking the columns we want are in the colfile.

    fltfile  : filtered peaks file to load
    parfile  : geometry/cell parameter file
    tmp      : stem for the temporary "<tmp>.flt" file written out
    gridpars : dict of grid-indexing options; gets 'UC' (a unitcell) added

    Returns gridpars (also mutated in place).
    """
    mytransformer = transformer.transformer()
    mytransformer.loadfiltered(fltfile)
    mytransformer.loadfileparameters(parfile)
    # unit cell built from the parameter file, used later by the indexer
    gridpars['UC'] = unitcell.unitcell_from_parameters(
        mytransformer.parameterobj)
    col = mytransformer.colfile
    # FIX: idiomatic membership tests ("x not in y" rather than "not x in y",
    # PEP 8); behavior is identical.
    if "drlv2" not in col.titles:
        col.addcolumn(np.ones(col.nrows, float), "drlv2")
    if "labels" not in col.titles:
        # every peak starts with label -1 (presumably "unassigned" - confirm
        # against the indexer's convention)
        col.addcolumn(np.ones(col.nrows, float) - 2, "labels")
    if "sc" not in col.titles:
        # NOTE(review): assert used for input validation is stripped under -O
        assert "xc" in col.titles
        col.addcolumn(col.xc.copy(), "sc")
    if "fc" not in col.titles:
        assert "yc" in col.titles
        col.addcolumn(col.yc.copy(), "fc")
    mytransformer.colfile.writefile("%s.flt" % (tmp))
    return gridpars
# Report each unique UBI matrix and score it against the filtered data.
# NOTE(review): this fragment is Python 2 (print statements) and uses the
# Numeric-era np.matrixmultiply alias (np.dot in modern numpy). It appears
# truncated - the loop body likely continues beyond what is visible here.
for entry in uniq_ubis:
    print "\n\n"
    for i in range(3):
        for j in range(3):
            # BUG(review): 'ubi' is printed here but only assigned below
            # (ubi = entry[0]); the first iteration uses a stale or
            # undefined ubi. The assignment should precede this loop.
            print "# ubi[%d,%d] = %f" % (i, j, ubi[i, j])
    ubi = entry[0]
    for (name, i) in entry[1]:
        # NOTE(review): j here shadows the loop index j used above
        j, zh = get_z(name)
        print j, "%7.5f" % (zh), name, i,
        # we have the ubi matrix here.
        # we want to refine and score this ubi against some data
        try:
            t = transformer.transformer()
            t.loadfileparameters(parfilename)
            t.loadfiltered(name.replace(".ubi", ""))
            t.computegv()
        except:
            # report which file failed, then re-raise
            print name
            raise
        # bit of a hack - between 10 and 11 degrees
        # real-valued hkl for every measured g-vector
        h = np.matrixmultiply(ubi, t.gv)
        hint = np.floor(h + 0.5).astype(int)  # rounds down
        diff = h - hint
        # squared distance from each hkl to the nearest integer point
        drlv2 = np.sum(diff * diff, 0)
        # indices of peaks whose hkl error lies inside the tolerance
        ind = np.compress(drlv2 < tol * tol, np.arange(t.twotheta.shape[0]))
        avg = 4
        npix = 3
# Merge peaks found at a series of thresholds into one output file.
# NOTE(review): fragment appears truncated - the loop body presumably
# continues past the last visible statement.
print()
print("Into output file %s" % (outf))
#if raw_input("OK? [y/n]") not in ["Y","y"]:
#    sys.exit()
allpks = open(outf, "w")  # merged-peaks output; left open for later writes
allpeaks = {}             # presumably accumulates accepted peaks - confirm
always_ignore = {}        # presumably peaks to skip at every threshold - confirm
goodthres = []            # thresholds whose flt file loaded successfully
for v in thres:
    mytransformer = transformer.transformer()
    mytransformer.loadfileparameters(pars)
    flt = "%s_t%d.flt" % (stem, v)
    print(flt, end=' ')
    try:
        tc = columnfile(flt)
        if tc.nrows == 0:
            print("Skipped", tc, " no peaks")
            continue
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.
        print("Skipped", v, " Exception reading", flt)
        continue
    goodthres.append(v)
    mytransformer.loadfiltered(flt)
    mytransformer.compute_tth_eta()
def fitspline(fltfile, parfile, splinefile):
    """
    Interactively fit a 2-D spatial-distortion spline to peak position errors.

    Assigns peaks to rings via tr.fit(), converts each assigned peak's
    two-theta error into a pixel displacement, projects it onto the
    slow/fast detector directions, and fits scipy bisplrep splines to the
    two displacement fields. Two interactive passes plot the residuals and
    ask the user for an outlier cutoff; a final fit on the surviving points
    is written to *splinefile* via write_spline().
    """
    tr = transformer()
    tr.loadfiltered(fltfile)
    tr.loadfileparameters(parfile)
    print("fitting to assign peaks")
    tr.fit()
    # reload parameters so the fit below starts from the file values
    tr.loadfileparameters(parfile)
    tthobs, eta = tr.compute_tth_eta()
    # -1 marks peaks with no ring assignment
    tthcalc = tthobs * 0.0 - 1
    for tthc, inds in zip(tr.tthc, tr.indices):
        # assignments
        tthcalc[inds] = tthc
    # project the error in the two directions
    sineta = np.sin(np.radians(eta))
    coseta = np.cos(np.radians(eta))
    # convert differences to pixels
    r = np.sqrt(tr.colfile.yl**2 + tr.colfile.zl**2)
    ps = tr.parameterobj.get("y_size")
    pix_obs = r / ps
    pix_calc = r * tthcalc / tthobs / ps
    diffs = pix_calc - pix_obs
    print(diffs)
    # keep only peaks that got a ring assignment
    mask = tthcalc > 0
    xvals = np.compress(mask, tr.colfile.s_raw)
    yvals = np.compress(mask, tr.colfile.f_raw)
    dsvals = np.compress(mask, sineta * diffs)
    dfvals = np.compress(mask, coseta * diffs)
    w = np.ones(len(yvals))
    # smoothing-factor scale for bisplrep's s parameter
    ss = 0.25
    for i in range(2):
        # NOTE(review): 'm' is reused below as a boolean mask - shadows this count
        m = len(yvals)
        s = (m - np.sqrt(2 * m)) * ss
        print("s=", s)
        # one spline per displacement direction (slow, fast)
        rets = bisplrep(yvals, xvals, dsvals, w=w, kx=3, ky=3,
                        xb=0, xe=4096, yb=0, ye=4096,
                        full_output=0, s=s, task=0)
        retf = bisplrep(yvals, xvals, dfvals, w=w, kx=3, ky=3,
                        xb=0, xe=4096, yb=0, ye=4096,
                        full_output=0, s=s, task=0)
        print(rets, retf)
        # evaluate the splines back at the data points to get residuals
        dscalc = [bisplev(y, x, rets) for y, x in zip(yvals, xvals)]
        dfcalc = [bisplev(y, x, retf) for y, x in zip(yvals, xvals)]
        # list - ndarray: numpy broadcasting yields ndarray residuals
        es = dscalc - dsvals
        ef = dfcalc - dfvals
        pylab.ion()
        pylab.figure(1)
        pylab.clf()
        pylab.subplot(121)
        pylab.xlim(0, 4096)
        pylab.ylim(0, 4096)
        pylab.scatter(xvals, yvals, c=es, edgecolors='none')
        pylab.colorbar()
        pylab.subplot(122)
        pylab.scatter(xvals, yvals, c=ef, edgecolors='none')
        pylab.xlim(0, 4096)
        pylab.ylim(0, 4096)
        pylab.colorbar()
        pylab.figure(2)
        pylab.clf()
        pylab.hist(es, bins=128)
        pylab.hist(ef, bins=128)
        pylab.show()
        print("stddev=", ((np.std(es) + np.std(ef)) / 2))
        # co = ((np.std(es) + np.std(ef))/2)*3
        # user picks the outlier cutoff from the plots
        co = float(input("cutoff"))
        print("Using cutoff", co)
        # drop points whose residual exceeds the cutoff in either direction
        m = (np.abs(es) < co) & (np.abs(ef) < co)
        yvals = np.compress(m, yvals)
        xvals = np.compress(m, xvals)
        dsvals = np.compress(m, dsvals)
        dfvals = np.compress(m, dfvals)
        # reweight by the residual scatter for the next pass
        w = np.ones(len(yvals)) / ((np.std(es) + np.std(ef)) / 2)
        print("w avg = ", w.mean())
    # final fit on the cleaned points, then write out the spline file
    m = len(yvals)
    s = (m - np.sqrt(2 * m)) * ss
    print("s=", s)
    rets = bisplrep(yvals, xvals, dsvals, w=w, kx=3, ky=3,
                    xb=0, xe=4096, yb=0, ye=4096,
                    full_output=0, s=s, task=0)
    retf = bisplrep(yvals, xvals, dfvals, w=w, kx=3, ky=3,
                    xb=0, xe=4096, yb=0, ye=4096,
                    full_output=0, s=s, task=0)
    print(rets, retf)
    write_spline(rets, retf, splinefile)
    pylab.show()
    input("End?")