Code example #1
File: transformer.py Project: la-bella/ImageD11
    def addcellpeaks(self, limit=None):
        """
        Adds unit cell predicted peaks for fitting against
        Optional arg limit gives highest angle to use

        Depends on parameters:
            'cell__a','cell__b','cell__c', 'wavelength' # in angstrom
            'cell_alpha','cell_beta','cell_gamma' # in degrees
            'cell_lattice_[P,A,B,C,I,F,R]'
        """
        #
        # Given unit cell, wavelength and distance, compute the radial positions
        # in microns of the unit cell peaks
        #
        pars = self.parameterobj.get_parameters()
        cell = [
            pars[name] for name in [
                'cell__a', 'cell__b', 'cell__c', 'cell_alpha', 'cell_beta',
                'cell_gamma'
            ]
        ]
        lattice = pars['cell_lattice_[P,A,B,C,I,F,R]']
        if "tth" not in self.colfile.titles:
            self.compute_tth_eta()
        # Find last peak in radius
        if limit is None:
            highest = numpy.maximum.reduce(self.getcolumn("tth"))
        else:
            highest = limit
        w = pars['wavelength']
        ds = 2 * numpy.sin(transform.radians(highest) / 2.) / w
        self.dslimit = ds
        self.unitcell = unitcell.unitcell(cell, lattice)
        # If the space group is provided, use xfab to generate unique hkls
        if 'cell_sg' in pars:
            self.theorypeaks = self.unitcell.gethkls_xfab(ds, pars['cell_sg'])
            tths = []
            self.theoryds = []
            for i in range(len(self.theorypeaks)):
                tths.append(2 * numpy.arcsin(w * self.theorypeaks[i][0] / 2.))
                self.theoryds.append(self.theorypeaks[i][0])
        else:
            # HO:  I have removed this part as it seems redundant ringds also calls gethkls
            # JPW: It was not redundant. theorypeaks is not defined anywhere else and you
            #      can't write a g-vector file without it.
            self.theorypeaks = self.unitcell.gethkls(ds)
            self.unitcell.makerings(ds)
            self.theoryds = self.unitcell.ringds
            tths = [
                numpy.arcsin(w * dstar / 2) * 2
                for dstar in self.unitcell.ringds
            ]
        self.theorytth = transform.degrees(numpy.array(tths))
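
The d*-limit used above comes straight from Bragg's law: the highest observed two-theta maps to ds = 2*sin(tth/2)/wavelength, and each predicted ring's d* is mapped back to a two-theta with the inverse relation. A minimal, self-contained sketch of that round trip (plain numpy only, illustrative values, not part of ImageD11):

import numpy as np

wavelength = 0.2646        # angstrom, illustrative
tth_highest = 20.0         # degrees, highest observed two-theta

# forward: two-theta limit -> d* limit (1/angstrom)
ds_limit = 2 * np.sin(np.radians(tth_highest) / 2.0) / wavelength

# backward: a ring's d* -> its predicted two-theta in degrees
ds_ring = 0.9 * ds_limit
tth_ring = np.degrees(2 * np.arcsin(wavelength * ds_ring / 2.0))

print(ds_limit, tth_ring)  # tth_ring is below tth_highest by construction
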
Code example #2
File: test_gv_general.py Project: yns11/tst
    def test_0_10(self):
        """ wedge,chi = 0,10 """
        w = 0.
        c = 10.
        g = transform.compute_g_vectors(self.tth,
                                        self.eta,
                                        self.omega,
                                        self.wvln,
                                        wedge=w,
                                        chi=c)

        post = gv_general.wedgechi(wedge=w, chi=c)

        sol1, sol2, valid = gv_general.g_to_k(
            g,  #
            self.wvln,
            axis=np.array([0, 0, -1], float),
            pre=None,
            post=post)

        c0 = np.cos(transform.radians(self.omega))
        c1 = np.cos(transform.radians(sol1))
        c2 = np.cos(transform.radians(sol2))

        s0 = np.sin(transform.radians(self.omega))
        s1 = np.sin(transform.radians(sol1))
        s2 = np.sin(transform.radians(sol2))

        err1 = np.absolute(c1 - c0) + np.absolute(s1 - s0)
        err2 = np.absolute(c2 - c0) + np.absolute(s2 - s0)
        err = np.minimum(err1, err2)
        self.assertAlmostEqual(array_diff(err, np.zeros(self.np)), 0, 6)
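
The comparison through cosines and sines above is what lets the test tolerate solutions that differ from the input omega by full turns: an angle and the same angle plus 360 degrees give identical (cos, sin) pairs. A small self-contained sketch of that idea (plain numpy, illustrative, not from the test file):

import numpy as np

def angle_mismatch(a_deg, b_deg):
    """Zero when the two angles point the same way, regardless of 360-degree wraps."""
    a, b = np.radians(a_deg), np.radians(b_deg)
    return np.abs(np.cos(a) - np.cos(b)) + np.abs(np.sin(a) - np.sin(b))

omega = np.array([10.0, 350.0])
solution = np.array([370.0, -10.0])     # same directions, wrapped differently
print(angle_mismatch(omega, solution))  # ~[0, 0]
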
Code example #3
 def savegv(self, filename):
     """
     Save g-vectors into a file
     Use the crappy .ascii format from before, for now (testing)
     """
     #        self.parameterobj.update_other(self)
     self.colfile.updateGeometry( self.parameterobj )
     if self.unitcell is None:
         self.addcellpeaks()
     f = open(filename, "w")
     f.write(self.unitcell.tostring())
     f.write("\n")
     pars = self.parameterobj.get_parameters()
     f.write("# wavelength = %f\n" % (float(pars['wavelength'])))
     f.write("# wedge = %f\n" % (float(pars['wedge'])))
     # Handle the axis direction somehow
     f.write("# axis %f %f %f\n" % tuple(self.getaxis()))
     # Put a copy of all the parameters in the gve file
     pkl = list(pars.keys())
     pkl.sort()
     for k in pkl:
         f.write("# %s = %s \n" % (k, pars[k]))
     f.write("# ds h k l\n")
     for peak in self.theorypeaks:
         f.write("%10.7f %4d %4d %4d\n" % (peak[0],
                                           peak[1][0],
                                           peak[1][1],
                                           peak[1][2]))
     tth = self.getcolumn("tth")
     ome = self.getcolumn("omega")
     eta = self.getcolumn("eta")
     gx = self.getcolumn("gx")
     gy = self.getcolumn("gy")
     gz = self.getcolumn("gz")
     x = self.getcolumn(self.xname)
     y = self.getcolumn(self.yname)
     spot3d_id = self.getcolumn("spot3d_id")
     xl = self.getcolumn("xl")
     yl = self.getcolumn("yl")
     zl = self.getcolumn("zl")
     order = numpy.argsort(tth)
     f.write("#  gx  gy  gz  xc  yc  ds  eta  omega  spot3d_id  xl  yl  zl\n")
     print(numpy.maximum.reduce(ome), numpy.minimum.reduce(ome))
     ds = 2 * numpy.sin(transform.radians(tth / 2)) / pars["wavelength"]
     fmt = "%f "*8 + "%d " + "%f "*3 + "\n"
     for i in order:
         f.write(fmt % (gx[i], gy[i], gz[i],
                        x[i], y[i],
                        ds[i], eta[i], ome[i],
                        spot3d_id[i],
                        xl[i], yl[i], zl[i]))
     f.close()
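
The .gve file written above is plain text: "# key = value" header lines carrying the unit cell and geometry parameters, a "# ds h k l" block of theoretical rings, and then one whitespace-separated row per observed peak. A hypothetical helper (not part of the code above) that pulls the parameter header back into a dict could look like this:

def read_gve_header(filename):
    """Collect the '# key = value' lines of a .gve file into a dict (sketch only)."""
    pars = {}
    with open(filename) as f:
        for line in f:
            if line.startswith("#") and "=" in line:
                key, _, value = line[1:].partition("=")
                pars[key.strip()] = value.strip()
    return pars

# e.g. pars = read_gve_header("peaks.gve"); wavelength = float(pars["wavelength"])
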
Code example #4
      stepsize=100.0),
 par('z_size', 48.08150,
      helpstring="pixel size in vertical, same units distance",
      vary=False,
      can_vary=True,
      stepsize=0.1), # this could actually vary - a bit crazy?
 par('y_size', 46.77648,
      helpstring="pixel size in horizontal, same units as distance",
      vary=False,
      can_vary=True,
      stepsize=0.1), # this could actually vary - a bit crazy?
 par('tilt_z', 0.0,
      helpstring="detector tilt, right handed around z",
      vary=True,
      can_vary=True,
      stepsize=transform.radians(0.1)),
 par('tilt_y', 0.0,
      helpstring="detector tilt, right handed around y",
      vary=True,
      can_vary=True,
      stepsize=transform.radians(0.1)),
 par('tilt_x', 0.0,
      helpstring="detector tilt, right handed around x",
      vary=False,
      can_vary=True,
      stepsize=transform.radians(0.1)),
 par('fit_tolerance', 0.05,
      helpstring="tolerance to decide which peaks to use",
      vary=False,
      can_vary=False),
 par('wavelength', 0.155,
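
The fragment above (truncated in this listing) shows how each parameter is declared with a value, a help string, vary/can_vary flags and a step size, with angular step sizes converted from degrees via transform.radians. A minimal stand-in container, for illustration only and not the real ImageD11.parameters API, showing how such entries could be collected:

import math
from dataclasses import dataclass

@dataclass
class Par:                    # hypothetical stand-in, not ImageD11's par class
    name: str
    value: float
    helpstring: str = ""
    vary: bool = False
    can_vary: bool = False
    stepsize: float = 0.0

defaults = [
    Par('y_size', 46.77648, "pixel size in horizontal, same units as distance",
        vary=False, can_vary=True, stepsize=0.1),
    Par('tilt_z', 0.0, "detector tilt, right handed around z",
        vary=True, can_vary=True, stepsize=math.radians(0.1)),
]
values = {p.name: p.value for p in defaults}        # name -> default value
stepsizes = {p.name: p.stepsize for p in defaults}  # name -> refinement step
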
Code example #5
class refinegrains:
    """
    Class to refine the position and orientation of grains
    and also the detector parameters
    """

    # Default parameters
    pars = {
        "cell__a": 4.1569162,
        "cell__b": 4.1569162,
        "cell__c": 4.1569162,
        "cell_alpha": 90.0,
        "cell_beta": 90.0,
        "cell_gamma": 90.0,
        "cell_lattice_[P,A,B,C,I,F,R]": 'P',
        "chi": 0.0,
        "distance": 7367.8452302,
        "fit_tolerance": 0.5,
        "o11": 1,
        "o12": 0,
        "o21": 0,
        "o22": 1,
        "omegasign": 1,
        "t_x": 33.0198146824,
        "t_y": 14.6384893741,
        "t_z": 0.0,
        "tilt_x": 0.0,
        "tilt_y": 0.0623952920101,
        "tilt_z": 0.00995011461696,
        "wavelength": 0.2646,
        "wedge": 0.0,
        "y_center": 732.950204632,
        "y_size": 4.6,
        "z_center": 517.007049626,
        "z_size": 4.6
    }

    # Default stepsizes for the parameters
    stepsizes = {
        "wavelength": 0.001,
        'y_center': 0.2,
        'z_center': 0.2,
        'distance': 200.,
        'tilt_y': transform.radians(0.1),
        'tilt_z': transform.radians(0.1),
        'tilt_x': transform.radians(0.1),
        'wedge': transform.radians(0.1),
        'chi': transform.radians(0.1),
        't_x': 0.2,
        't_y': 0.2,
        't_z': 0.2,
        'y_size': 0.2,
        'z_size': 0.2,
    }

    def __init__(self,
                 tolerance=0.01,
                 intensity_tth_range=(6.1, 6.3),
                 latticesymmetry=triclinic,
                 OmFloat=True,
                 OmSlop=0.25):
        """

        """
        self.OMEGA_FLOAT = OmFloat
        self.slop = OmSlop
        if self.OMEGA_FLOAT:
            print("Using", self.slop, "degree slop")
        else:
            print("Omega is used as observed")
        self.tolerance = tolerance
        # list of ubi matrices (1 for each grain in each scan)
        self.grainnames = []
        self.ubisread = {}
        self.translationsread = {}
        # list of scans and corresponding data
        self.scannames = []
        self.scantitles = {}
        self.scandata = {}
        # grains in each scan
        self.grains = {}
        self.grains_to_refine = []
        self.latticesymmetry = latticesymmetry
        # ?
        self.drlv = None
        self.parameterobj = parameters.parameters(**self.pars)
        self.intensity_tth_range = intensity_tth_range
        self.recompute_xlylzl = False
        for k, s in list(self.stepsizes.items()):
            self.parameterobj.stepsizes[k] = s
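
    # A minimal usage sketch (illustrative only; the file names are placeholders,
    # all methods used are defined further down in this class):
    #
    #     r = refinegrains(tolerance=0.05)
    #     r.loadparameters("pars.prm")
    #     r.loadfiltered("peaks.flt")
    #     r.readubis("grains.ubi")
    #     r.generate_grains()
    #     r.refinepositions()           # refine per-grain t_x, t_y, t_z
    #     r.savegrains("refined.map")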

    def loadparameters(self, filename):
        self.parameterobj.loadparameters(filename)

    def saveparameters(self, filename):
        self.parameterobj.saveparameters(filename)

    def readubis(self, filename):
        """
        Read ubi matrices from a text file
        """
        try:
            ul = grain.read_grain_file(filename)
        except:
            print(filename, type(filename))
            raise
        for i, g in enumerate(ul):
            # name = filename + "_" + str(i)
            # Hmmm .... multiple grain files?
            name = i
            self.grainnames.append(i)
            self.ubisread[name] = g.ubi
            self.translationsread[name] = g.translation
        #print "Grain names",self.grainnames

    def savegrains(self, filename, sort_npks=True):
        """
        Save the refined grains

        """
        ks = list(self.grains.keys())
        # sort by number of peaks indexed to write out
        if sort_npks:
            #        npks in x array
            order = numpy.argsort([self.grains[k].npks for k in ks])
            ks = [ks[i] for i in order[::-1]]
        else:
            ks.sort()
        gl = [(self.grains[k], k) for k in ks]

        # Update the datafile so the grain names reflect indices in the grain list
        for g, k in gl:
            name, fltname = g.name.split(":")
            assert fltname in self.scandata, "Sorry - logical flaw"
            assert len(list(self.scandata.keys())
                       ) == 1, "Sorry - need to fix for multi data"
            self.set_translation(k[0], fltname)
            self.compute_gv(g, update_columns=True)
            numpy.put(self.scandata[fltname].gx, g.ind, self.gv[:, 0])
            numpy.put(self.scandata[fltname].gy, g.ind, self.gv[:, 1])
            numpy.put(self.scandata[fltname].gz, g.ind, self.gv[:, 2])
            hkl_real = numpy.dot(g.ubi, self.gv.T)
            numpy.put(self.scandata[fltname].hr, g.ind, hkl_real[0, :])
            numpy.put(self.scandata[fltname].kr, g.ind, hkl_real[1, :])
            numpy.put(self.scandata[fltname].lr, g.ind, hkl_real[2, :])
            hkl = numpy.floor(hkl_real + 0.5)
            numpy.put(self.scandata[fltname].h, g.ind, hkl[0, :])
            numpy.put(self.scandata[fltname].k, g.ind, hkl[1, :])
            numpy.put(self.scandata[fltname].l, g.ind, hkl[2, :])
            # Count "uniq" reflections...
            sign_eta = numpy.sign(self.scandata[fltname].eta_per_grain[g.ind])
            uniq_list = [(int(h), int(k), int(l), int(s))
                         for (h, k, l), s in zip(hkl.T, sign_eta)]
            g.nuniq = len(set(uniq_list))
        grain.write_grain_file(filename, [g[0] for g in gl])
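
    # Note on nuniq above: it counts distinct (h, k, l, sign(eta)) tuples, so the
    # same hkl observed on both sides of the detector (eta > 0 and eta < 0) is
    # counted twice, while repeated hits of one reflection on the same side
    # collapse to a single "uniq" observation.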

    def makeuniq(self, symmetry):
        """
        Flip orientations to a particular choice

        Be aware that if the unit cell is distorted and you do this,
        you might have problems...
        """
        from ImageD11.sym_u import find_uniq_u, getgroup
        g = getgroup(symmetry)()
        for k in list(self.ubisread.keys()):
            self.ubisread[k] = find_uniq_u(self.ubisread[k], g)
        for k in list(self.grains.keys()):
            self.grains[k].set_ubi(find_uniq_u(self.grains[k].ubi, g))

    def loadfiltered(self, filename):
        """
        Read in file containing filtered and merged peak positions
        """
        col = columnfile.columnfile(filename)
        self.scannames.append(filename)
        self.scantitles[filename] = col.titles
        if not "drlv2" in col.titles:
            col.addcolumn(numpy.ones(col.nrows, numpy.float), "drlv2")
        if not "labels" in col.titles:
            col.addcolumn(numpy.ones(col.nrows, numpy.float) - 2, "labels")
        if not "sc" in col.titles:
            assert "xc" in col.titles
            col.addcolumn(col.xc.copy(), "sc")
        if not "fc" in col.titles:
            assert "yc" in col.titles
            col.addcolumn(col.yc.copy(), "fc")
        self.scandata[filename] = col

    def generate_grains(self):
        t = numpy.array(
            [self.parameterobj.parameters[s] for s in ['t_x', 't_y', 't_z']])
        for grainname in self.grainnames:
            for scanname in self.scannames:
                try:
                    gr = self.grains[(grainname, scanname)]
                except KeyError:
                    if self.translationsread[grainname] is None:
                        self.grains[(grainname, scanname)] = grain.grain(
                            self.ubisread[grainname], translation=t)
                        self.grains[(grainname,scanname)].name = \
                            (str(grainname)+":"+scanname).replace(" ","_")
                    else:
                        self.grains[(grainname, scanname)] = grain.grain(
                            self.ubisread[grainname],
                            translation=self.translationsread[grainname])
                        self.grains[(grainname,scanname)].name = \
                            (str(grainname)+":"+scanname).replace(" ","_")
        for scanname in self.scannames:
            self.reset_labels(scanname)

    def reset_labels(self, scanname):
        print("resetting labels")
        try:
            x = self.scandata[scanname].xc
            y = self.scandata[scanname].yc
        except AttributeError:
            x = self.scandata[scanname].sc
            y = self.scandata[scanname].fc
        om = self.scandata[scanname].omega
        # only for this grain
        self.scandata[scanname].labels = self.scandata[scanname].labels * 0 - 2
        self.scandata[scanname].drlv2 = self.scandata[scanname].drlv2 * 0 + 1
        for g in self.grainnames:
            self.grains[(g, scanname)].x = x
            self.grains[(g, scanname)].y = y
            self.grains[(g, scanname)].om = om

    def compute_gv(self, thisgrain, update_columns=False):
        """
        Makes self.gv refer to the g-vectors computed for this grain in this scan
        """
        peaks_xyz = thisgrain.peaks_xyz
        om = thisgrain.om
        try:
            sign = self.parameterobj.parameters['omegasign']
        except:
            sign = 1.0
        # translation should match grain translation here...
        self.tth, self.eta = transform.compute_tth_eta_from_xyz(
            peaks_xyz.T, omega=om * sign, **self.parameterobj.parameters)

        gv = transform.compute_g_vectors(
            self.tth, self.eta, om * sign,
            float(self.parameterobj.parameters['wavelength']),
            self.parameterobj.parameters['wedge'],
            self.parameterobj.parameters['chi'])

        if self.OMEGA_FLOAT:
            mat = thisgrain.ubi.copy()
            gvT = numpy.ascontiguousarray(gv.T)
            junk = cImageD11.score_and_refine(mat, gvT, self.tolerance)
            hklf = numpy.dot(mat, gv)
            hkli = numpy.round(hklf)

            gcalc = numpy.dot(numpy.linalg.inv(mat), hkli)
            tth, [eta1,
                  eta2], [omega1, omega2] = transform.uncompute_g_vectors(
                      gcalc, float(self.parameterobj.parameters['wavelength']),
                      self.parameterobj.parameters['wedge'],
                      self.parameterobj.parameters['chi'])

            e1e = numpy.abs(eta1 - self.eta)
            e2e = numpy.abs(eta2 - self.eta)
            try:
                eta_err = numpy.array([e1e, e2e])
            except:
                print(e1e.shape, e2e.shape, e1e)

                raise

            best_fitting = numpy.argmin(eta_err, axis=0)

            # These are always 1 or zero
            # pick the right omega (confuddled by take here)
            omega_calc = best_fitting * omega2 + (1 - best_fitting) * omega1
            # Take a weighted average within the omega error of the observed
            omerr = (om * sign - omega_calc)
            # Clip to 360 degree range
            omerr = omerr - (360 * numpy.round(omerr / 360.0))
            # print omerr[0:5]
            omega_calc = om * sign - numpy.clip(omerr, -self.slop, self.slop)
            # print omega_calc[0], om[0]
            thisgrain.omega_calc = omega_calc
            # Now recompute with improved omegas... (tth, eta do not change much)
            #self.tth, self.eta = transform.compute_tth_eta(
            #    numpy.array([x, y]),
            #    omega = omega_calc,
            #    **self.parameterobj.parameters)
            self.tth, self.eta = transform.compute_tth_eta_from_xyz(
                peaks_xyz.T, omega=om * sign, **self.parameterobj.parameters)

            gv = transform.compute_g_vectors(
                self.tth, self.eta, omega_calc,
                float(self.parameterobj.parameters['wavelength']),
                self.parameterobj.parameters['wedge'],
                self.parameterobj.parameters['chi'])

        else:
            thisgrain.omega_calc[:] = 0

        # update tth_per_grain and eta_per_grain
        if update_columns:
            name = thisgrain.name.split(":")[1]
            numpy.put(self.scandata[name].tth_per_grain, thisgrain.ind,
                      self.tth)
            numpy.put(self.scandata[name].eta_per_grain, thisgrain.ind,
                      self.eta)
            if self.OMEGA_FLOAT:
                numpy.put(self.scandata[name].omegacalc_per_grain,
                          thisgrain.ind, omega_calc)

        self.gv = numpy.ascontiguousarray(gv.T)
        return
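
    # Worked example of the OMEGA_FLOAT clipping above (illustrative numbers):
    # with slop = 0.25, an observed omega of 10.0 deg and a calculated omega of
    # 370.3 deg give omerr = -360.3, which wraps to -0.3 and is clipped to -0.25,
    # so omega_calc becomes 10.0 - (-0.25) = 10.25 deg - never more than `slop`
    # degrees away from the observation.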

    def refine(self, ubi, quiet=True):
        """
        Fit the matrix without changing the peak assignments

        """
        mat = ubi.copy()
        # print "In refine",self.tolerance, self.gv.shape
        # First time fits the mat
        self.npks, self.avg_drlv2 = cImageD11.score_and_refine(
            mat, self.gv, self.tolerance)
        # apply symmetry to mat:
        if self.latticesymmetry is not triclinic:
            cp = xfab.tools.ubi_to_cell(mat)
            U = xfab.tools.ubi_to_u(mat)
            mat = xfab.tools.u_to_ubi(U, self.latticesymmetry(cp)).copy()

        # Second time updates the score with the new mat
        self.npks, self.avg_drlv2 = cImageD11.score_and_refine(
            mat, self.gv, self.tolerance)
        # apply symmetry to mat:
        if self.latticesymmetry is not triclinic:
            cp = xfab.tools.ubi_to_cell(mat)
            U = xfab.tools.ubi_to_u(mat)
            mat = xfab.tools.u_to_ubi(U, self.latticesymmetry(cp))

        if not quiet:
            import math
            try:
                print("%-8d %.6f" % (self.npks, math.sqrt(self.avg_drlv2)))
            except:
                print(self.npks, self.avg_drlv2, mat, self.gv.shape,
                      self.tolerance)


#                raise

#print self.tolerance
#        self.npks, self.avg_drlv2 = cImageD11.score_and_refine(mat, self.gv,
#                                                             self.tolerance)
#tm = indexing.refine(ubi,self.gv,self.tolerance,quiet=quiet)
#print ubi, tm,ubi-tm,mat-tm

        return mat
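
    # Design note on refine() above: score_and_refine is called twice on purpose.
    # The first call fits `mat` to the assigned g-vectors; for non-triclinic
    # symmetry the refined cell is then projected onto the constrained lattice
    # via xfab, and the second call re-scores npks and <drlv2> for the
    # symmetrised matrix.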

    def applyargs(self, args):
        self.parameterobj.set_variable_values(args)

    def printresult(self, arg):
        # print self.parameterobj.parameters
        # return
        for i in range(len(self.parameterobj.varylist)):
            item = self.parameterobj.varylist[i]
            value = arg[i]
            try:
                self.parameterobj.parameters[item] = value
                print(item, value)
            except:
                # Hopefully a crystal translation
                pass

    def gof(self, args):
        """
        <drlv> for all of the grains in all of the scans
        """
        self.applyargs(args)
        diffs = 0.
        contribs = 0.

        # defaulting to fitting all grains
        for key in self.grains_to_refine:

            g = self.grains[key]

            ###            rotdex.fitagrain( gr, self.parameterobj )

            grainname = key[0]
            scanname = key[1]

            # Compute gv using current parameters
            # Keep labels fixed
            if self.recompute_xlylzl:
                g.peaks_xyz = transform.compute_xyz_lab(
                    [g.sc, g.fc], **self.parameterobj.parameters).T

            self.compute_gv(g)
            #print self.gv.shape
            #print self.gv[0:10,i:]

            # For stability, always start refining the read in one
            g.set_ubi(self.refine(self.ubisread[grainname]))
            #print self.npks,self.avg_drlv2 # number of peaks it got

            #print self.gv.shape

            diffs += self.npks * self.avg_drlv2
            contribs += self.npks
        if contribs > 0:
            return 1e6 * diffs / contribs
        else:
            print("No contribs???", self.grains_to_refine)
            return 1e6

    def estimate_steps(self, gof, guess, steps):
        assert len(guess) == len(steps)
        cen = self.gof(guess)
        deriv = []
        print("Estimating step sizes")
        for i in range(len(steps)):
            print(self.parameterobj.varylist[i], end=' ')
            newguess = [g for g in guess]
            newguess[i] = newguess[i] + steps[i]
            here = self.gof(newguess)
            print("npks, avgdrlv", self.npks, self.avg_drlv2, end=' ')
            deriv.append(here - cen)
            print("D_gof, d_par, dg/dp", here - cen, steps[i],
                  here - cen / steps[i])
        # Make all match the one which has the biggest impact
        j = numpy.argmax(numpy.absolute(deriv))
        print("Max step size is on", self.parameterobj.varylist[j])
        inc = [s * abs(deriv[j]) / abs(d) for d, s in zip(deriv, steps)]
        print("steps", steps)
        print("inc", inc)
        return inc
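
    # Worked example of the step rescaling in estimate_steps (illustrative): if
    # steps [0.1, 0.2] change the gof by deriv = [0.5, 0.05], the biggest effect
    # is from parameter 0, so inc = [0.1*0.5/0.5, 0.2*0.5/0.05] = [0.1, 2.0] -
    # each step is inflated until it perturbs the gof as much as the most
    # sensitive parameter does.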

    def fit(self, maxiters=100):
        """
        Fit the global parameters
        """
        self.assignlabels()
        guess = self.parameterobj.get_variable_values()
        inc = self.parameterobj.get_variable_stepsizes()
        names = self.parameterobj.varylist
        self.printresult(guess)
        self.grains_to_refine = list(self.grains.keys())
        self.recompute_xlylzl = False
        for n in names:
            if n not in ['t_x', 't_y', 't_z']:
                self.recompute_xlylzl = True
        inc = self.estimate_steps(self.gof, guess, inc)
        s = simplex.Simplex(self.gof, guess, inc)
        newguess, error, iter = s.minimize(maxiters=maxiters, monitor=1)
        print()
        print("names", names)
        print("ng", newguess)
        for p, v in zip(names, newguess):
            # record results
            self.parameterobj.set(p, v)
            print("Setting parameter", p, v)
        trans = ["t_x", "t_y", "t_z"]
        for t in trans:
            if t in names:
                i = trans.index(t)
                # imply that we are using this translation value, not the
                # per grain values
                # This is a f*cking mess - translations should never have been
                # diffractometer parameters
                for g in self.getgrains():
                    self.grains[g].translation[i] = newguess[names.index(t)]
                    print(g, t, i, newguess[names.index(t)])
        print()
        self.printresult(newguess)

    def getgrains(self):
        return list(self.grains.keys())

    def set_translation(self, gr, sc):
        self.parameterobj.parameters['t_x'] = self.grains[(gr,
                                                           sc)].translation[0]
        self.parameterobj.parameters['t_y'] = self.grains[(gr,
                                                           sc)].translation[1]
        self.parameterobj.parameters['t_z'] = self.grains[(gr,
                                                           sc)].translation[2]

    def refinepositions(self, quiet=True, maxiters=100):
        self.assignlabels()
        ks = list(self.grains.keys())
        ks.sort()
        # assignments are now fixed
        tolcache = self.tolerance
        self.tolerance = 1.0
        for key in ks:
            g = key[0]
            self.grains_to_refine = [key]
            self.parameterobj.varylist = ['t_x', 't_y', 't_z']
            self.set_translation(key[0], key[1])
            guess = self.parameterobj.get_variable_values()
            inc = self.parameterobj.get_variable_stepsizes()

            s = simplex.Simplex(self.gof, guess, inc)

            newguess, error, iter = s.minimize(maxiters=maxiters, monitor=1)

            self.grains[key].translation[0] = self.parameterobj.parameters[
                't_x']
            self.grains[key].translation[1] = self.parameterobj.parameters[
                't_y']
            self.grains[key].translation[2] = self.parameterobj.parameters[
                't_z']
            print(key, self.grains[key].translation, end=' ')
            self.refine(self.grains[key].ubi, quiet=False)
        self.tolerance = tolcache

    def refineubis(self, quiet=True, scoreonly=False):
        #print quiet
        ks = list(self.grains.keys())
        ks.sort()
        if not quiet:
            print("%10s %10s" % ("grainname", "scanname"), end=' ')
            print("npeak   <drlv> ")
        for key in ks:
            g = self.grains[key]
            grainname = key[0]
            scanname = key[1]
            if not quiet:
                print("%10s %10s" % (grainname, scanname), end=' ')
            # Compute gv using current parameters, including grain position
            self.set_translation(key[0], key[1])
            self.compute_gv(g)
            res = self.refine(g.ubi, quiet=quiet)
            if not scoreonly:
                g.set_ubi(res)

    def assignlabels(self, quiet=False):
        """
        Fill out the appropriate labels for the spots
        """
        if not quiet:
            print("Assigning labels with XLYLZL")
        import time
        start = time.time()
        for s in self.scannames:
            self.scandata[s].labels = self.scandata[s].labels * 0 - 2  # == -2
            drlv2 = numpy.zeros(len(self.scandata[s].drlv2), float) + 1
            nr = self.scandata[s].nrows
            sc = self.scandata[s].sc
            fc = self.scandata[s].fc
            om = self.scandata[s].omega
            # Looks like this in one dataset only
            ng = len(self.grainnames)
            int_tmp = numpy.zeros(nr, numpy.int32) - 1
            tmp = transform.compute_xyz_lab(
                [self.scandata[s].sc, self.scandata[s].fc],
                **self.parameterobj.parameters)
            peaks_xyz = tmp.T.copy()
            if not quiet:
                print("Start first grain loop", time.time() - start)
            start = time.time()
            gv = numpy.zeros((nr, 3), float)
            wedge = self.parameterobj.parameters['wedge']
            omegasign = self.parameterobj.parameters['omegasign']
            chi = self.parameterobj.parameters['chi']
            wvln = self.parameterobj.parameters['wavelength']
            first_loop = time.time()
            drlv2 = (self.scandata[s].drlv2 * 0 + 1).astype(float)  # == 1
            int_tmp = numpy.zeros(nr, numpy.int32) - 1
            for ig, g in enumerate(self.grainnames):
                gr = self.grains[(g, s)]
                self.set_translation(g, s)
                cImageD11.compute_gv(peaks_xyz, self.scandata[s].omega,
                                     omegasign, wvln, wedge, chi,
                                     gr.translation, gv)
                cImageD11.score_and_assign(gr.ubi, gv, self.tolerance, drlv2,
                                           int_tmp, int(g))
            if not quiet:
                print(time.time() - first_loop, "First loop")

            self.gv = gv.copy()
            # Second loop after checking all grains
            if not quiet:
                print("End first grain loop", time.time() - start)
            start = time.time()

            if not quiet:
                print(self.scandata[s].labels.shape, \
                      numpy.minimum.reduce(self.scandata[s].labels),\
                        numpy.maximum.reduce(self.scandata[s].labels))

            self.scandata[s].addcolumn(int_tmp, "labels")
            self.scandata[s].addcolumn(drlv2, "drlv2")
            if not quiet:
                print(self.scandata[s].labels.shape, \
                      numpy.minimum.reduce(self.scandata[s].labels),\
                      numpy.maximum.reduce(self.scandata[s].labels))

            tth = numpy.zeros(nr, numpy.float32) - 1
            eta = numpy.zeros(nr, numpy.float32)

            self.scandata[s].addcolumn(tth, "tth_per_grain")
            self.scandata[s].addcolumn(eta, "eta_per_grain")
            self.scandata[s].addcolumn(om * 0, "omegacalc_per_grain")
            self.scandata[s].addcolumn(self.gv[:, 0], "gx")
            self.scandata[s].addcolumn(self.gv[:, 1], "gy")
            self.scandata[s].addcolumn(self.gv[:, 2], "gz")
            self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "hr")
            self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "kr")
            self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "lr")
            self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "h")
            self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "k")
            self.scandata[s].addcolumn(numpy.zeros(nr, numpy.float32), "l")

            if not quiet:
                print("Start second grain loop", time.time() - start)
            start = time.time()

            # We have the labels set in self.scandata!!!
            for g in self.grainnames:
                gr = self.grains[(g, s)]

                ind = numpy.compress(int_tmp == g, numpy.arange(nr))
                #print 'x',gr.x[:10]
                #print 'ind',ind[:10]
                gr.ind = ind  # use this to push back h,k,l later
                gr.peaks_xyz = numpy.take(peaks_xyz, ind, axis=0)
                gr.sc = numpy.take(sc, ind)
                gr.fc = numpy.take(fc, ind)
                gr.om = numpy.take(self.scandata[s].omega, ind)
                gr.omega_calc = gr.om.copy()
                gr.npks = len(gr.ind)
                self.set_translation(g, s)
                try:
                    sign = self.parameterobj.parameters['omegasign']
                except:
                    sign = 1.0

                tth, eta = transform.compute_tth_eta_from_xyz(
                    gr.peaks_xyz.T,
                    omega=gr.om * sign,
                    **self.parameterobj.parameters)

                self.scandata[s].tth_per_grain[ind] = tth
                self.scandata[s].eta_per_grain[ind] = eta
                #                self.scandata[s].omegacalc_per_grain[ind] = gr.omega_calc
                self.grains[(g, s)] = gr
                if not quiet:
                    print("Grain", g, "Scan", s, "npks=", len(ind))
                #print 'x',gr.x[:10]
            # Compute the total integrated intensity if we have enough
            # information available
            compute_lp_factor(self.scandata[s])
            for g in self.grainnames:
                gr = self.grains[(g, s)]
                gr.intensity_info = compute_total_intensity(
                    self.scandata[s],
                    gr.ind,
                    self.intensity_tth_range,
                    quiet=quiet)

            if not quiet:
                print("End second grain loop", time.time() - start)
                print()
            start = time.time()