def sxcal_obj_func(plist_fit,
                   plist_full,
                   param_flags,
                   dfuncs,
                   dparam_flags,
                   ndparams,
                   instr,
                   xyo_det,
                   hkls_idx,
                   bmat,
                   vinv_s,
                   ome_period,
                   bvec,
                   evec,
                   sim_only=False,
                   return_value_flag=None):
    """
    """
    # stack flags and force bool repr
    refine_flags = np.array(np.hstack([param_flags, dparam_flags]), dtype=bool)

    # fill out full parameter list
    # !!! no scaling for now
    plist_full[refine_flags] = plist_fit

    # instrument quantities
    wavelength = plist_full[0]
    chi = plist_full[1]
    tvec_s = plist_full[2:5]

    # calibration crystal quantities
    rmat_c = xfcapi.makeRotMatOfExpMap(plist_full[5:8])
    tvec_c = plist_full[8:11]

    # right now just stuck on the end and assumed
    # to all be the same length... FIX THIS
    dparams_all = plist_full[-len(dparam_flags):]
    xy_unwarped = {}
    meas_omes = {}
    calc_omes = {}
    calc_xy = {}
    ii = 11  # offset to start of panels...
    jj = 0
    npts_tot = 0
    for det_key, panel in instr.detectors.items():
        xy_unwarped[det_key] = xyo_det[det_key][:, :2]
        npts_tot += len(xyo_det[det_key])
        dfunc = dfuncs[det_key]
        len_these_dps = ndparams[det_key]
        if dfunc is not None:  # do unwarping
            dparams = dparams_all[jj:jj + len_these_dps]
            jj += len_these_dps
            xy_unwarped[det_key] = dfunc(xy_unwarped[det_key], dparams)
            pass
        meas_omes[det_key] = xyo_det[det_key][:, 2]

        # get these panel params for convenience
        gparams = plist_full[ii:ii + 6]

        rmat_d = xfcapi.makeDetectorRotMat(gparams[:3])
        tvec_d = gparams[3:].reshape(3, 1)

        # transform G-vectors:
        # 1) convert inv. stretch tensor from MV notation into 3x3
        # 2) take reciprocal lattice vectors from CRYSTAL to SAMPLE frame
        # 3) apply stretch tensor
        # 4) normalize reciprocal lattice vectors in SAMPLE frame
        # 5) transform unit reciprocal lattice vectors back to CRYSTAL frame
        gvec_c = np.dot(bmat, hkls_idx[det_key].T)
        vmat_s = mutil.vecMVToSymm(vinv_s)
        ghat_s = mutil.unitVector(np.dot(vmat_s, np.dot(rmat_c, gvec_c)))
        ghat_c = np.dot(rmat_c.T, ghat_s)

        match_omes, calc_omes_tmp = fitting.matchOmegas(xyo_det[det_key],
                                                        hkls_idx[det_key].T,
                                                        chi,
                                                        rmat_c,
                                                        bmat,
                                                        wavelength,
                                                        vInv=vinv_s,
                                                        beamVec=bvec,
                                                        etaVec=evec,
                                                        omePeriod=ome_period)

        rmat_s_arr = xfcapi.makeOscillRotMatArray(
            chi, np.ascontiguousarray(calc_omes_tmp))
        calc_xy_tmp = xfcapi.gvecToDetectorXYArray(ghat_c.T, rmat_d,
                                                   rmat_s_arr, rmat_c, tvec_d,
                                                   tvec_s, tvec_c)
        if np.any(np.isnan(calc_xy_tmp)):
            print("infeasible parameters: " +
                  "may want to scale back finite difference step size")

        calc_omes[det_key] = calc_omes_tmp
        calc_xy[det_key] = calc_xy_tmp

        ii += 6
        pass

    # return values
    if sim_only:
        retval = {}
        for det_key in calc_xy.keys():
            # ??? calc_xy is always 2-d
            retval[det_key] = np.vstack(
                [calc_xy[det_key].T, calc_omes[det_key]]).T
    else:
        meas_xy_all = []
        calc_xy_all = []
        meas_omes_all = []
        calc_omes_all = []
        for det_key in xy_unwarped.keys():
            meas_xy_all.append(xy_unwarped[det_key])
            calc_xy_all.append(calc_xy[det_key])
            meas_omes_all.append(meas_omes[det_key])
            calc_omes_all.append(calc_omes[det_key])
            pass
        meas_xy_all = np.vstack(meas_xy_all)
        calc_xy_all = np.vstack(calc_xy_all)
        meas_omes_all = np.hstack(meas_omes_all)
        calc_omes_all = np.hstack(calc_omes_all)

        diff_vecs_xy = calc_xy_all - meas_xy_all
        diff_ome = xfcapi.angularDifference(calc_omes_all, meas_omes_all)
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts_tot, 1)]).flatten()
        if return_value_flag == 1:
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            denom = npts_tot - len(plist_fit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2)
    return retval
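
A minimal usage sketch, not from the hexrd sources: sxcal_obj_func has the shape of a residual function for scipy.optimize.leastsq, with the refined subset plist_fit scattered back into plist_full through the boolean flags. The driver name run_sxcal and the bare leastsq call below are illustrative assumptions; the data arguments are assumed to be assembled exactly as the function expects.

import numpy as np
from scipy import optimize


def run_sxcal(plist_full, param_flags, dfuncs, dparam_flags, ndparams,
              instr, xyo_det, hkls_idx, bmat, vinv_s, ome_period, bvec, evec):
    # refined subset of the full parameter list, per the boolean flags
    refine_flags = np.hstack([param_flags, dparam_flags]).astype(bool)
    plist_fit0 = plist_full[refine_flags]
    fit_args = (plist_full, param_flags, dfuncs, dparam_flags, ndparams,
                instr, xyo_det, hkls_idx, bmat, vinv_s, ome_period, bvec, evec)
    # least squares on the stacked (x, y, omega) residual vector
    plist_fit1, ierr = optimize.leastsq(sxcal_obj_func, plist_fit0,
                                        args=fit_args)
    plist_full[refine_flags] = plist_fit1
    return plist_full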
Example #2
File: fitting.py, Project: MShaffar19/hexrd
def objFuncFitGrain(gFit,
                    gFull,
                    gFlag,
                    detectorParams,
                    xyo_det,
                    hkls_idx,
                    bMat,
                    wavelength,
                    bVec,
                    eVec,
                    dFunc,
                    dParams,
                    omePeriod,
                    simOnly=False,
                    return_value_flag=None):
    """
    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    detectorParams[0]  = tiltAngles[0]
    detectorParams[1]  = tiltAngles[1]
    detectorParams[2]  = tiltAngles[2]
    detectorParams[3]  = tVec_d[0]
    detectorParams[4]  = tVec_d[1]
    detectorParams[5]  = tVec_d[2]
    detectorParams[6]  = chi
    detectorParams[7]  = tVec_s[0]
    detectorParams[8]  = tVec_s[1]
    detectorParams[9]  = tVec_s[2]
    """
    npts = len(xyo_det)

    gFull[gFlag] = gFit

    xy_unwarped = dFunc(xyo_det[:, :2], dParams)

    rMat_d = xfcapi.makeDetectorRotMat(detectorParams[:3])
    tVec_d = detectorParams[3:6].reshape(3, 1)
    chi = detectorParams[6]
    tVec_s = detectorParams[7:10].reshape(3, 1)

    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)  # NOTE: Inverse of V from F = V * R

    gVec_c = np.dot(bMat, hkls_idx)  # gVecs with magnitudes in CRYSTAL frame
    gVec_s = np.dot(vMat_s, np.dot(rMat_c,
                                   gVec_c))  # stretched gVecs in SAMPLE frame
    gHat_c = mutil.unitVector(np.dot(
        rMat_c.T, gVec_s))  # unit reciprocal lattice vectors in CRYSTAL frame

    match_omes, calc_omes = matchOmegas(xyo_det,
                                        hkls_idx,
                                        chi,
                                        rMat_c,
                                        bMat,
                                        wavelength,
                                        vInv=vInv_s,
                                        beamVec=bVec,
                                        etaVec=eVec,
                                        omePeriod=omePeriod)

    rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes)
    calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T,
                                           rMat_d,
                                           rMat_s,
                                           rMat_c,
                                           tVec_d,
                                           tVec_s,
                                           tVec_c,
                                           beamVec=bVec)

    if np.any(np.isnan(calc_xy)):
        print "infeasible pFull"

    # return values
    if simOnly:
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        diff_vecs_xy = calc_xy - xy_unwarped[:, :2]
        diff_ome = xf.angularDifference(calc_omes, xyo_det[:, 2])
        retval = np.hstack([diff_vecs_xy, diff_ome.reshape(npts, 1)]).flatten()
        if return_value_flag == 1:
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            denom = npts - len(gFit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2 / abs(
                np.hstack([calc_xy, calc_omes.reshape(npts, 1)]).flatten()))
    return retval
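
A hedged usage sketch for the function above, assuming gFull holds the 12 grain parameters laid out in the docstring, gFlag marks the entries to refine, and all measured quantities have been prepared by the caller; fit_one_grain and the plain leastsq call are illustrative, not part of fitting.py.

import numpy as np
from scipy import optimize


def fit_one_grain(gFull, gFlag, detectorParams, xyo_det, hkls_idx, bMat,
                  wavelength, bVec, eVec, dFunc, dParams, omePeriod):
    gFit0 = gFull[gFlag]  # initial guess: the refined subset of gFull
    fit_args = (gFull, gFlag, detectorParams, xyo_det, hkls_idx, bMat,
                wavelength, bVec, eVec, dFunc, dParams, omePeriod)
    gFit1, ierr = optimize.leastsq(objFuncFitGrain, gFit0, args=fit_args)
    gOpt = np.array(gFull, dtype=float)
    gOpt[gFlag] = gFit1  # full 12-parameter vector with refined values
    return gOpt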
Example #3
def _grand_loop_precomp(image_stack, all_angles, test_crds, experiment, controller):
    """grand loop precomputing the grown image stack"""
    subprocess = 'dilate image_stack'

    dilation_shape = np.ones((2*experiment.row_dilation + 1,
                              2*experiment.col_dilation + 1),
                             dtype=np.uint8)
    image_stack_dilated = np.empty_like(image_stack)
    n_images = len(image_stack)
    controller.start(subprocess, n_images)
    for i_image in range(n_images):
        ski_dilation(image_stack[i_image], dilation_shape, out=image_stack_dilated[i_image])
        controller.update(i_image+1)
    controller.finish(subprocess)

    n_grains = experiment.n_grains
    n_coords = controller.limit('coords', len(test_crds))
    _project = xrdutil._project_on_detector_plane
    chunk_size = controller.get_chunk_size()
    ncpus = controller.get_process_count()

    # precompute per-grain stuff
    subprocess = 'precompute gVec_cs'
    controller.start(subprocess, len(all_angles))
    gvec_cs_precomp = []
    for i, angs in enumerate(all_angles):
        rMat_ss = xfcapi.makeOscillRotMatArray(experiment.chi, angs[:,2])
        gvec_cs = _anglesToGVec(angs, rMat_ss, experiment.rMat_c[i])
        gvec_cs_precomp.append((gvec_cs, rMat_ss))
    controller.finish(subprocess)

    # split on coords
    chunks = range(0, n_coords, chunk_size)
    subprocess = 'grand_loop'
    controller.start(subprocess, n_coords)
    finished = 0
    ncpus = min(ncpus, len(chunks))

    if ncpus > 1:
        shared_arr = multiprocessing.Array('d', n_grains * n_coords)
        confidence = np.ctypeslib.as_array(shared_arr.get_obj()).reshape(n_grains, n_coords)
        with multiproc_state(chunk_size, confidence, image_stack_dilated, all_angles,
                             gvec_cs_precomp, test_crds, experiment):
            pool = multiprocessing.Pool(ncpus)
            for count in pool.imap_unordered(multiproc_inner_loop, chunks):
                finished += count
                controller.update(finished)
            del pool
    else:
        confidence = np.empty((n_grains, n_coords))
        for chunk_start in chunks:
            chunk_stop = min(n_coords, chunk_start+chunk_size)
            count = _grand_loop_inner(confidence, image_stack_dilated,
                                      all_angles, gvec_cs_precomp, test_crds,
                                      experiment, start=chunk_start,
                                      stop=chunk_stop)
            finished += count
            controller.update(finished)

    controller.finish(subprocess)
    controller.handle_result("confidence", confidence)
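
The controller argument is touched only through a handful of hooks in the loop above. A minimal stand-in, written as an assumption about that interface (the real controller classes in hexrd may do more, e.g. logging and timing), could look like this:

class SimpleController(object):
    """Bare-bones progress/result controller exposing only the hooks used above."""

    def __init__(self, ncpus=1, chunk_size=100, limits=None):
        self.ncpus = ncpus
        self.chunk_size = chunk_size
        self.limits = limits or {}   # e.g. {'coords': 1000} to cap the test grid
        self.results = {}

    def start(self, name, count):
        print("starting %s (%d steps)" % (name, count))

    def update(self, done):
        pass  # hook for a progress bar; `done` is the cumulative count

    def finish(self, name):
        print("finished %s" % name)

    def limit(self, name, value):
        return min(value, self.limits.get(name, value))

    def get_chunk_size(self):
        return self.chunk_size

    def get_process_count(self):
        return self.ncpus

    def handle_result(self, name, value):
        self.results[name] = value  # e.g. the 'confidence' array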
Example #4
def _grand_loop_precomp(image_stack, all_angles, test_crds, experiment,
                        controller):
    """grand loop precomputing the grown image stack"""
    subprocess = 'dilate image_stack'

    dilation_shape = np.ones(
        (2 * experiment.row_dilation + 1, 2 * experiment.col_dilation + 1),
        dtype=np.uint8)
    image_stack_dilated = np.empty_like(image_stack)
    n_images = len(image_stack)
    controller.start(subprocess, n_images)
    for i_image in range(n_images):
        ski_dilation(image_stack[i_image],
                     dilation_shape,
                     out=image_stack_dilated[i_image])
        controller.update(i_image + 1)
    controller.finish(subprocess)

    n_grains = experiment.n_grains
    n_coords = controller.limit('coords', len(test_crds))
    _project = xrdutil._project_on_detector_plane
    chunk_size = controller.get_chunk_size()
    ncpus = controller.get_process_count()

    # precompute per-grain stuff
    subprocess = 'precompute gVec_cs'
    controller.start(subprocess, len(all_angles))
    gvec_cs_precomp = []
    for i, angs in enumerate(all_angles):
        rMat_ss = xfcapi.makeOscillRotMatArray(experiment.chi, angs[:, 2])
        gvec_cs = _anglesToGVec(angs, rMat_ss, experiment.rMat_c[i])
        gvec_cs_precomp.append((gvec_cs, rMat_ss))
    controller.finish(subprocess)

    # split on coords
    chunks = range(0, n_coords, chunk_size)
    subprocess = 'grand_loop'
    controller.start(subprocess, n_coords)
    finished = 0
    ncpus = min(ncpus, len(chunks))

    if ncpus > 1:
        shared_arr = multiprocessing.Array('d', n_grains * n_coords)
        confidence = np.ctypeslib.as_array(shared_arr.get_obj()).reshape(
            n_grains, n_coords)
        with multiproc_state(chunk_size, confidence, image_stack_dilated,
                             all_angles, gvec_cs_precomp, test_crds,
                             experiment):
            pool = multiprocessing.Pool(ncpus)
            for count in pool.imap_unordered(multiproc_inner_loop, chunks):
                finished += count
                controller.update(finished)
            del pool
    else:
        confidence = np.empty((n_grains, n_coords))
        for chunk_start in chunks:
            chunk_stop = min(n_coords, chunk_start + chunk_size)
            count = _grand_loop_inner(confidence,
                                      image_stack_dilated,
                                      all_angles,
                                      gvec_cs_precomp,
                                      test_crds,
                                      experiment,
                                      start=chunk_start,
                                      stop=chunk_stop)
            finished += count
            controller.update(finished)

    controller.finish(subprocess)
    controller.handle_result("confidence", confidence)
Example #5
def objFuncFitGrain(gFit, gFull, gFlag,
                    detectorParams,
                    xyo_det, hkls_idx, bMat, wavelength,
                    bVec, eVec,
                    dFunc, dParams,
                    omePeriod,
                    simOnly=False, return_value_flag=None):
    """
    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    detectorParams[0]  = tiltAngles[0]
    detectorParams[1]  = tiltAngles[1]
    detectorParams[2]  = tiltAngles[2]
    detectorParams[3]  = tVec_d[0]
    detectorParams[4]  = tVec_d[1]
    detectorParams[5]  = tVec_d[2]
    detectorParams[6]  = chi
    detectorParams[7]  = tVec_s[0]
    detectorParams[8]  = tVec_s[1]
    detectorParams[9]  = tVec_s[2]
    """
    npts   = len(xyo_det)

    gFull[gFlag] = gFit

    xy_unwarped = dFunc(xyo_det[:, :2], dParams)

    rMat_d = xfcapi.makeDetectorRotMat(detectorParams[:3])
    tVec_d = detectorParams[3:6].reshape(3, 1)
    chi    = detectorParams[6]
    tVec_s = detectorParams[7:10].reshape(3, 1)

    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)              # NOTE: Inverse of V from F = V * R

    gVec_c = np.dot(bMat, hkls_idx)                 # gVecs with magnitudes in CRYSTAL frame
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c)) # stretched gVecs in SAMPLE frame
    gHat_c = mutil.unitVector(
        np.dot(rMat_c.T, gVec_s)) # unit reciprocal lattice vectors in CRYSTAL frame

    match_omes, calc_omes = matchOmegas(xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength,
                                        vInv=vInv_s, beamVec=bVec, etaVec=eVec,
                                        omePeriod=omePeriod)

    rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes)
    calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T,
                                           rMat_d, rMat_s, rMat_c,
                                           tVec_d, tVec_s, tVec_c,
                                           beamVec=bVec)

    if np.any(np.isnan(calc_xy)):
        print "infeasible pFull"

    # return values
    if simOnly:
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        diff_vecs_xy = calc_xy - xy_unwarped[:, :2]
        diff_ome     = xf.angularDifference( calc_omes, xyo_det[:, 2] )
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts, 1)
                            ]).flatten()
        if return_value_flag == 1:
            retval = sum( abs(retval) )
        elif return_value_flag == 2:
            denom = npts - len(gFit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2 / abs(np.hstack([calc_xy, calc_omes.reshape(npts, 1)]).flatten()))
    return retval
Example #6
def test_orientations(image_stack, experiment, controller):
    """grand loop precomputing the grown image stack

    image-stack -- is the image stack to be tested against.

    experiment  -- A bunch of experiment related parameters.

    controller  -- An external object implementing the hooks to notify progress
                   as well as figuring out what to do with results.
    """

    # extract some information needed =========================================
    # number of grains, number of coords (maybe limited by call), projection
    # function to use, chunk size to use if multiprocessing and the number
    # of cpus.
    n_grains = experiment.n_grains
    chunk_size = controller.get_chunk_size()
    ncpus = controller.get_process_count()

    # generate angles =========================================================
    # all_angles will be a list containing arrays for the different angles to
    # use, one entry per grain.
    #
    # Note that the angle generation is driven by the exp_maps in the experiment
    all_angles = evaluate_diffraction_angles(experiment, controller)

    # generate coords =========================================================
    # The grid of coords to use to test
    test_crds = generate_test_grid(-0.25, 0.25, 101)
    n_coords = controller.limit('coords', len(test_crds))

    # first, perform image dilation ===========================================
    # perform image dilation (using scikit_image dilation)
    subprocess = 'dilate image_stack'
    dilation_shape = np.ones((2*experiment.row_dilation + 1,
                              2*experiment.col_dilation + 1),
                             dtype=np.uint8)
    image_stack_dilated = np.empty_like(image_stack)
    dilated = np.empty((image_stack.shape[-2], image_stack.shape[-1] << 3),
                       dtype=bool)
    n_images = len(image_stack)
    controller.start(subprocess, n_images)
    for i_image in range(n_images):
        to_dilate = np.unpackbits(image_stack[i_image], axis=-1)
        ski_dilation(to_dilate, dilation_shape,
                     out=dilated)
        image_stack_dilated[i_image] = np.packbits(dilated, axis=-1)
        controller.update(i_image+1)
    controller.finish(subprocess)

    # precompute per-grain stuff ==============================================
    # gVec_cs and rmat_ss can be precomputed, do so.
    subprocess = 'precompute gVec_cs'
    controller.start(subprocess, len(all_angles))
    precomp = []
    for i, angs in enumerate(all_angles):
        rmat_ss = xfcapi.makeOscillRotMatArray(experiment.chi, angs[:,2])
        gvec_cs = _anglesToGVec(angs, rmat_ss, experiment.rMat_c[i])
        precomp.append((gvec_cs, rmat_ss))
    controller.finish(subprocess)

    # grand loop ==============================================================
    # The near field simulation 'grand loop'. Where the bulk of computing is
    # performed. We are looking for a confidence matrix that has a n_grains
    chunks = range(0, n_coords, chunk_size)
    subprocess = 'grand_loop'
    controller.start(subprocess, n_coords)
    finished = 0
    ncpus = min(ncpus, len(chunks))

    logging.info('Checking confidence for %d coords, %d grains.',
                 n_coords, n_grains)
    confidence = np.empty((n_grains, n_coords))
    if ncpus > 1:
        global _multiprocessing_start_method
        logging.info('Running multiprocess %d processes (%s)',
                     ncpus, _multiprocessing_start_method)
        with grand_loop_pool(ncpus=ncpus, state=(chunk_size,
                                                 image_stack_dilated,
                                                 all_angles, precomp, test_crds,
                                                 experiment)) as pool:
            for rslice, rvalues in pool.imap_unordered(multiproc_inner_loop,
                                                       chunks):
                count = rvalues.shape[1]
                confidence[:, rslice] = rvalues
                finished += count
                controller.update(finished)
    else:
        logging.info('Running in a single process')
        for chunk_start in chunks:
            chunk_stop = min(n_coords, chunk_start+chunk_size)
            rslice, rvalues = _grand_loop_inner(image_stack_dilated, all_angles,
                                                precomp, test_crds, experiment,
                                                start=chunk_start,
                                                stop=chunk_stop)
            count = rvalues.shape[1]
            confidence[:, rslice] = rvalues
            finished += count
            controller.update(finished)

    controller.finish(subprocess)
    controller.handle_result("confidence", confidence)
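
The dilation step above assumes the image stack is bit-packed along its last axis (one bit per pixel, hence the << 3 when sizing the scratch buffer). A self-contained sketch of that round trip, using skimage.morphology.binary_dilation as a stand-in for the module's ski_dilation alias (an assumption about what that alias refers to):

import numpy as np
from skimage.morphology import binary_dilation


def dilate_packed_image(packed, row_dilation=1, col_dilation=1):
    # rectangular footprint matching the dilation_shape built above
    footprint = np.ones((2 * row_dilation + 1, 2 * col_dilation + 1),
                        dtype=np.uint8)
    # unpack 1-bit pixels into a (rows, cols * 8) boolean image
    unpacked = np.unpackbits(packed, axis=-1).astype(bool)
    dilated = binary_dilation(unpacked, footprint)
    # repack so downstream code sees the same bit-packed layout
    return np.packbits(dilated, axis=-1)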
Example #7
File: nfutil.py, Project: rachelelim/hexrd
def test_orientations(image_stack, experiment, test_crds, controller,
                      multiprocessing_start_method):
    """grand loop precomputing the grown image stack

    image-stack -- is the image stack to be tested against.

    experiment  -- A bunch of experiment related parameters.

    controller  -- An external object implementing the hooks to notify progress
                   as well as figuring out what to do with results.
    """

    # extract some information needed =========================================
    # number of grains, number of coords (maybe limited by call), projection
    # function to use, chunk size to use if multiprocessing and the number
    # of cpus.
    n_grains = experiment.n_grains
    chunk_size = controller.get_chunk_size()
    ncpus = controller.get_process_count()

    # generate angles =========================================================
    # all_angles will be a list containing arrays for the different angles to
    # use, one entry per grain.
    #
    # Note that the angle generation is driven by the exp_maps in the experiment
    all_angles = evaluate_diffraction_angles(experiment, controller)

    # generate coords =========================================================
    # The grid of coords to use to test
    #test_crds = generate_test_grid(-0.25, 0.25, 101)
    n_coords = controller.limit('coords', len(test_crds))
    #
    #    # first, perform image dilation ===========================================
    #    # perform image dilation (using scikit_image dilation)
    #    subprocess = 'dilate image_stack'
    #    dilation_shape = np.ones((2*experiment.row_dilation + 1,
    #                              2*experiment.col_dilation + 1),
    #                             dtype=np.uint8)
    #    image_stack_dilated = np.empty_like(image_stack)
    #    dilated = np.empty((image_stack.shape[-2], image_stack.shape[-1]<<3),
    #                       dtype=np.bool)
    #    n_images = len(image_stack)
    #    controller.start(subprocess, n_images)
    #    for i_image in range(n_images):
    #        to_dilate = np.unpackbits(image_stack[i_image], axis=-1)
    #        ski_dilation(to_dilate, dilation_shape,
    #                     out=dilated)
    #        image_stack_dilated[i_image] = np.packbits(dilated, axis=-1)
    #        controller.update(i_image+1)
    #    controller.finish(subprocess)

    # precompute per-grain stuff ==============================================
    # gVec_cs and rmat_ss can be precomputed, do so.
    subprocess = 'precompute gVec_cs'
    controller.start(subprocess, len(all_angles))
    precomp = []
    for i, angs in enumerate(all_angles):
        rmat_ss = xfcapi.makeOscillRotMatArray(experiment.chi, angs[:, 2])
        gvec_cs = _anglesToGVec(angs, rmat_ss, experiment.rMat_c[i])
        precomp.append((gvec_cs, rmat_ss))
    controller.finish(subprocess)

    # grand loop ==============================================================
    # The near field simulation 'grand loop'. Where the bulk of computing is
    # performed. We are looking for a confidence matrix that has a n_grains
    chunks = range(0, n_coords, chunk_size)
    subprocess = 'grand_loop'
    controller.start(subprocess, n_coords)
    finished = 0
    ncpus = min(ncpus, len(chunks))

    logging.info('Checking confidence for %d coords, %d grains.', n_coords,
                 n_grains)
    confidence = np.empty((n_grains, n_coords))
    if ncpus > 1:
        global _multiprocessing_start_method
        _multiprocessing_start_method = multiprocessing_start_method
        logging.info('Running multiprocess %d processes (%s)', ncpus,
                     _multiprocessing_start_method)
        with grand_loop_pool(ncpus=ncpus,
                             state=(chunk_size, image_stack, all_angles,
                                    precomp, test_crds, experiment)) as pool:
            for rslice, rvalues in pool.imap_unordered(multiproc_inner_loop,
                                                       chunks):
                count = rvalues.shape[1]
                confidence[:, rslice] = rvalues
                finished += count
                controller.update(finished)
        del _multiprocessing_start_method

        pool.close()
    else:
        logging.info('Running in a single process')
        for chunk_start in chunks:
            chunk_stop = min(n_coords, chunk_start + chunk_size)
            rslice, rvalues = _grand_loop_inner(image_stack,
                                                all_angles,
                                                precomp,
                                                test_crds,
                                                experiment,
                                                start=chunk_start,
                                                stop=chunk_stop)
            count = rvalues.shape[1]
            confidence[:, rslice] = rvalues
            finished += count
            controller.update(finished)

    controller.finish(subprocess)
    controller.handle_result("confidence", confidence)
    #del _multiprocessing_start_method

    #pool.close()

    return confidence
Example #8
def objFuncFitGrain(gFit, gFull, gFlag,
                    instrument,
                    reflections_dict,
                    bMat, wavelength,
                    omePeriod,
                    simOnly=False,
                    return_value_flag=None):
    """
    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    OLD CALL
    objFuncFitGrain(gFit, gFull, gFlag,
                    detectorParams,
                    xyo_det, hkls_idx, bMat, wavelength,
                    bVec, eVec,
                    dFunc, dParams,
                    omePeriod,
                    simOnly=False, return_value_flag=return_value_flag)
    """

    bVec = instrument.beam_vector
    eVec = instrument.eta_vector

    # fill out parameters
    gFull[gFlag] = gFit

    # map parameters to functional arrays
    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)  # NOTE: Inverse of V from F = V * R

    # loop over instrument panels
    # CAVEAT: keeping track of key ordering in the "detectors" attribute of
    # instrument here because I am not sure if instantiating them using
    # dict.fromkeys() preserves the same order if using iteration...
    # <JVB 2017-10-31>
    calc_omes_dict = dict.fromkeys(instrument.detectors, [])
    calc_xy_dict = dict.fromkeys(instrument.detectors)
    meas_xyo_all = []
    det_keys_ordered = []
    for det_key, panel in instrument.detectors.items():
        det_keys_ordered.append(det_key)

        rMat_d, tVec_d, chi, tVec_s = extract_detector_transformation(
            instrument.detector_parameters[det_key])

        results = reflections_dict[det_key]
        if len(results) == 0:
            continue

        """
        extract data from results list fields:
          refl_id, gvec_id, hkl, sum_int, max_int, pred_ang, meas_ang, meas_xy

        or array from spots tables:
          0:5    ID    PID    H    K    L       
          5:7    sum(int)    max(int)      
          7:10   pred tth    pred eta    pred ome                 
          10:13  meas tth    meas eta    meas ome                 
          13:15  pred X    pred Y                   
          15:17  meas X    meas Y                 
        """
        if isinstance(results, list):
            # WARNING: hkls and derived vectors below must be columnwise;
            # strictly necessary??? change affected APIs instead?
            # <JVB 2017-03-26>
            hkls = np.atleast_2d(
                np.vstack([x[2] for x in results])
            ).T
            
            meas_xyo = np.atleast_2d(
                np.vstack([np.r_[x[7], x[6][-1]] for x in results])
            )
        elif isinstance(results, np.ndarray):
            hkls = np.atleast_2d(results[:, 2:5]).T
            meas_xyo = np.atleast_2d(results[:, [15, 16, 12]])

        # FIXME: distortion handling must change to class-based
        if panel.distortion is not None:
            meas_omes = meas_xyo[:, 2]
            xy_unwarped = panel.distortion[0](
                    meas_xyo[:, :2], panel.distortion[1])
            meas_xyo = np.vstack([xy_unwarped.T, meas_omes]).T
            pass

        # append to meas_omes
        meas_xyo_all.append(meas_xyo)

        # G-vectors:
        #   1. calculate full g-vector components in CRYSTAL frame from B
        #   2. rotate into SAMPLE frame and apply stretch
        #   3. rotate back into CRYSTAL frame and normalize to unit magnitude
        # IDEA: make a function for this sequence of operations with option for
        # choosing output frame (i.e. CRYSTAL vs SAMPLE vs LAB)
        gVec_c = np.dot(bMat, hkls)
        gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))
        gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s))

        # !!!: check that this operates on UNWARPED xy
        match_omes, calc_omes = matchOmegas(
            meas_xyo, hkls, chi, rMat_c, bMat, wavelength,
            vInv=vInv_s, beamVec=bVec, etaVec=eVec,
            omePeriod=omePeriod)

        # append to omes dict
        calc_omes_dict[det_key] = calc_omes

        # TODO: try Numba implementations
        rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes)
        calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T,
                                               rMat_d, rMat_s, rMat_c,
                                               tVec_d, tVec_s, tVec_c,
                                               beamVec=bVec)

        # append to xy dict
        calc_xy_dict[det_key] = calc_xy
        pass

    # stack results to concatenated arrays
    calc_omes_all = np.hstack([calc_omes_dict[k] for k in det_keys_ordered])
    tmp = []
    for k in det_keys_ordered:
        if calc_xy_dict[k] is not None:
            tmp.append(calc_xy_dict[k])
    calc_xy_all = np.vstack(tmp)
    meas_xyo_all = np.vstack(meas_xyo_all)

    npts = len(meas_xyo_all)
    if np.any(np.isnan(calc_xy)):
        raise RuntimeError(
            "infeasible pFull: may want to scale "
            "back finite difference step size")

    # return values
    if simOnly:
        # return simulated values
        if return_value_flag in [None, 1]:
            retval = np.hstack([calc_xy_all, calc_omes_all.reshape(npts, 1)])
        else:
            rd = dict.fromkeys(det_keys_ordered)
            for det_key in det_keys_ordered:
                rd[det_key] = {'calc_xy': calc_xy_dict[det_key],
                               'calc_omes': calc_omes_dict[det_key]}
            retval = rd
    else:
        # return residual vector
        # IDEA: try angles instead of xys?
        diff_vecs_xy = calc_xy_all - meas_xyo_all[:, :2]
        diff_ome = xf.angularDifference(calc_omes_all, meas_xyo_all[:, 2])
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts, 1)
                            ]).flatten()
        if return_value_flag == 1:
            # return scalar sum of squared residuals
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            # return DOF-normalized chisq
            # TODO: check this calculation
            denom = 3*npts - len(gFit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2)
    return retval
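
The G-vector block inside the panel loop is the same three-step sequence in every variant of this function. A small helper capturing it, sketched after the IDEA comment in the source and reusing the np and mutil aliases imported at the top of the original module (not shown in this excerpt):

def stretched_unit_gvecs_c(hkls, bMat, rMat_c, vInv_s):
    # 1. full g-vectors (with magnitudes) in the CRYSTAL frame from B
    gVec_c = np.dot(bMat, hkls)
    # 2. rotate into the SAMPLE frame and apply the inverse stretch tensor
    vMat_s = mutil.vecMVToSymm(vInv_s)
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))
    # 3. rotate back into the CRYSTAL frame and normalize to unit magnitude
    return mutil.unitVector(np.dot(rMat_c.T, gVec_s))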
Example #9
def check_indexing_plots(cfg_filename,
                         plot_trials=False,
                         plot_from_grains=False):
    cfg = config.open(cfg_filename)[0]  # use first block, like indexing

    working_dir = cfg.working_dir
    analysis_dir = os.path.join(working_dir, cfg.analysis_name)

    #instrument parameters
    icfg = get_instrument_parameters(cfg)
    chi = icfg['oscillation_stage']['chi']

    # load maps that were used
    oem = cPickle.load(open(cfg.find_orientations.orientation_maps.file, 'r'))
    nmaps = len(oem.dataStore)
    omeEdges = np.degrees(oem.omeEdges)
    nome = len(omeEdges) - 1
    etaEdges = np.degrees(oem.etaEdges)
    neta = len(etaEdges) - 1
    delta_ome = abs(omeEdges[1] - omeEdges[0])

    full_ome_range = xf.angularDifference(omeEdges[0], omeEdges[-1]) == 0
    full_eta_range = xf.angularDifference(etaEdges[0], etaEdges[-1]) == 0

    # grab plane data and figure out IDs of map HKLS
    pd = oem.planeData
    gvids = [
        pd.hklDataList[i]['hklID']
        for i in np.where(pd.exclusions == False)[0].tolist()
    ]

    # load orientations
    quats = np.atleast_2d(
        np.loadtxt(os.path.join(working_dir, 'accepted_orientations.dat')))
    if plot_trials:
        scored_trials = np.load(
            os.path.join(working_dir, 'scored_orientations.dat'))
        quats = scored_trials[:4, scored_trials[-1, :] >=
                              cfg.find_orientations.clustering.completeness]
        pass
    expMaps = np.tile(2. * np.arccos(quats[:, 0]),
                      (3, 1)) * unitVector(quats[:, 1:].T)

    ##########################################
    #      SPECIAL CASE FOR FIT GRAINS       #
    ##########################################
    if plot_from_grains:
        distortion = (GE_41RT, icfg['detector']['distortion']['parameters'])
        #
        grain_table = np.atleast_2d(
            np.loadtxt(os.path.join(analysis_dir, 'grains.out')))
        ngrains = len(grain_table)
        #
        expMaps = grain_table[:, 3:6]
        tVec_c = grain_table[:, 6:9]
        vInv = grain_table[:, 6:12]
        #
        rMat_d = xf.makeDetectorRotMat(
            icfg['detector']['transform']['tilt_angles'])
        tVec_d = np.vstack(icfg['detector']['transform']['t_vec_d'])
        #
        chi = icfg['oscillation_stage']['chi']
        tVec_s = np.vstack(icfg['oscillation_stage']['t_vec_s'])
        #
        oes = np.zeros(oem.dataStore.shape)
        for i_grn in range(ngrains):
            spots_table = np.loadtxt(
                os.path.join(analysis_dir, 'spots_%05d.out' % i_grn))
            idx_m = spots_table[:, 0] >= 0
            for i_map in range(nmaps):
                idx_g = spots_table[:, 1] == gvids[i_map]
                idx = np.logical_and(idx_m, idx_g)
                nrefl = sum(idx)

                omes_fit = xf.mapAngle(spots_table[idx, 9],
                                       np.radians(
                                           cfg.find_orientations.omega.period),
                                       units='radians')
                xy_det = spots_table[idx, -3:]
                xy_det[:, 2] = np.zeros(nrefl)

                rMat_s_array = xfcapi.makeOscillRotMatArray(chi, omes_fit)

                # form in-plane vectors for detector points list in DETECTOR FRAME
                P2_d = xy_det.T

                # in LAB FRAME
                P2_l = np.dot(rMat_d, P2_d) + tVec_d  # point on detector
                P0_l = np.hstack([
                    tVec_s +
                    np.dot(rMat_s_array[j], tVec_c[i_grn, :].reshape(3, 1))
                    for j in range(nrefl)
                ])  # origin of CRYSTAL FRAME

                # diffraction unit vector components in LAB FRAME
                dHat_l = unitVector(P2_l - P0_l)
                P2_l = np.dot(rMat_d, xy_det.T) + tVec_d

                # angles for reference frame
                dHat_ref_l = unitVector(P2_l)

                # append etas and omes
                etas_fit = np.arctan2(dHat_ref_l[1, :],
                                      dHat_ref_l[0, :]).flatten()

                # find indices, then truncate or wrap
                i_ome = cellIndices(oem.omeEdges, omes_fit)
                if full_ome_range:
                    i_ome[i_ome < 0] = np.mod(i_ome, nome) + 1
                    i_ome[i_ome >= nome] = np.mod(i_ome, nome)
                else:
                    incl = np.logical_or(i_ome >= 0, i_ome < nome)
                    i_ome = i_ome[incl]
                j_eta = cellIndices(oem.etaEdges, etas_fit)
                if full_eta_range:
                    j_eta[j_eta < 0] = np.mod(j_eta, neta) + 1
                    j_eta[j_eta >= neta] = np.mod(j_eta, neta)
                else:
                    incl = np.logical_or(j_eta >= 0, j_eta < neta)
                    j_eta = j_eta[incl]

                #if np.max(i_ome) >= nome or np.min(i_ome) < 0 or np.max(j_eta) >= neta or np.min(j_eta) < 0:
                #    import pdb; pdb.set_trace()
                # add to map
                oes[i_map][i_ome, j_eta] = 1
            pass
        pass

    # simulate quaternion points
    if not plot_from_grains:
        oes = simulateOmeEtaMaps(omeEdges,
                                 etaEdges,
                                 pd,
                                 expMaps,
                                 chi=chi,
                                 etaTol=0.01,
                                 omeTol=0.01,
                                 etaRanges=None,
                                 omeRanges=None,
                                 bVec=xf.bVec_ref,
                                 eVec=xf.eta_ref,
                                 vInv=xf.vInv_ref)

    # tick labeling
    omes = np.degrees(oem.omeEdges)
    etas = np.degrees(oem.etaEdges)
    num_ticks = 7
    xmin = np.amin(etas)
    xmax = np.amax(etas)
    dx = (xmax - xmin) / (num_ticks - 1.)
    dx1 = (len(etas) - 1) / (num_ticks - 1.)
    xtlab = ["%.0f" % (xmin + i * dx) for i in range(num_ticks)]
    xtloc = np.array([i * dx1 for i in range(num_ticks)]) - 0.5
    ymin = np.amin(omes)
    ymax = np.amax(omes)
    dy = (ymax - ymin) / (num_ticks - 1.)
    dy1 = (len(omes) - 1) / (num_ticks - 1.)
    ytlab = ["%.0f" % (ymin + i * dy) for i in range(num_ticks)]
    ytloc = np.array([i * dy1 for i in range(num_ticks)]) - 0.5

    # Plot the three kernel density estimates
    n_maps = len(oem.iHKLList)

    fig_list = [plt.figure(num=i + 1) for i in range(n_maps)]
    ax_list = [fig_list[i].gca() for i in range(n_maps)]
    for i_map in range(n_maps):
        y, x = np.where(oes[i_map] > 0)
        ax_list[i_map].hold(True)
        ax_list[i_map].imshow(oem.dataStore[i_map] > 0.1, cmap=cm.bone)
        ax_list[i_map].set_title(r'Map for $\{%d %d %d\}$' %
                                 tuple(pd.hkls[:, i_map]))
        ax_list[i_map].set_xlabel(
            r'Azimuth channel, $\eta$; $\Delta\eta=%.3f$' % delta_ome)
        ax_list[i_map].set_ylabel(
            r'Rotation channel, $\omega$; $\Delta\omega=%.3f$' % delta_ome)
        ax_list[i_map].plot(x, y, 'c+')
        ax_list[i_map].xaxis.set_ticks(xtloc)
        ax_list[i_map].xaxis.set_ticklabels(xtlab)
        ax_list[i_map].yaxis.set_ticks(ytloc)
        ax_list[i_map].yaxis.set_ticklabels(ytlab)
        ax_list[i_map].axis('tight')
    plt.show()
    return fig_list, oes
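
The expMaps line above converts unit quaternions to exponential-map (axis-angle) vectors. The same computation as a small helper, assuming quats holds rows of scalar-first unit quaternions [q0, q1, q2, q3] and unitVector is the hexrd helper used above:

def quats_to_exp_maps(quats):
    quats = np.atleast_2d(quats)
    # rotation angle is 2*arccos(q0); clip guards against round-off outside [-1, 1]
    angles = 2. * np.arccos(np.clip(quats[:, 0], -1., 1.))
    axes = unitVector(quats[:, 1:].T)      # (3, n) unit rotation axes
    return np.tile(angles, (3, 1)) * axes  # (3, n) exponential maps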
Example #10
def check_indexing_plots(cfg_filename, plot_trials=False, plot_from_grains=False):
    cfg = config.open(cfg_filename)[0]    # use first block, like indexing

    working_dir = cfg.working_dir
    analysis_dir = os.path.join(working_dir, cfg.analysis_name)
    
    #instrument parameters
    icfg = get_instrument_parameters(cfg)
    chi = icfg['oscillation_stage']['chi']

    # load maps that were used
    oem = cPickle.load(
        open(cfg.find_orientations.orientation_maps.file, 'r')
        )
    nmaps = len(oem.dataStore)
    omeEdges = np.degrees(oem.omeEdges); nome = len(omeEdges) - 1
    etaEdges = np.degrees(oem.etaEdges); neta = len(etaEdges) - 1
    delta_ome = abs(omeEdges[1]-omeEdges[0])

    full_ome_range = xf.angularDifference(omeEdges[0], omeEdges[-1]) == 0
    full_eta_range = xf.angularDifference(etaEdges[0], etaEdges[-1]) == 0
    
    # grab plane data and figure out IDs of map HKLS
    pd = oem.planeData
    gvids = [pd.hklDataList[i]['hklID'] for i in np.where(pd.exclusions == False)[0].tolist()]

    # load orientations
    quats = np.atleast_2d(np.loadtxt(os.path.join(working_dir, 'accepted_orientations.dat')))
    if plot_trials:
        scored_trials = np.load(os.path.join(working_dir, 'scored_orientations.dat'))
        quats = scored_trials[:4, scored_trials[-1, :] >= cfg.find_orientations.clustering.completeness]
        pass
    expMaps = np.tile(2. * np.arccos(quats[:, 0]), (3, 1))*unitVector(quats[:, 1:].T)

    ##########################################
    #      SPECIAL CASE FOR FIT GRAINS       #
    ##########################################
    if plot_from_grains:
        distortion = (GE_41RT, icfg['detector']['distortion']['parameters'])
        #
        grain_table = np.atleast_2d(np.loadtxt(os.path.join(analysis_dir, 'grains.out')))
        ngrains = len(grain_table)
        #
        expMaps = grain_table[:, 3:6]
        tVec_c = grain_table[:, 6:9]
        vInv = grain_table[:, 6:12]
        #
        rMat_d = xf.makeDetectorRotMat(icfg['detector']['transform']['tilt_angles'])
        tVec_d = np.vstack(icfg['detector']['transform']['t_vec_d'])
        #
        chi = icfg['oscillation_stage']['chi']
        tVec_s = np.vstack(icfg['oscillation_stage']['t_vec_s'])
        #
        oes = np.zeros(oem.dataStore.shape)
        for i_grn in range(ngrains):
            spots_table = np.loadtxt(os.path.join(analysis_dir, 'spots_%05d.out' %i_grn))
            idx_m = spots_table[:, 0] >= 0
            for i_map in range(nmaps):
                idx_g = spots_table[:, 1] == gvids[i_map]
                idx = np.logical_and(idx_m, idx_g)
                nrefl = sum(idx)
                
                omes_fit = xf.mapAngle(spots_table[idx, 9], np.radians(cfg.find_orientations.omega.period), units='radians')
                xy_det = spots_table[idx, -3:]
                xy_det[:, 2] = np.zeros(nrefl)
                
                rMat_s_array = xfcapi.makeOscillRotMatArray(chi, omes_fit)
                
                # form in-plane vectors for detector points list in DETECTOR FRAME
                P2_d = xy_det.T
                
                # in LAB FRAME
                P2_l = np.dot(rMat_d, P2_d) + tVec_d # point on detector
                P0_l = np.hstack(
                    [tVec_s + np.dot(rMat_s_array[j], tVec_c[i_grn, :].reshape(3, 1)) for j in range(nrefl)]
                ) # origin of CRYSTAL FRAME

                # diffraction unit vector components in LAB FRAME
                dHat_l = unitVector(P2_l - P0_l)
                P2_l = np.dot(rMat_d, xy_det.T) + tVec_d
                
                # angles for reference frame
                dHat_ref_l = unitVector(P2_l)
    
                # append etas and omes
                etas_fit = np.arctan2(dHat_ref_l[1, :], dHat_ref_l[0, :]).flatten()
           
                # find indices, then truncate or wrap
                i_ome = cellIndices(oem.omeEdges, omes_fit)
                if full_ome_range:
                    i_ome[i_ome < 0] = np.mod(i_ome, nome) + 1
                    i_ome[i_ome >= nome] = np.mod(i_ome, nome)
                else:
                    incl = np.logical_or(i_ome >= 0, i_ome < nome)
                    i_ome = i_ome[incl]
                j_eta = cellIndices(oem.etaEdges, etas_fit)
                if full_eta_range:
                    j_eta[j_eta < 0] = np.mod(j_eta, neta) + 1
                    j_eta[j_eta >= neta] = np.mod(j_eta, neta)
                else:
                    incl = np.logical_or(j_eta >= 0, j_eta < neta)
                    j_eta = j_eta[incl]

                #if np.max(i_ome) >= nome or np.min(i_ome) < 0 or np.max(j_eta) >= neta or np.min(j_eta) < 0:
                #    import pdb; pdb.set_trace()
                # add to map
                oes[i_map][i_ome, j_eta] = 1
            pass
        pass
    
    # simulate quaternion points
    if not plot_from_grains:
        oes = simulateOmeEtaMaps(omeEdges, etaEdges, pd,
                                 expMaps,
                                 chi=chi,
                                 etaTol=0.01, omeTol=0.01,
                                 etaRanges=None, omeRanges=None,
                                 bVec=xf.bVec_ref, eVec=xf.eta_ref, vInv=xf.vInv_ref)
    
    # tick labeling
    omes = np.degrees(oem.omeEdges)
    etas = np.degrees(oem.etaEdges)
    num_ticks = 7
    xmin = np.amin(etas); xmax = np.amax(etas)
    dx = (xmax - xmin) / (num_ticks - 1.); dx1 = (len(etas) - 1) / (num_ticks - 1.)
    xtlab = ["%.0f" % (xmin + i*dx) for i in range(num_ticks)]
    xtloc = np.array([i*dx1 for i in range(num_ticks)]) - 0.5
    ymin = np.amin(omes); ymax = np.amax(omes)
    dy = (ymax - ymin) / (num_ticks - 1.); dy1 = (len(omes) - 1) / (num_ticks - 1.)
    ytlab = ["%.0f" % (ymin + i*dy) for i in range(num_ticks)]
    ytloc = np.array([i*dy1 for i in range(num_ticks)]) - 0.5
    
    # Plot the three kernel density estimates
    n_maps = len(oem.iHKLList)
    
    fig_list =[plt.figure(num=i+1) for i in range(n_maps)]
    ax_list = [fig_list[i].gca() for i in range(n_maps)]
    for i_map in range(n_maps):
        y, x = np.where(oes[i_map] > 0)
        ax_list[i_map].hold(True)
        ax_list[i_map].imshow(oem.dataStore[i_map] > 0.1, cmap=cm.bone)
        ax_list[i_map].set_title(r'Map for $\{%d %d %d\}$' %tuple(pd.hkls[:, i_map]))
        ax_list[i_map].set_xlabel(r'Azimuth channel, $\eta$; $\Delta\eta=%.3f$' %delta_ome)
        ax_list[i_map].set_ylabel(r'Rotation channel, $\omega$; $\Delta\omega=%.3f$' %delta_ome)
        ax_list[i_map].plot(x, y, 'c+')
        ax_list[i_map].xaxis.set_ticks(xtloc)
        ax_list[i_map].xaxis.set_ticklabels(xtlab)
        ax_list[i_map].yaxis.set_ticks(ytloc)
        ax_list[i_map].yaxis.set_ticklabels(ytlab)
        ax_list[i_map].axis('tight')
    plt.show()
    return fig_list, oes
Example #11
File: fitting.py, Project: rachelelim/hexrd
def objFuncFitGrain(gFit,
                    gFull,
                    gFlag,
                    instrument,
                    reflections_dict,
                    bMat,
                    wavelength,
                    omePeriod,
                    simOnly=False,
                    return_value_flag=None):
    """
    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    OLD CALL
    objFuncFitGrain(gFit, gFull, gFlag,
                    detectorParams,
                    xyo_det, hkls_idx, bMat, wavelength,
                    bVec, eVec,
                    dFunc, dParams,
                    omePeriod,
                    simOnly=False, return_value_flag=return_value_flag)
    """

    bVec = instrument.beam_vector
    eVec = instrument.eta_vector

    # fill out parameters
    gFull[gFlag] = gFit

    # map parameters to functional arrays
    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)  # NOTE: Inverse of V from F = V * R

    # loop over instrument panels
    # CAVEAT: keeping track of key ordering in the "detectors" attribute of
    # instrument here because I am not sure if instantiating them using
    # dict.fromkeys() preserves the same order if using iteration...
    # <JVB 2017-10-31>
    calc_omes_dict = dict.fromkeys(instrument.detectors, [])
    calc_xy_dict = dict.fromkeys(instrument.detectors)
    meas_xyo_all = []
    det_keys_ordered = []
    for det_key, panel in instrument.detectors.items():
        det_keys_ordered.append(det_key)

        # extract transformation quantities
        rMat_d = instrument.detectors[det_key].rmat
        tVec_d = instrument.detectors[det_key].tvec
        chi = instrument.chi
        tVec_s = instrument.tvec

        results = reflections_dict[det_key]
        if len(results) == 0:
            continue
        """
        extract data from results list
        fields:
          refl_id, gvec_id, hkl, sum_int, max_int, pred_ang, meas_ang, meas_xy
        """

        # WARNING: hkls and derived vectors below must be columnwise;
        # strictly necessary??? change affected APIs instead?
        # <JVB 2017-03-26>
        hkls = np.atleast_2d(np.vstack([x[2] for x in results])).T

        meas_xyo = np.atleast_2d(
            np.vstack([np.r_[x[7], x[6][-1]] for x in results]))

        # FIXME: distortion handling must change to class-based
        if panel.distortion is not None:
            meas_omes = meas_xyo[:, 2]
            xy_unwarped = panel.distortion[0](meas_xyo[:, :2],
                                              panel.distortion[1])
            meas_xyo = np.vstack([xy_unwarped.T, meas_omes]).T
            pass

        # append to meas_omes
        meas_xyo_all.append(meas_xyo)

        # G-vectors:
        #   1. calculate full g-vector components in CRYSTAL frame from B
        #   2. rotate into SAMPLE frame and apply stretch
        #   3. rotate back into CRYSTAL frame and normalize to unit magnitude
        # IDEA: make a function for this sequence of operations with option for
        # choosing output frame (i.e. CRYSTAL vs SAMPLE vs LAB)
        gVec_c = np.dot(bMat, hkls)
        gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))
        gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s))

        # !!!: check that this operates on UNWARPED xy
        match_omes, calc_omes = matchOmegas(meas_xyo,
                                            hkls,
                                            chi,
                                            rMat_c,
                                            bMat,
                                            wavelength,
                                            vInv=vInv_s,
                                            beamVec=bVec,
                                            etaVec=eVec,
                                            omePeriod=omePeriod)

        # append to omes dict
        calc_omes_dict[det_key] = calc_omes

        # TODO: try Numba implementations
        rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes)
        calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T,
                                               rMat_d,
                                               rMat_s,
                                               rMat_c,
                                               tVec_d,
                                               tVec_s,
                                               tVec_c,
                                               beamVec=bVec)

        # append to xy dict
        calc_xy_dict[det_key] = calc_xy
        pass

    # stack results to concatenated arrays
    calc_omes_all = np.hstack([calc_omes_dict[k] for k in det_keys_ordered])
    tmp = []
    for k in det_keys_ordered:
        if calc_xy_dict[k] is not None:
            tmp.append(calc_xy_dict[k])
    calc_xy_all = np.vstack(tmp)
    meas_xyo_all = np.vstack(meas_xyo_all)

    npts = len(meas_xyo_all)
    if np.any(np.isnan(calc_xy)):
        raise RuntimeError("infeasible pFull: may want to scale" +
                           "back finite difference step size")

    # return values
    if simOnly:
        # return simulated values
        if return_value_flag in [None, 1]:
            retval = np.hstack([calc_xy_all, calc_omes_all.reshape(npts, 1)])
        else:
            rd = dict.fromkeys(det_keys_ordered)
            for det_key in det_keys_ordered:
                rd[det_key] = {
                    'calc_xy': calc_xy_dict[det_key],
                    'calc_omes': calc_omes_dict[det_key]
                }
            retval = rd
    else:
        # return residual vector
        # IDEA: try angles instead of xys?
        diff_vecs_xy = calc_xy_all - meas_xyo_all[:, :2]
        diff_ome = xf.angularDifference(calc_omes_all, meas_xyo_all[:, 2])
        retval = np.hstack([diff_vecs_xy, diff_ome.reshape(npts, 1)]).flatten()
        if return_value_flag == 1:
            # return scalar sum of squared residuals
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            # return DOF-normalized chisq
            # TODO: check this calculation
            denom = 3 * npts - len(gFit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2)
    return retval
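
For reference, the return_value_flag == 2 branch shared by these fitting examples reduces the residual vector to a degrees-of-freedom normalized sum of squares. A standalone sketch (the examples disagree on whether the point count enters as npts or 3*npts, and the source flags the calculation with a TODO):

import numpy as np


def dof_normalized_chisq(residuals, npts, nfit):
    # residuals carries 3 entries (x, y, omega) per reflection
    denom = 3. * npts - nfit - 1.
    nu_fac = 1. / denom if denom != 0 else 1.
    return nu_fac * np.sum(residuals**2)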