Example #1
def test_query_bt_info(start, composition_key, wavelength_key, expect):
    config = mod.query_bt_info(start,
                               composition_key=composition_key,
                               wavelength_key=wavelength_key,
                               default_composition="Ni")
    pdfconfig = PDFConfig(**config)
    assert (pdfconfig.composition, pdfconfig.wavelength) == expect
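This test is presumably driven by pytest parametrization. Below is a minimal sketch of how the parameters could be wired up; the start document, key names and expected tuple are illustrative assumptions, not values from the original test suite.

import pytest

# Hypothetical parametrization that would sit directly above the test in Example #1;
# the start document and the expected (composition, wavelength) pair are assumptions.
@pytest.mark.parametrize(
    "start, composition_key, wavelength_key, expect",
    [
        ({"sample_composition": "Ni", "bt_wavelength": 0.1665},
         "sample_composition", "bt_wavelength", ("Ni", 0.1665)),
    ],
)
def test_query_bt_info(start, composition_key, wavelength_key, expect):
    ...  # body as shown above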
Example #2
def process_data(data: tp.Dict[str, np.ndarray], memory: tp.Dict[str,
                                                                 np.ndarray],
                 lsq_comps: tp.List[str], composition: str,
                 config: LSQConfig) -> tp.Dict[str, np.ndarray]:
    """Process the data from the event."""
    # the interpolated target y data
    y = get_interp_data(data, config)
    # the component matrix where each row is the data of one component
    x = np.stack([memory[k] for k in lsq_comps], axis=0)
    # the model that decomposes the target into weights on the component matrix
    model = Model(y, x, config.fit_config.p, xgrid=config.interp_config.x)
    opt.least_squares(model.cost_func, x0=model.x0)
    # transform the data to the PDF
    pdfconfig = PDFConfig(**config.trans_config, composition=composition)
    pdfgetter = PDFGetter(pdfconfig)
    pdfgetter(model.xgrid, model.yres)
    return {
        "y": model.y,
        "x": model.x,
        "w": model.w,
        "xgrid": model.xgrid,
        "yres": model.yres,
        "r": pdfgetter.gr[0],
        "g": pdfgetter.gr[1]
    }
Example #3
def process(
    *,
    raw_img: np.ndarray,
    ai: AzimuthalIntegrator,
    dk_img: np.ndarray = None,
    dk_sub_bg_img: np.ndarray = None,
    integ_setting: dict = None,
    mask_setting: dict = None,
    pdfgetx_setting: dict = None,
) -> dict:
    """The function to process the data from event."""
    data = dict()
    # dark subtraction
    if dk_img is None:
        dk_img = np.zeros_like(raw_img)
    dk_sub_img = np.subtract(raw_img, dk_img)
    data.update({"dk_sub_image": dk_sub_img})
    # background subtraction
    if dk_sub_bg_img is None:
        dk_sub_bg_img = np.zeros_like(dk_sub_img)
    bg_sub_img = np.subtract(dk_sub_img, dk_sub_bg_img)
    data.update({"bg_sub_image": bg_sub_img})
    # auto masking
    mask, _ = integ.auto_mask(bg_sub_img, ai, mask_setting=mask_setting)
    data.update({"mask": mask})
    # integration
    x, y = ai.integrate1d(bg_sub_img, mask=mask, **integ_setting)
    chi_max_ind = np.argmax(y)
    data.update({
        "chi_Q": x,
        "chi_I": y,
        "chi_max": y[chi_max_ind],
        "chi_argmax": x[chi_max_ind]
    })
    # transformation
    pdfconfig = PDFConfig(dataformat="QA", **pdfgetx_setting)
    pdfgetter = PDFGetter(pdfconfig)
    pdfgetter(x, y)
    iq, sq, fq, gr = pdfgetter.iq, pdfgetter.sq, pdfgetter.fq, pdfgetter.gr
    gr_max_ind = np.argmax(gr[1])
    data.update({
        "iq_Q": iq[0],
        "iq_I": iq[1],
        "sq_Q": sq[0],
        "sq_S": sq[1],
        "fq_Q": fq[0],
        "fq_F": fq[1],
        "gr_r": gr[0],
        "gr_G": gr[1],
        "gr_max": gr[1][gr_max_ind],
        "gr_argmax": gr[0][gr_max_ind]
    })
    return data
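A minimal usage sketch for this function, assuming a pyFAI calibration file and the kind of test image used elsewhere on this page; the integ_setting and pdfgetx_setting values are illustrative placeholders (they are forwarded to ai.integrate1d and PDFConfig respectively), not project defaults.

import pyFAI
from pdfstream.io import load_img

ai = pyFAI.load("Ni_poni_file.poni")      # placeholder calibration file
raw_img = load_img("Ni_img_file.tiff")    # placeholder 2D diffraction image
result = process(
    raw_img=raw_img,
    ai=ai,
    integ_setting={"npt": 1024, "unit": "q_A^-1"},        # kwargs for ai.integrate1d
    pdfgetx_setting={"composition": "Ni", "qmax": 24.0},  # kwargs for PDFConfig
)
print(result["chi_max"], result["gr_argmax"])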
Example #4
from pkg_resources import resource_filename
from pyobjcryst import loadCrystal
from pyobjcryst.molecule import Molecule

# imports assumed for the objects used below (not part of the original excerpt)
from diffpy.pdfgetx import PDFConfig, PDFGetter
from diffpy.structure import loadStructure
from pdfstream.io import load_img, load_data

NI_PONI = resource_filename('tests', 'test_data/Ni_poni_file.poni')
NI_GR = resource_filename('tests', 'test_data/Ni_gr_file.gr')
NI_CHI = resource_filename('tests', 'test_data/Ni_chi_file.chi')
NI_FGR = resource_filename('tests', 'test_data/Ni_fgr_file.fgr')
NI_IMG = resource_filename('tests', 'test_data/Ni_img_file.tiff')
NI_CIF = resource_filename('tests', 'test_data/Ni_cif_file.cif')
KAPTON_IMG = resource_filename('tests', 'test_data/Kapton_img_file.tiff')
BLACK_IMG = resource_filename('tests', 'test_data/black_img.tiff')
WHITE_IMG = resource_filename('tests', 'test_data/white_img.tiff')
NI_CONFIG = PDFConfig()
NI_CONFIG.readConfig(NI_GR)
NI_PDFGETTER = PDFGetter(NI_CONFIG)
ZRP_CIF = resource_filename('tests', 'test_data/ZrP.cif')
NI_CRYSTAL = loadCrystal(NI_CIF)
ZRP_CRYSTAL = loadCrystal(ZRP_CIF)
NI_DIFFPY = loadStructure(NI_CIF)

DB = {
    'Ni_img_file': NI_IMG,
    'Ni_img': load_img(NI_IMG),
    'Kapton_img_file': KAPTON_IMG,
    'Kapton_img': load_img(KAPTON_IMG),
    'Ni_poni_file': NI_PONI,
    'Ni_gr_file': NI_GR,
    'Ni_chi_file': NI_CHI,
Example #5
import json
from pathlib import Path

import numpy
import pyFAI
from pkg_resources import resource_filename

# imports assumed for the objects used below (not part of the original excerpt)
from diffpy.pdfgetx import PDFConfig, PDFGetter
from pdfstream.io import load_img, load_array

NI_PONI_FILE = resource_filename('tests', 'test_data/Ni_poni_file.poni')
NI_GR_FILE = resource_filename('tests', 'test_data/Ni_gr_file.gr')
NI_CHI_FILE = resource_filename('tests', 'test_data/Ni_chi_file.chi')
NI_FGR_FILE = resource_filename('tests', 'test_data/Ni_fgr_file.fgr')
NI_IMG_FILE = resource_filename('tests', 'test_data/Ni_img_file.tiff')
MASK_FILE = resource_filename("tests", "test_data/mask_file.npy")
KAPTON_IMG_FILE = resource_filename('tests', 'test_data/Kapton_img_file.tiff')
BLACK_IMG_FILE = resource_filename('tests', 'test_data/black_img.tiff')
WHITE_IMG_FILE = resource_filename('tests', 'test_data/white_img.tiff')
NI_IMG = load_img(NI_IMG_FILE)
NI_FRAMES = numpy.expand_dims(NI_IMG, 0)
KAPTON_IMG = load_img(KAPTON_IMG_FILE)
NI_GR = load_array(NI_GR_FILE)
NI_CHI = load_array(NI_CHI_FILE)
NI_FGR = load_array(NI_FGR_FILE)
NI_CONFIG = PDFConfig()
NI_CONFIG.readConfig(NI_GR_FILE)
NI_PDFGETTER = PDFGetter(NI_CONFIG)
AI = pyFAI.load(NI_PONI_FILE)
MASK = numpy.load(MASK_FILE)
BLACK_IMG = load_img(BLACK_IMG_FILE)
WHITE_IMG = load_img(WHITE_IMG_FILE)
START_DOC_FILE = resource_filename('tests', 'test_data/start.json')
with Path(START_DOC_FILE).open("r") as f:
    START_DOC = json.load(f)

DB = {
    'Ni_img_file': NI_IMG_FILE,
    'Ni_img': NI_IMG,
    'Kapton_img_file': KAPTON_IMG_FILE,
    'Kapton_img': KAPTON_IMG,
Example #6
def make_default_pdfgetter() -> PDFGetter:
    """
    Create a PDFGetter with default settings for Ni.
    """
    config = PDFConfig()
    config.composition = "Ni"
    config.dataformat = "QA"
    config.qmin = 0.
    config.qmax = 24.
    config.qmaxinst = 25.
    config.rmin = 0.
    config.rmax = 60.
    config.rstep = .01
    config.rpoly = 0.9

    pdfgetter = PDFGetter(config)

    return pdfgetter
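A short usage sketch: the returned PDFGetter is called on Q (in inverse angstrom, matching dataformat "QA") and I(Q) arrays, and the transformed data are then available as attributes. The synthetic q and iq below are placeholders.

import numpy as np

# placeholder I(Q) data on a Q grid in 1/angstrom
q = np.linspace(0.5, 24.0, 2000)
iq = np.exp(-0.1 * q) + 0.05 * np.sin(3.0 * q)

pdfgetter = make_default_pdfgetter()
pdfgetter(q, iq)
r, gr = pdfgetter.gr     # r grid and G(r)
q_sq, sq = pdfgetter.sq  # Q grid and S(Q)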
Example #7
def process(
    *,
    raw_img: np.ndarray,
    ai: tp.Union[None, AzimuthalIntegrator],
    user_mask: np.ndarray = None,
    auto_mask: bool = True,
    dk_img: np.ndarray = None,
    integ_setting: dict = None,
    mask_setting: dict = None,
    pdfgetx_setting: dict = None,
) -> dict:
    """The function to process the data from event."""
    # initialize the data dictionary
    data = {
        "dk_sub_image": raw_img,
        "mask": np.zeros_like(raw_img),
        "chi_Q": np.array([0.]),
        "chi_I": np.array([0.]),
        "chi_max": np.float(0.),
        "chi_argmax": np.float(0.),
        "iq_Q": np.array([0.]),
        "iq_I": np.array([0.]),
        "sq_Q": np.array([0.]),
        "sq_S": np.array([0.]),
        "fq_Q": np.array([0.]),
        "fq_F": np.array([0.]),
        "gr_r": np.array([0.]),
        "gr_G": np.array([0.]),
        "gr_max": np.float(0.),
        "gr_argmax": np.float(0.)
    }
    # dark subtraction
    if dk_img is not None:
        data["dk_sub_image"] = np.subtract(raw_img, dk_img)
    # if no calibration, output data now
    if ai is None:
        return data
    # do auto masking if specified
    if auto_mask:
        data["mask"], _ = integ.auto_mask(data["dk_sub_image"],
                                          ai,
                                          mask_setting=mask_setting,
                                          user_mask=user_mask)
    elif user_mask is not None:
        data["mask"] = user_mask
    # integration
    x, y = ai.integrate1d(data["dk_sub_image"],
                          mask=data["mask"],
                          **integ_setting)
    chi_max_ind = np.argmax(y)
    data.update({
        "chi_Q": x,
        "chi_I": y,
        "chi_max": y[chi_max_ind],
        "chi_argmax": x[chi_max_ind]
    })
    # transformation
    if not _PDFGETX_AVAILABLE:
        io.server_message(
            "diffpy.pdfgetx is not installed. No use [0.] for all the relevant data."
        )
        return data
    pdfconfig = PDFConfig(**pdfgetx_setting)
    pdfgetter = PDFGetter(pdfconfig)
    pdfgetter(x, y)
    iq, sq, fq, gr = pdfgetter.iq, pdfgetter.sq, pdfgetter.fq, pdfgetter.gr
    gr_max_ind = np.argmax(gr[1])
    data.update({
        "iq_Q": iq[0],
        "iq_I": iq[1],
        "sq_Q": sq[0],
        "sq_S": sq[1],
        "fq_Q": fq[0],
        "fq_F": fq[1],
        "gr_r": gr[0],
        "gr_G": gr[1],
        "gr_max": gr[1][gr_max_ind],
        "gr_argmax": gr[0][gr_max_ind]
    })
    return data
Example #8
def calib_pipe(ai: AzimuthalIntegrator,
               img: ndarray,
               pdfconfig: PDFConfig,
               stru: Crystal,
               fit_range: FIT_RANGE,
               qdamp0: float,
               qbroad0: float,
               bg_img: ndarray = None,
               bg_scale: float = None,
               mask_setting: dict = None,
               integ_setting: dict = None,
               img_setting: dict = None,
               chi_plot_setting: dict = None,
               pdf_plot_setting: dict = None,
               ncpu: int = None) -> tp.Tuple[PDFGetter, MyRecipe]:
    """Pipeline-style qdamp, qbroad calibration.

    A pipeline to do image background subtraction, auto masking, integration, PDF transformation and PDF
    modeling to calibrate the qdamp and qbroad. Also, the accuracy of the calibration is tested by the modeling.

    Parameters
    ----------
    ai : AzimuthalIntegrator
        The AzimuthalIntegrator.

    img : ndarray
        The 2D array of the image.

    pdfconfig : PDFConfig
        This class stores all configuration data needed for generating PDF. See diffpy.pdfgetx.PDFConfig.

    stru : Crystal
        The structure of calibration material.

    fit_range : tuple
        The rmin, rmax and rstep in the unit of angstrom.

    qdamp0 : float
        The initial value for the Q damping factor.

    qbroad0 : float
        The initial value for the Q broadening factor.

    bg_img : ndarray
        The 2D array of the background image. If None, no background subtraction.

    bg_scale : float
        The scale for background subtraction. If None, use 1.

    mask_setting : dict
        The auto mask setting. See _AUTO_MASK_SETTING in pdfstream.tools.integration. If None,
        use _AUTO_MASK_SETTING. To turn off the auto masking, use "OFF".

    integ_setting : dict
        The integration setting. See _INTEG_SETTING in pdfstream.tools.integration. If None, use _INTEG_SETTING.

    img_setting : dict
        The user's modification to imshow kwargs except a special key 'z_score'. If None, use an empty dict.
        To turn off the imshow, use "OFF".

    chi_plot_setting : dict
        The kwargs of chi data plotting. See matplotlib.pyplot.plot. If 'OFF', skip visualization.

    pdf_plot_setting : dict or 'OFF'
        The kwargs of pdf data plotting. See matplotlib.pyplot.plot. If 'OFF', skip visualization.

    ncpu : int
        The number of cpu used in parallel computing. If None, no parallel computing.

    Returns
    -------
    pdfgetter : PDFGetter
        The object with processed data, including iq, sq, fq, gr.

    recipe : MyRecipe
        The refined recipe of the fitting.
    """
    chi = get_chi(ai,
                  img,
                  bg_img=bg_img,
                  bg_scale=bg_scale,
                  mask_setting=mask_setting,
                  integ_setting=integ_setting,
                  img_setting=img_setting,
                  plot_setting=chi_plot_setting)[0]
    pdfconfig.update(rmin=fit_range[0], rmax=fit_range[1], rstep=fit_range[2])
    pdfgetter = get_pdf(pdfconfig, chi, plot_setting=pdf_plot_setting)
    data = MyParser()
    data.parsePDFGetter(pdfgetter, meta={'qdamp': qdamp0, 'qbroad': qbroad0})
    recipe = fit_calib(stru, data, fit_range, ncpu=ncpu)
    return pdfgetter, recipe
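A rough sketch of assembling the inputs for calib_pipe from the kinds of test files used elsewhere on this page; the file names, fit range and initial qdamp/qbroad values are placeholders.

import pyFAI
from pyobjcryst import loadCrystal
from diffpy.pdfgetx import PDFConfig
from pdfstream.io import load_img

ai = pyFAI.load("Ni_poni_file.poni")     # calibration geometry
img = load_img("Ni_img_file.tiff")       # 2D image of the calibrant
stru = loadCrystal("Ni_cif_file.cif")    # calibration structure
pdfconfig = PDFConfig(composition="Ni", dataformat="QA", qmax=24.0)
pdfgetter, recipe = calib_pipe(
    ai, img, pdfconfig, stru,
    fit_range=(1.5, 60.0, 0.01),   # rmin, rmax, rstep in angstrom
    qdamp0=0.04, qbroad0=0.02,
)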
Example #9
def process(
    *,
    user_config: UserConfig,
    raw_img: np.ndarray,
    ai: tp.Union[None, AzimuthalIntegrator],
    dk_img: np.ndarray = None,
    dk_sub_bg_img: np.ndarray = None,
    integ_setting: dict = None,
    mask_setting: dict = None,
    pdfgetx_setting: dict = None,
) -> dict:
    """The function to process the data from event."""
    # initialize the data dictionary
    data = {
        "dk_sub_image": raw_img.copy(),
        "bg_sub_image": raw_img.copy(),
        "mask": np.zeros_like(raw_img),
        "chi_Q": np.array([0.]),
        "chi_I": np.array([0.]),
        "chi_max": 0.,
        "chi_argmax": 0.,
        "iq_Q": np.array([0.]),
        "iq_I": np.array([0.]),
        "sq_Q": np.array([0.]),
        "sq_S": np.array([0.]),
        "fq_Q": np.array([0.]),
        "fq_F": np.array([0.]),
        "gr_r": np.array([0.]),
        "gr_G": np.array([0.]),
        "gr_max": 0.,
        "gr_argmax": 0.
    }
    # dark subtraction
    if dk_img is not None:
        data["dk_sub_image"] = np.subtract(raw_img, dk_img)
    # background subtraction
    if dk_sub_bg_img is not None:
        data["bg_sub_image"] = np.subtract(data["dk_sub_image"], dk_sub_bg_img)
    # if no calibration, output data now
    if ai is None:
        return data
    # do auto masking if specified
    if user_config.do_auto_masking:
        data["mask"], _ = integ.auto_mask(data["bg_sub_image"],
                                          ai,
                                          mask_setting=mask_setting,
                                          user_mask=user_config.user_mask)
    # if user gives a mask, use it
    elif user_config.user_mask is not None:
        data["mask"] = user_config.user_mask.copy()
    # integration
    x, y = ai.integrate1d(data["bg_sub_image"],
                          mask=data["mask"],
                          **integ_setting)
    chi_max_ind = np.argmax(y)
    data.update({
        "chi_Q": x,
        "chi_I": y,
        "chi_max": y[chi_max_ind],
        "chi_argmax": x[chi_max_ind]
    })
    # transformation
    pdfconfig = PDFConfig(dataformat="QA", **pdfgetx_setting)
    pdfgetter = PDFGetter(pdfconfig)
    pdfgetter(x, y)
    iq, sq, fq, gr = pdfgetter.iq, pdfgetter.sq, pdfgetter.fq, pdfgetter.gr
    gr_max_ind = np.argmax(gr[1])
    data.update({
        "iq_Q": iq[0],
        "iq_I": iq[1],
        "sq_Q": sq[0],
        "sq_S": sq[1],
        "fq_Q": fq[0],
        "fq_F": fq[1],
        "gr_r": gr[0],
        "gr_G": gr[1],
        "gr_max": gr[1][gr_max_ind],
        "gr_argmax": gr[0][gr_max_ind]
    })
    return data
Example #10
def load_pdfconfig(cfg_file: str) -> PDFConfig:
    """Load the PDFConfig from the processed data file or configuration file."""
    pdfconfig = PDFConfig()
    pdfconfig.readConfig(cfg_file)
    return pdfconfig
Example #11
def make_pdfgetter(pdfconfig: PDFConfig, user_config: dict = None) -> PDFGetter:
    """Make the pdfgetter."""
    if user_config is not None:
        pdfconfig.update(**user_config)
    pdfgetter = PDFGetter(pdfconfig)
    return pdfgetter
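Examples #10 and #11 chain naturally: load a configuration from file, apply user overrides, and build the PDFGetter. A minimal sketch, assuming a pdfgetx configuration file named "pdfgetx.cfg" (a placeholder) and a couple of overrides.

pdfconfig = load_pdfconfig("pdfgetx.cfg")
pdfgetter = make_pdfgetter(pdfconfig, user_config={"qmax": 22.0, "rmax": 50.0})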
Example #12
def run(args):
    FILE = args.INPUT
    FILE_NO_EXTENSION = FILE[:-6]
    SAVE_PATH = os.getcwd()
    print(SAVE_PATH)
    if args.OUTPUT:
        SAVE_PATH = args.OUTPUT
    else:
        print(
            '!!! Warning: files will be saved in the current folder because no output was defined.'
        )

    REFERENCE_SLICE_NUMBER = 100

    ### Collecting data information ###

    try:
        inputFile = h5py.File(FILE, 'r')
        print("File " + FILE + " loaded")
    except ValueError:
        raise

    ### Creating matrix from data ###

    rawData = np.array(inputFile['data/data'])
    rawTheta = np.array(inputFile['data/theta'])
    dataX = np.ndarray.flatten(np.array(inputFile['data/dataX']))
    theta = np.sort(rawTheta)
    sinogramData = np.zeros(np.shape(rawData))

    ### Corrections ###

    # Correcting unsorted 2theta acquisition
    argsortVal = np.argsort(rawTheta)
    if not np.array_equal(theta, rawTheta):
        sorting = 0
        while sorting < np.max(argsortVal) - 1:
            sinogramData[sorting, :, :] = rawData[argsortVal[sorting], :, :]
            progression("Sorting data................ ",
                        np.size(argsortVal) - 2, sorting)
            sorting = sorting + 1
    print()

    ## Deleting lines
    if args.DELETE:
        deleted_line = np.fromstring(args.DELETE, dtype=int, sep=',')
        for i in range(0, len(deleted_line)):
            sinogramData = np.delete(sinogramData, deleted_line[i], axis=0)
            theta = np.delete(theta, deleted_line[i], axis=0)
            progression("Deleting lines.............. ", len(deleted_line), i)
        print()

    ### Removing outlier pixels from data ###
    if args.OUTLIERS:
        for i in range(0, np.size(sinogramData, 2)):
            sinogramData[:, :, i] = findOutlierPixels(sinogramData[:, :, i],
                                                      tolerance=10,
                                                      worry_about_edges=False)
            progression("Correcting wrong pixels..... ",
                        np.size(sinogramData, 2), i)
        print()

    ### Subtract air from raw data ###
    if args.AIR:
        dataAir = np.genfromtxt(args.AIR, dtype=float)
        FILE_NO_EXTENSION = FILE_NO_EXTENSION + '_SUBAIR'
        for i in range(0, np.size(sinogramData, 0)):
            currentAir = dataAir[:, 1] * (
                0.85 * np.average(sinogramData[i, :, 50]) / dataAir[50, 1])
            for j in range(0, np.size(sinogramData, 1)):
                sinogramData[i, j, :] = sinogramData[i, j, :] - currentAir
            progression("Substacting air............. ",
                        np.size(sinogramData, 0), i)
        print()

    ### Correcting thermal/beam drifts ###
    if args.CORRECT:
        thresholdedSinogramData = np.copy(sinogramData[:, :,
                                                       REFERENCE_SLICE_NUMBER])
        thresholdedSinogramData[sinogramData[:, :,
                                             REFERENCE_SLICE_NUMBER] < 40] = 0
        thresholdedSinogramData[sinogramData[:, :,
                                             REFERENCE_SLICE_NUMBER] >= 40] = 1
        thresholdedSinogramData = binary_fill_holes(thresholdedSinogramData)
        thresholdedSinogramData = imageFilterBigPart(thresholdedSinogramData)
        CoMThresh = centerOfMass(thresholdedSinogramData *
                                 sinogramData[:, :, REFERENCE_SLICE_NUMBER],
                                 axis=1)
        for i in range(0, np.size(sinogramData, 2)):
            sinogramData[:, :, i] = fixDrift(sinogramData[:, :, i], CoMThresh)
            progression("Correcting drifts........... ",
                        np.size(sinogramData, 2), i)
        print()

    ### Subtract extra pattern from raw data ###
    if args.EXTRA:
        dataExtra = np.genfromtxt(args.EXTRA, dtype=float)
        FILE_NO_EXTENSION = FILE_NO_EXTENSION + '_SUBPAP'
        for i in range(0, np.size(sinogramData, 0)):
            for j in range(0, np.size(sinogramData, 1)):
                currentExtra = dataExtra[:, 1] * (sinogramData[i, j, 100] /
                                                  dataExtra[100, 1])
                sinogramData[i, j, :] = sinogramData[i, j, :] - currentExtra
            progression("Substacting extra........... ",
                        np.size(sinogramData, 0), i)
        print()

    ### Extract PDF signal ###
    sinogramDataPdf = np.copy(sinogramData)
    if args.PDF:
        cfg = PDFConfig()
        cfg.readConfig(args.PDF)
        pdfget = PDFGetter()
        pdfget.configure(cfg)
        sinogramDataPdf = np.zeros(
            (np.size(sinogramData, 0), np.size(sinogramData, 1),
             round((cfg.rmax - cfg.rmin) / cfg.rstep) + 1))
        for i in range(0, np.size(sinogramDataPdf, 0)):
            for j in range(0, np.size(sinogramDataPdf, 1)):
                currentPdfDataY = sinogramData[i, j, :]
                pdfget.getTransformation('gr')
                pdfget(dataX, currentPdfDataY)
                pdfResults = pdfget.results
                pdfResults = pdfResults[8]
                sinogramDataPdf[i, j, :] = pdfResults[1]
            progression("Extracting PDF.............. ",
                        np.size(sinogramDataPdf, 0), i)
        for i in range(0, np.size(sinogramDataPdf, 2)):
            sinogramDataPdf[:, :, i] = np.average(sinogramData[:, :, :],
                                                  axis=2) * np.copy(
                                                      sinogramDataPdf[:, :, i])
        sinogramData = np.copy(sinogramDataPdf)
        FILE_NO_EXTENSION = FILE_NO_EXTENSION + '_PDF'
        print()

    ### Saving ###
    if args.OVERWRITE or not os.path.isfile(FILE_NO_EXTENSION + '_corrected.h5'):
        saveHdf5File(sinogramData,
                     SAVE_PATH,
                     FILE_NO_EXTENSION + '_corrected.h5',
                     mode='stack')
    else:
        print(
            '!!! Warning: sinogram file exists, use command -R to overwrite it')

    ### Reconstruction ###
    if args.RECONSTRUCT:
        reconstructedData = np.zeros(
            (np.size(sinogramData, 1), np.size(sinogramData,
                                               1), np.size(sinogramData, 2)))
        for i in range(0, np.size(sinogramData, 2)):
            reconstructedData[:, :, i] = reconstruction(sinogramData[:, :, i],
                                                        theta,
                                                        output_size=np.size(
                                                            sinogramData, 1))
            progression("Reconstructing data......... ",
                        np.size(sinogramData, 2), i)
        print()

    if args.OVERWRITE and args.RECONSTRUCT:
        saveHdf5File(reconstructedData,
                     SAVE_PATH,
                     FILE_NO_EXTENSION + '_reconstructed_stack.h5',
                     mode='stack')
    else:
        print(
            '!!! Warning: reconstruction file exists, use command -R to overwrite it'
        )
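run reads its options from an argparse-style namespace. Below is a minimal sketch of the attributes it touches; every value is an illustrative placeholder.

import argparse

args = argparse.Namespace(
    INPUT="scan_tomo.h5",   # HDF5 file with data/data, data/theta and data/dataX
    OUTPUT=None,            # save into the current folder
    DELETE=None,            # e.g. "3,17" to drop sinogram lines
    OUTLIERS=False,         # correct outlier pixels
    AIR=None,               # two-column text file with an air pattern to subtract
    CORRECT=False,          # correct thermal/beam drifts
    EXTRA=None,             # two-column text file with an extra pattern to subtract
    PDF="pdfgetx.cfg",      # pdfgetx configuration file; enables PDF extraction
    OVERWRITE=True,         # overwrite existing output files
    RECONSTRUCT=False,      # run the tomographic reconstruction step
)
run(args)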