Example #1
File: puncher.py Project: k-eks/Burrow
def punch_Bragg(XDSPunchFilePath, sampleFrame, pathToFrames, frameNameTemplate, pathToPunched, punchedPrefix, maskedIntensity=MASKED_BRAGG_INTENSITY):
    """
    Masks all Bragg peaks using XDS BKG files. For an unknown reason, when XDS's BKGPIX.cbf is subtracted from a raw frame, all positive intensities represent Bragg peaks.
    XDSPunchFilePath ... string location of the BKGPIX file created by XDS. NOTE: fabio cannot read that file by default; I used ALBULA to convert it to an h5 file!
    sampleFrame ... fabio.frame some frame from which the dimensions can be read.
    pathToFrames ... string location of the folder which contains the frames.
    frameNameTemplate ... string example of how the frames are named, allows for percent substitution.
    pathToPunched ... string location where the processed frames should be saved.
    punchedPrefix ... string prefix that should be put in front of the processed frames.
    maskedIntensity ... int placeholder intensity which identifies a pixel as masked.
    """
    frameShape = sampleFrame.data.shape
    # generating the background data
    bgType = os.path.splitext(XDSPunchFilePath)[1]
    if bgType == ".h5":
        bg = xds_tools.bgFromH5(XDSPunchFilePath, frameShape)
    else:
        raise TypeError("Background file not supported!")

    helping_tools.check_folder(pathToPunched)
    frameset = cbf_tools.Frameset(pathToFrames, frameNameTemplate)

    # punching, the sorting function is used for a nice print out
    for fileName in frameset.generate_frame_names_from_template():
        print("Punching " + str(fileName), end="\r")
        frame = fabio.open(fileName)
        punched = frame.data - bg
        for x in range(frameShape[0]):
            for y in range(frameShape[1]):
                if punched[x, y] >= 0:
                    frame.data[x, y] = maskedIntensity  # use the maskedIntensity parameter
        frame.write(os.path.join(pathToPunched, punchedPrefix + os.path.basename(fileName)))
        del frame # freeing memory
    print("\nPunching complete!")
Example #2
def subtract_single_frame(pathToFrames, pathToSubtracted, namePrefix,
                          singleFrame, maskFrame):
    """Subtracts a single frame from a dataset without any further alterations such as a flux correction.
    pathToFrames ... string location of the folder which contains the frames
    pathToSubtracted ... string location where the processed frames should be saved
    namePrefix ... string short text that is added to each newly calculated frame
    singleFrame ... fabio.frame this frame will be subtracted from the dataset
    maskFrame ... fabio.frame frame which contains all pixels that should be masked
    """
    helping_tools.check_folder(pathToSubtracted)
    singleData = singleFrame.data.copy()
    print("Reading masks, please wait!")
    maskUntrusted, maskDefective, maskHot = cbf_tools.generate_all_unwanted_pixel(
        maskFrame, 1000000)
    print("starting subtracting\n")
    frameset = cbf_tools.Frameset(pathToFrames)
    for fileName in frameset.generate_frame_names_from_template():
        frame = fabio.open(fileName)
        frame.data -= singleData.astype(
            np.int32)  # here is the actual frame subtraction
        frame.data = frame.data.round().astype(
            np.int32)  # make reasonable counts
        frame.data = cbf_tools.restore_pixel_mask(frame, maskUntrusted,
                                                  maskDefective, maskHot)
        fileName = os.path.basename(
            fileName)  # preparing writing to new location
        frame.save(os.path.join(pathToSubtracted, namePrefix + fileName))
        print("Frame subtracted from %s" % fileName, end='\r')
        del frame  # cleaning up memory
    print("\nDone!")
Example #3
File: filler.py Project: k-eks/Burrow
def filler_gauss_fit(pathToFrames, pathToFilled, frameNameTemplate, filledPrefix):
    """
    Fills punched Bragg peaks by fitting a Gauss function (from astropy) into the hole.
    pathToFrames ... string location of the punched frames
    pathToFilled ... string location where the corrected frames should be placed
    frameNameTemplate ... string example of how the frames are named, allows for percent substitution.
    filledPrefix ... string prefix that should be put in front of the processed frames.
    """
    print("\nWarning: One dilation step is performed on punched Braggs before filling!\n")

    helping_tools.check_folder(pathToFilled)
    frameset = cbf_tools.Frameset(pathToFrames, frameNameTemplate)

    kernel = astropy.convolution.Gaussian2DKernel(stddev=1) # Gaussian kernel used by the interpolation below
    for imageFile in frameset.generate_frame_names_from_template():
        print("Filling " + str(imageFile), end="\r")
        frame = fabio.open(imageFile)
        # creating a mask to dilate the Bragg holes
        dilationMask = frame.data.copy()
        dilationMask[dilationMask > -99999] = 0
        dilationMask[dilationMask <= -99999] = 1
        dilationMask = scipy.ndimage.binary_dilation(dilationMask).astype(dilationMask.dtype)
        filledData = np.array(frame.data, dtype=np.float32)
        filledData[dilationMask == 1] = -9999999
        filledData[filledData < 0] = np.nan
        filledData = astropy.convolution.interpolate_replace_nans(filledData, kernel).astype(np.int32) # do the fitting and filling
        filledData[filledData < 0] = -9999999 # re-mask values like detector gaps
        frame.data = filledData.astype(np.int32)
        frame.write(os.path.join(pathToFilled, filledPrefix + os.path.basename(imageFile))) # writing out
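
A minimal invocation sketch for filler_gauss_fit, assuming the punched frames produced by punch_Bragg live in punched/ (all names hypothetical):

filler_gauss_fit("punched/", "filled/", "punched_frame_%05i.cbf", "filled_")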
Example #4
def update_masked_intensity(sampleFrame,
                            pathToFrames,
                            frameNameTemplate,
                            pathToUpdated,
                            updatedPrefix,
                            updateIntensity=-1):
    """
    Changes intensities already masked with MASKED_BRAGG_INTENSITY to a new intensity.
    sampleFrame ... fabio.frame some frame from which the dimensions can be read.
    pathToFrames ... string location of the folder which contains the frames.
    frameNameTemplate ... string example of how the frames are named, allows for percent substitution.
    pathToUpdated ... string location where the processed frames should be saved.
    updatedPrefix ... string prefix that should be put in front of the processed frames.
    updateIntensity ... int new placeholder intensity for the masked pixels.
    """
    frameShape = sampleFrame.data.shape

    helping_tools.check_folder(pathToUpdated)
    frameset = cbf_tools.Frameset(pathToFrames, frameNameTemplate)

    # updating, the sorting function is used for a nice print out
    for fileName in frameset.generate_frame_names_from_template():
        print("Updating " + str(fileName), end="\r")
        frame = fabio.open(fileName)
        data = np.array(frame.data)
        data[data <= MASKED_BRAGG_INTENSITY] = updateIntensity
        frame.data = data
        frame.write(
            os.path.join(pathToUpdated,
                         updatedPrefix + os.path.basename(fileName)))
        del frame  # freeing memory
    print("\nUpdating complete!")
Example #5
def SavGol_filter(pathToFrames, nameTemplate, frameRange, pathToFiltered, namePrefix, maskFrame, subsize, windowLength, polyOrder):
    """Subtracts a flux normalized frame from a dataset.
    pathToFrames ... string location of the folder which contains the frames
    pathToSubtracted ... string location where the processed frames should be saved
    namePrefix ... string short text that is added to each newly calculated frame
    single ... fabio.frame this frame will be substracted from the dataset
    maskFrame ... fabio.frame frame which contains all pixel that should be masked
    """
    helping_tools.check_folder(pathToFiltered)
    print("Reading masks, please wait!")
    # maskUntrusted, maskDefective, maskHot = cbf_tools.generate_all_unwanted_pixel(maskFrame, 1000000)
    print("starting filtering\n")
    # generating frame paths and names for reading
    frameset = cbf_tools.Frameset(pathToFrames, nameTemplate)
    frameset.setSize = frameRange
    fileNames = frameset.generate_frame_names_from_template()
    # generating frame paths and names for writing
    frameset = cbf_tools.Frameset(pathToFiltered, namePrefix + nameTemplate)
    frameset.setSize = frameRange
    newFiles = frameset.generate_frame_names_from_template()

    templateFrame = fabio.open(fileNames[0])
    # determination of how many tiles are necessary for the subdivision of the frames
    tilesx = int(templateFrame.data.shape[0] / subsize) + 1
    tilesy = int(templateFrame.data.shape[1] / subsize) + 1
    for subx in range(tilesx):
        for suby in range(tilesy):
            print("\nWorking on sub %i of %i" % (subx * tilesy + suby + 1, tilesx * tilesy))
            # generation of the subframe size taking the border regions into account
            if (subx + 2) > tilesx:
                width = templateFrame.data.shape[0] - subx * subsize
            else:
                width = subsize
            if (suby + 2) > tilesy:
                height = templateFrame.data.shape[1] - suby * subsize
            else:
                height = subsize
            print("Width %i, height %i" % (width, height))
            subFrame = np.zeros((width, height, frameRange))
            for i in range(frameRange):
                print("Reading frame " + fileNames[i])#, end="\r")
                frame = fabio.open(fileNames[i])
                subFrame[:, : , i] = frame.data[subx * subsize : subx * subsize + width,
                                                suby * subsize : suby * subsize + height].copy()
                del frame # cleaning memory
            print("\nApplying SavGol filter...")
            for x in range(subFrame.shape[0]):
                for y in range(subFrame.shape[1]):
                    print(x, y, end="\r")
                    filterLine = subFrame[x, y, :]
                    subFrame[x, y, :] = scipy.signal.savgol_filter(filterLine, windowLength, polyOrder, mode='wrap').copy()
            subFrame = subFrame.astype(np.int32) # cast to integer counts before writing
            for i in range(frameRange):
                print("Writing frame " + newFiles[i])#, end="\r")
                frame = fabio.open(newFiles[i])
                frame.data[subx * subsize : subx * subsize + width, suby * subsize : suby * subsize + height] = subFrame[:, : , i]
                frame.save(os.path.join(pathToFiltered, namePrefix + os.path.basename(frame.filename)))
                del frame # cleaning memory
    print("\nDone!")
Example #6
def subtract_hybrid_background(pathToFrames, pathToSubtracted,
                               backgroundFrameNames, backgroundMixture, bgName,
                               maskFrame):
    """Generates a flux normalized background from multiple sources and subtracts it from the raw data.
    pathToFrames ... string location of the folder which contains the frames
    pathToSubtracted ... string location where the processed frames should be saved
    backgroundFrameNames ... array[string] names of the background files which should be used
    backgroundMixture ... array[float] contribution of each background frame for the final background image
    bgName ... string prefix which should be added to the modified frames
    maskFrame ... fabio.frame frame which contains all pixels that should be masked
    """
    bgFluxes = []
    bgData = []
    bgCount = len(backgroundFrameNames)
    for fileName in backgroundFrameNames:
        bgFluxes.append(get_flux_from_file_name(fileName))
        # grab the file extension, NOTE: the extension includes the dot!
        fileType = os.path.splitext(fileName)[1]
        if fileType == '.h5':
            bgData.append(cbf_tools.h5_to_numpy(fileName, ()))
        elif fileType == '.cbf':
            bgData.append(fabio.open(fileName).data)

    helping_tools.check_folder(pathToSubtracted)
    print("Reading masks, please wait!")
    maskUntrusted, maskDefective, maskHot = cbf_tools.generate_all_unwanted_pixel(
        maskFrame, 1000000)
    print("starting subtracting\n")
    frameset = cbf_tools.Frameset(pathToFrames)
    for fileName in frameset.generate_frame_names_from_template():
        frame = fabio.open(fileName)
        frameFlux = cbf_tools.get_flux(frame)
        # mix the background frame
        bgAll = np.zeros(frame.data.shape)
        for i in range(bgCount):
            scale = frameFlux / bgFluxes[i]
            bgAll += bgData[i] * scale * backgroundMixture[i]
        frame.data -= bgAll  # here is the actual background subtraction
        frame.data = frame.data.round().astype(
            np.int32)  # make reasonable counts
        frame.data = cbf_tools.restore_pixel_mask(frame, maskUntrusted,
                                                  maskDefective, maskHot)
        fileName = os.path.basename(
            fileName)  # preparing writing to new location
        frame.save(os.path.join(pathToSubtracted, bgName + fileName))
        print("Background subtracted from %s" % fileName, end='\r')
        del frame  # cleaning up memory
    print("\nDone!")
Example #7
def subtract_hybrid_background(pathToFrames, pathToSubtracted, backgroundFrameNames,
                               backgroundMixture, bgName, maskFrame):
    """Generates a flux normalized background from multiple sources and subtracts it from the raw data.
    pathToFrames ... string location of the folder which contains the frames
    pathToSubtracted ... string location where the processed frames should be saved
    backgroundFrameNames ... array[string] names of the background files which should be used
    backgroundMixture ... array[float] contribution of each background frame for the final background image
    bgName ... string prefix which should be added to the modified frames
    maskFrame ... fabio.frame frame which contains all pixels that should be masked
    """
    bgFluxes = []
    bgData = []
    bgCount = len(backgroundFrameNames)
    for fileName in backgroundFrameNames:
        bgFluxes.append(get_flux_from_file_name(fileName))
        # grab the file extension, NOTE: the extension includes the dot!
        fileType = os.path.splitext(fileName)[1]
        if fileType == '.h5':
            bgData.append(cbf_tools.h5_to_numpy(fileName, ()))
        elif fileType == '.cbf':
            bgData.append(fabio.open(fileName).data)

    helping_tools.check_folder(pathToSubtracted)
    print("Reading masks, please wait!")
    maskUntrusted, maskDefective, maskHot = cbf_tools.generate_all_unwanted_pixel(maskFrame, 1000000)
    print("starting subtracting\n")
    frameset = cbf_tools.Frameset(pathToFrames)
    for fileName in frameset.generate_frame_names_from_template():
        frame = fabio.open(fileName)
        frameFlux = cbf_tools.get_flux(frame)
        # mix the background frame
        bgAll = np.zeros(frame.data.shape)
        for i in range(bgCount):
            scale = frameFlux / bgFluxes[i]
            bgAll += bgData[i] * scale * backgroundMixture[i]
        frame.data -= bgAll # here is the actual background subtraction
        frame.data = frame.data.round().astype(np.int32) # make reasonable counts
        frame.data = cbf_tools.restore_pixel_mask(frame, maskUntrusted, maskDefective, maskHot)
        fileName = os.path.basename(fileName) # preparing writing to new location
        frame.save(os.path.join(pathToSubtracted, bgName + fileName))
        print("Background subtracted from %s" % fileName, end='\r')
        del frame # cleaning up memory
    print("\nDone!")
Example #8
def punch_Bragg(XDSPunchFilePath,
                sampleFrame,
                pathToFrames,
                frameNameTemplate,
                pathToPunched,
                punchedPrefix,
                maskedIntensity=MASKED_BRAGG_INTENSITY):
    """
    Masks all Bragg peaks using XDS BKG files. For an unknown reason, when XDS's BKGPIX.cbf is subtracted from a raw frame, all positive intensities represent Bragg peaks.
    XDSPunchFilePath ... string location of the BKGPIX file created by XDS. NOTE: fabio cannot read that file by default; I used ALBULA to convert it to an h5 file!
    sampleFrame ... fabio.frame some frame from which the dimensions can be read.
    pathToFrames ... string location of the folder which contains the frames.
    frameNameTemplate ... string example of how the frames are named, allows for percent substitution.
    pathToPunched ... string location where the processed frames should be saved.
    punchedPrefix ... string prefix that should be put in front of the processed frames.
    maskedIntensity ... int placeholder intensity which identifies a pixel as masked.
    """
    frameShape = sampleFrame.data.shape
    # generating the background data
    bgType = os.path.splitext(XDSPunchFilePath)[1]
    if bgType == ".h5":
        bg = xds_tools.bgFromH5(XDSPunchFilePath, frameShape)
    else:
        raise TypeError("Background file not supported!")

    helping_tools.check_folder(pathToPunched)
    frameset = cbf_tools.Frameset(pathToFrames, frameNameTemplate)

    # punching, the sorting function is used for a nice print out
    for fileName in frameset.generate_frame_names_from_template():
        print("Punching " + str(fileName), end="\r")
        frame = fabio.open(fileName)
        punched = frame.data - bg
        for x in range(frameShape[0]):
            for y in range(frameShape[1]):
                if punched[x, y] >= 0:
                    frame.data[x, y] = maskedIntensity  # use the maskedIntensity parameter
        frame.write(
            os.path.join(pathToPunched,
                         punchedPrefix + os.path.basename(fileName)))
        del frame  # freeing memory
    print("\nPunching complete!")
Example #9
File: puncher.py Project: k-eks/Burrow
def update_masked_intensity(sampleFrame, pathToFrames, frameNameTemplate, pathToUpdated, updatedPrefix, updateIntensity=-1):
    """
    Changes intensities already masked with MASKED_BRAGG_INTENSITY to a new intensity.
    sampleFrame ... fabio.frame some frame from which the dimensions can be read.
    pathToFrames ... string location of the folder which contains the frames.
    frameNameTemplate ... string example of how the frames are named, allows for percent substitution.
    pathToUpdated ... string location where the processed frames should be saved.
    updatedPrefix ... string prefix that should be put in front of the processed frames.
    updateIntensity ... int new placeholder intensity for the masked pixels.
    """
    frameShape = sampleFrame.data.shape

    helping_tools.check_folder(pathToUpdated)
    frameset = cbf_tools.Frameset(pathToFrames, frameNameTemplate)

    # updating, the sorting function is used for a nice print out
    for fileName in frameset.generate_frame_names_from_template():
        print("Updating " + str(fileName), end="\r")
        frame = fabio.open(fileName)
        data = np.array(frame.data)
        data[data <= MASKED_BRAGG_INTENSITY] = updateIntensity
        frame.data = data
        frame.write(os.path.join(pathToUpdated, updatedPrefix + os.path.basename(fileName)))
        del frame # freeing memory
    print("\nUpdating complete!")
Example #10
def subtract_single_frame(pathToFrames, pathToSubtracted, namePrefix, singleFrame, maskFrame):
    """Subtracts a single frame from a dataset without any further alterations such as a flux correction.
    pathToFrames ... string location of the folder which contains the frames
    pathToSubtracted ... string location where the processed frames should be saved
    namePrefix ... string short text that is added to each newly calculated frame
    singleFrame ... fabio.frame this frame will be subtracted from the dataset
    maskFrame ... fabio.frame frame which contains all pixels that should be masked
    """
    helping_tools.check_folder(pathToSubtracted)
    singleData = singleFrame.data.copy()
    print("Reading masks, please wait!")
    maskUntrusted, maskDefective, maskHot = cbf_tools.generate_all_unwanted_pixel(maskFrame, 1000000)
    print("starting subtracting\n")
    frameset = cbf_tools.Frameset(pathToFrames)
    for fileName in frameset.generate_frame_names_from_template():
        frame = fabio.open(fileName)
        frame.data -= singleData.astype(np.int32) # here is the actual frame subtraction
        frame.data = frame.data.round().astype(np.int32) # make reasonable counts
        frame.data = cbf_tools.restore_pixel_mask(frame, maskUntrusted, maskDefective, maskHot)
        fileName = os.path.basename(fileName) # preparing writing to new location
        frame.save(os.path.join(pathToSubtracted, namePrefix + fileName))
        print("Frame subtracted from %s" % fileName, end='\r')
        del frame # cleaning up memory
    print("\nDone!")
Example #11
def extend_yell_file(baseFileName,
                     splitLength=0,
                     offset=0,
                     outputFolder="./",
                     hardcodedSort=False,
                     modifiedParameters=None,
                     silentMode=False,
                     runNumber=0):
    """
    Splices additional yell statements into a base file and outputs a yell model.txt
    baseFileName ... str a standard yell file that is extended by the "input" command.
    splitLength ... int if larger than 0, multiple yell files are created in different
                        folders where each model contains up to splitLength number of
                        RefinableVariables (and the rest is put into the preamble).
                        All preamble items and RefinableVariables are affected by this!
                        Each model created that way contains different RefinableVariables
                        so that all RefinableVariables are accounted for and refined.
                        This is then a recursive method!
    offset ... int number of RefinableVariables and preamble items that should be put
                   into the preamble before splitLength number of items are put into
                   the RefinableVariables section
    outputFolder ... str folder where to put the resulting model.txt
    hardcodedSort ... bool indicates whether the hardcoded section dedicated to sorting
                      parameters should be used; for practical reasons I had to modify
                      the parameter list there.
                      Warning: this section is hardcoded! Do not use this option unless you know what to do!
    modifiedParameters ... list of (name, value) pairs which become the RefinableVariables; matching parameters from the input files are moved to the preamble.
    silentMode ... bool If true, no status messages will be displayed.
    runNumber ... int number of times this algorithm has run, used for folder naming when splitting the file up.
    """
    # Preparing the lists which hold the values which are to be inserted into the yell file
    RefinableVariables = []
    MScatterers = []
    Variants = []
    Correlations = []
    Modes = []
    Other = [
    ]  # inserted after scale, if it contains variables, they will be refined when the model is split to multiple files
    Other.append("# Externally added preamble items\n")
    Static = []  # inserted after scale but is never modified
    Static.append("# Static entries\n")
    Print = []  # added at the end of the file

    # preparing the output file location and creating a new folder if necessary
    writePath = outputFolder
    pathExtension = ""
    if splitLength > 0:  # add an index to the folder if multiple files are created
        pathExtension = str(runNumber)
        writePath += pathExtension
    helping_tools.check_folder(
        writePath)  # create the folder, happens regardless of file splitting

    # starting to write the yell model file
    inputFiles = []
    dataPointer = None
    with open(os.path.join(writePath, "model.txt"), 'w') as modelFile:
        # scan for all input instructions and extract the file names
        with open(baseFileName, 'r') as yellexFile:
            for line in yellexFile.readlines():
                if line.strip().startswith("input"):
                    inputFiles.append(
                        line.strip().split(' ')
                        [1])  # gets the file name from the input instruction

        # read yell definition blocks from files into lists for later usage
        for file in inputFiles:
            dataPointer = None
            with open(file) as extensionFile:
                for line in extensionFile.readlines():
                    if "RefinableVariables" in line:
                        dataPointer = RefinableVariables
                    elif "Correlations" in line:
                        dataPointer = Correlations
                    elif "Modes" in line:
                        dataPointer = Modes
                    elif "Preamble" in line:
                        dataPointer = Other
                    elif "Static" in line:
                        dataPointer = Static
                    elif "FileEnd" in line:
                        dataPointer = Print
                    elif "UnitCell" in line:
                        dataPointer = Variants
                    if is_useable_input(line):
                        if not line.endswith("\n"):
                            line = line + "\n"
                        if dataPointer != None:
                            dataPointer.append(line)

        # if multiple model batches are created, redistribute the refinable variables over all model files
        if splitLength > 0 or hardcodedSort or modifiedParameters != None:
            allParameters = Other + RefinableVariables

            #############################################################
            # HERE IS THE HARDCODED PORTION! WATCH OUT!                 #
            #############################################################
            if hardcodedSort:
                if not silentMode:
                    print("Warning hardcoded sorting is active!!!")
                # use only a parameter word combination
                # for i in allParameters:
                #     if "center" in i:
                #         p.append(i)
                # sort parameters
                allParameters = list(map(str.lstrip, allParameters))
                allParameters = [
                    x for x in allParameters if not x.startswith('#')
                ]
                allParameters = list(filter(None, allParameters))
                # for i in sorted(sorted(allParameters, key=lambda x: x.split('_')[4]), key=lambda x: x.split('_')[5]):
                for i in sorted(allParameters, key=lambda x: x.split('_')[5]):
                    print(i)
                # arrange parameters randomly
                # random.shuffle(p)
            #############################################################
            # END OF HARDCODED PART                                     #
            #############################################################

            # The following section allows for individual parameters to be replaced
            if modifiedParameters != None:
                if not silentMode:
                    print(
                        "Warning: using an externally added RefinableVariables set!"
                    )
                RefinableVariables = []
                # parsing the new parameters and make them refinable
                for parameter in modifiedParameters:
                    RefinableVariables.append(parameter[0] + "=" +
                                              str(parameter[1]) + ";\n")
                # remove potential duplicates and make all other parameters non-refinable
                # (a new list is built so that entries are not skipped while iterating)
                allParameters = [
                    i for i in allParameters
                    if not any(j[0] in i for j in modifiedParameters)
                ]
                Other = allParameters

            # here happens the splitting into multiple files
            if splitLength > 0:
                allParameters = list(filter(lambda a: a != '\n',
                                            allParameters))
                Other = allParameters[:offset]  # already used set
                RefinableVariables = []
                splitterFound = False
                i = 0
                while i < min(
                        splitLength,
                        len(allParameters) - offset) and not splitterFound:
                    if SPLITTER in allParameters[offset + i]:
                        splitterFound = True
                        print(
                            "Splitting file before max block size was reached")
                    else:
                        RefinableVariables.append(allParameters[offset + i])
                    i += 1

                if splitterFound:
                    offset += i
                else:
                    offset += splitLength

                # append the rest to the preamble
                Other += allParameters[offset:]

        # print a summary of parameters
        if not silentMode:
            print("Creating file ", os.path.join(writePath, "model.txt"))
            print("   Preamble items: ", len(Other))
            print("   RefinableVariables: ", len(RefinableVariables))

        # parsing correlations to get rid of multiple Ruvw vectors with the same length and direction
        allCorrelations = []
        currentCorrelation = None
        correlationPointer = -1
        for line in Correlations:
            if "[" in line and not line.lstrip().startswith(
                    "#"
            ):  # looking for the start of a correlation block, should also conatin the vector
                currentCorrelation = Correlation()
                currentCorrelation.set_uvw(line)
                currentCorrelation.lines = []
            elif "Multiplicity" in line and not line.lstrip().startswith("#"):
                currentCorrelation.set_multiplicity(line)
                # multiplicity comes after Ruvw; when both are found, a new object
                # with them is created and checked whether such a combination
                # already exists
                for i in range(len(allCorrelations)):
                    if allCorrelations[i].are_same_block(currentCorrelation):
                        correlationPointer = i
                if correlationPointer == -1:  # multiplicity and vector are new, append them
                    allCorrelations.append(currentCorrelation)
                    correlationPointer = len(allCorrelations) - 1
            elif "]" in line and not line.lstrip().startswith(
                    "#"):  # end of a coordination block, release all pointers
                correlationPointer = -1
            elif correlationPointer != -1:  # read mode, append current line to current block
                allCorrelations[correlationPointer].lines.append(line)

        # bringing the correlations into a writeable pattern
        Correlations = []
        for c in allCorrelations:
            Correlations.append(c.create_block())
            # print(len(c.lines), c.m, c.uvw)

        # here happens the actual writing
        Preamble = Static + Other  # used a separate variable because reusing Other would mess up the calculations for multiple files
        with open(baseFileName) as yellexFile:
            dataPointer = None
            writeIntoModelFile = False
            for line in yellexFile.readlines():
                # looking for start and endpoints in of blocks
                if dataPointer != None and writeIntoModelFile and not line.lstrip(
                ).startswith("#"):
                    for item in dataPointer:
                        modelFile.write(item)
                    dataPointer = None
                if '[' in line:
                    writeIntoModelFile = True
                if ']' in line:
                    writeIntoModelFile = False

                # writing the items which should be inserted
                if "RefinableVariables" in line:
                    dataPointer = RefinableVariables
                elif "Correlations" in line:
                    dataPointer = Correlations
                elif "Modes" in line:
                    dataPointer = Modes
                elif "Scale" in line:  # needs special treatment as it is not enclosed in brackets
                    for item in Preamble:
                        modelFile.write(item)
                elif "UnitCell" in line:
                    dataPointer = Variants

                # skip the input command and write from the original file
                if "input" not in line:
                    modelFile.write(line)

            modelFile.write("\n")
            for item in Print:
                modelFile.write(item)
    if not silentMode:
        print("Creation of a yell file was successful!")
    # finished with writing the yell model file

    # determine whether another run is necessary to generate additional model files with a different set of refinable variables
    if offset < len(Other) + len(RefinableVariables) and splitLength > 0:
        extend_yell_file(baseFileName,
                         splitLength,
                         offset,
                         outputFolder,
                         runNumber=runNumber + 1)
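
A hypothetical invocation of extend_yell_file; base.yell is assumed to contain "input" statements pointing at the extension files:

# split the refinable variables into batches of 20; each recursion writes
# models/0/model.txt, models/1/model.txt, ...
extend_yell_file("base.yell", splitLength=20, outputFolder="models/")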
Example #12
def generate_subframe_background_percentile(pathToFrames, pathToBackground, nameTemplate,
                                            frameRange, subsize, percentile, outputName,
                                            outputModifiers=None):
    """Creates a background by only reading in parts of frames and puzzeling these parts together.
    pathToFrames ... string location of the folder which contains the frames
    pathToBackground ... string location where the background frame should be placed
    nameTemplate ... string format of the frame names, allows percent substitution
    frameRange ... int maximum number of frames over which to run the algorithm
    subsize ... int number of pixels in x and y directions to determine the subframe size
                this is used to save memory
    percentile ... numeric the percentile of the frames which should be considered as background
    outputName ... string name of the finished background frame, allows percent substitution
    outputModifiers ... string plus-sign separated string list; these modifiers are used to substitute outputName
    """
    # parse the modifiers
    outputModifiers = helping_tools.parse_substition_modifiers(outputModifiers)
    fileNames = []
    frameset = cbf_tools.Frameset(pathToFrames, nameTemplate)
    fileNames = frameset.generate_frame_names_from_template()

    templateFrame = fabio.open(fileNames[0]) # just a prototype
    bg = np.zeros((templateFrame.data.shape[0], templateFrame.data.shape[1]))

    # determination of how many tiles are necessary for the subdivision of the frames
    tilesx = int(templateFrame.data.shape[0] / subsize) + 1
    tilesy = int(templateFrame.data.shape[1] / subsize) + 1
    for subx in range(tilesx):
        for suby in range(tilesy):
            print("\nWorking on sub %i of %i" % (subx * tilesy + suby + 1, tilesx * tilesy))
            # generation of the subframe size taking the border regions into account
            if (subx + 2) > tilesx:
                width = templateFrame.data.shape[0] - subx * subsize
            else:
                width = subsize
            if (suby + 2) > tilesy:
                height = templateFrame.data.shape[1] - suby * subsize
            else:
                height = subsize
            print("Width %i, height %i" % (width, height))
            subFrame = np.zeros((width, height, frameRange))
            for i in range(frameRange):
                print("Reading frame " + fileNames[i], end="\r")
                frame = fabio.open(fileNames[i])
                subFrame[:, : , i] = frame.data[subx * subsize : subx * subsize + width,
                                                suby * subsize : suby * subsize + height].copy()
                del frame # cleaning memory
            print("\nCalculating percentile")
            bg[subx * subsize : subx * subsize + width,
               suby * subsize : suby * subsize + height] = get_percentile(subFrame, subFrame.shape, percentile)

    helping_tools.check_folder(pathToBackground)
    # create and write the flux monitor
    fluxFileName = "fluxmonitor_" + outputName + ".csv"
    flux = cbf_tools.average_flux(pathToFrames, nameTemplate, pathToBackground, fluxFileName % outputModifiers, frameRange)
    # writing the background file
    templateFrame.data = bg.astype(np.int32)
    fileName, fileExtension = os.path.splitext(outputName)
    # splicing the average flux into the file name and prepare the extension
    outputName = fileName + "_flux_" + str(flux) + ".cbf"
    # write the cbf file
    templateFrame.write(os.path.join(pathToBackground, outputName % outputModifiers))
    # write the h5 file
    cbf_tools.frame_to_h5(templateFrame, os.path.join(pathToBackground, outputName + ".h5"), outputModifiers)
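
The background estimate itself is a low percentile taken along the frame axis of each tile; get_percentile is the repository's helper, so the same idea is sketched here with np.percentile on synthetic data:

import numpy as np

stack = np.random.poisson(40, size=(64, 64, 100)).astype(float)  # hypothetical tile over 100 frames
background = np.percentile(stack, 10, axis=2)  # a low percentile rejects transient high counts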
Example #13
def extend_yell_file(baseFileName, splitLength=0, offset=0, outputFolder="./", hardcodedSort=False, modifiedParameters=None, silentMode=False, runNumber=0):
    """
    Splices additional yell statements into a base file and outputs a yell model.txt
    baseFileName ... str a standard yell file that is extended by the "input" command.
    splitLength ... int if larger than 0, multiple yell files are created in different
                        folders where each model contains up to splitLength number of
                        RefinableVariables (and the rest is put into the preamble).
                        All preamble items and RefinableVariables are affected by this!
                        Each model created that way contains different RefinableVariables
                        so that all RefinableVariables are accounted for and refined.
                        This is then a recursive method!
    offset ... int number of RefinableVariables and preamble items that should be put
                   into the preamble before splitLength number of items are put into
                   the RefinableVariables section
    outputFolder ... str folder where to put the resulting model.txt
    hardcodedSort ... bool indicates whether the hardcoded section dedicated to sorting
                      parameters should be used; for practical reasons I had to modify
                      the parameter list there.
                      Warning: this section is hardcoded! Do not use this option unless you know what to do!
    modifiedParameters ... list of (name, value) pairs which become the RefinableVariables; matching parameters from the input files are moved to the preamble.
    silentMode ... bool If true, no status messages will be displayed.
    runNumber ... int number of times this algorithm has run, used for folder naming when splitting the file up.
    """
    # Preparing the lists which hold the values which are to be inserted into the yell file
    RefinableVariables = []
    MScatterers = []
    Variants = []
    Correlations = []
    Modes = []
    Other = [] # inserted after scale, if it contains variables, they will be refined when the model is split to multiple files
    Other.append("# Externally added preamble items\n")
    Static = [] # inserted after scale but is never modified
    Static.append("# Static entries\n")
    Print = [] # added at the end of the file

    # preparing the output file location and creating a new folder if necessary
    writePath = outputFolder
    pathExtension = ""
    if splitLength > 0: # add an index to the folder if multiple files are created
        pathExtension = str(runNumber)
        writePath += pathExtension
    helping_tools.check_folder(writePath) # create the folder, happens regardless of file splitting

    # starting to write the yell model file
    inputFiles = []
    dataPointer = None
    with open(os.path.join(writePath,"model.txt"), 'w') as modelFile:
        # scan for all input instructions and extract the file names
        with open(baseFileName, 'r') as yellexFile:
            for line in yellexFile.readlines():
                if line.strip().startswith("input"):
                    inputFiles.append(line.strip().split(' ')[1]) # gets the file name from the input instruction

        # read yell definition blocks from files into lists for later usage
        for file in inputFiles:
            dataPointer = None
            with open(file) as extensionFile:
                for line in extensionFile.readlines():
                    if "RefinableVariables" in line:
                        dataPointer = RefinableVariables
                    elif "Correlations" in line:
                        dataPointer = Correlations
                    elif "Modes" in line:
                        dataPointer = Modes
                    elif "Preamble" in line:
                        dataPointer = Other
                    elif "Static" in line:
                        dataPointer = Static
                    elif "FileEnd" in line:
                        dataPointer = Print
                    elif "UnitCell" in line:
                        dataPointer = Variants
                    if is_useable_input(line):
                        if not line.endswith("\n"):
                            line = line + "\n"
                        if dataPointer != None:
                            dataPointer.append(line)

        # if multiple model batches are created, redistribute the refinable variables over all model files
        if splitLength > 0 or hardcodedSort or modifiedParameters != None:
            allParameters = Other + RefinableVariables

            #############################################################
            # HERE IS THE HARDCODED PORTION! WATCH OUT!                 #
            #############################################################
            if hardcodedSort:
                if not silentMode:
                    print("Warning hardcoded sorting is active!!!")
                # use only a parameter word combination
                # for i in allParameters:
                #     if "center" in i:
                #         p.append(i)
                # sort parameters
                allParameters = list(map(str.lstrip, allParameters))
                allParameters = [x for x in allParameters if not x.startswith('#')]
                allParameters = list(filter(None, allParameters))
                # for i in sorted(sorted(allParameters, key=lambda x: x.split('_')[4]), key=lambda x: x.split('_')[5]):
                for i in sorted(allParameters, key=lambda x: x.split('_')[5]):
                    print(i)
                # arrange parameters randomly
                # random.shuffle(p)
            #############################################################
            # END OF HARDCODED PART                                     #
            #############################################################

            # The following section allows for individual parameters to be replaced
            if modifiedParameters != None:
                if not silentMode:
                    print("Warning: using an externally added RefinableVariables set!")
                RefinableVariables = []
                # parsing the new parameters and make them refinable
                for parameter in modifiedParameters:
                    RefinableVariables.append(parameter[0] + "=" + str(parameter[1]) + ";\n")
                # remove potential duplicates and make all other parameters non-refinable
                # (a new list is built so that entries are not skipped while iterating)
                allParameters = [i for i in allParameters
                                 if not any(j[0] in i for j in modifiedParameters)]
                Other = allParameters

            # here happens the splitting into multiple files
            if splitLength > 0:
                allParameters = list(filter(lambda a: a != '\n', allParameters))
                Other = allParameters[:offset] # already used set
                RefinableVariables = []
                splitterFound = False
                i = 0
                while i < min(splitLength,len(allParameters) - offset) and not splitterFound:
                    if SPLITTER in allParameters[offset+i]:
                        splitterFound = True
                        print("Splitting file before max block size was reached")
                    else:
                        RefinableVariables.append(allParameters[offset + i])
                    i += 1

                if splitterFound:
                    offset += i
                else:
                    offset += splitLength

                # append the rest to the preamble
                Other += allParameters[offset:]


        # print a summary of parameters
        if not silentMode:
            print("Creating file ", os.path.join(writePath,"model.txt"))
            print("   Preamble items: ", len(Other))
            print("   RefinableVariables: ", len(RefinableVariables))

        # parsing correlations to get rid of multiple Ruvw vectors with the same length and direction
        allCorrelations = []
        currentCorrelation = None
        correlationPointer = -1
        for line in Correlations:
            if "[" in line and not line.lstrip().startswith("#"): # looking for the start of a correlation block, should also conatin the vector
                currentCorrelation = Correlation()
                currentCorrelation.set_uvw(line)
                currentCorrelation.lines = []
            elif "Multiplicity" in line and not line.lstrip().startswith("#"):
                currentCorrelation.set_multiplicity(line)
                # multiplicity comes after Ruvw; when both are found, a new object
                # with them is created and checked whether such a combination
                # already exists
                for i in range(len(allCorrelations)):
                    if allCorrelations[i].are_same_block(currentCorrelation):
                        correlationPointer = i
                if correlationPointer == -1: # multiplicity and vector are new, append them
                    allCorrelations.append(currentCorrelation)
                    correlationPointer = len(allCorrelations) - 1
            elif "]" in line and not line.lstrip().startswith("#"): # end of a coordination block, release all pointers
                correlationPointer = -1
            elif correlationPointer != -1: # read mode, append current line to current block
                allCorrelations[correlationPointer].lines.append(line)

        # bringing the correlations into a writeable pattern
        Correlations = []
        for c in allCorrelations:
            Correlations.append(c.create_block())
            # print(len(c.lines), c.m, c.uvw)


        # here happens the actual writing
        Preamble = Static + Other # used a separate variable because reusing Other would mess up the calculations for multiple files
        with open(baseFileName) as yellexFile:
            dataPointer = None
            writeIntoModelFile = False
            for line in yellexFile.readlines():
                # looking for start and endpoints in of blocks
                if dataPointer != None and writeIntoModelFile and not line.lstrip().startswith("#"):
                    for item in dataPointer:
                        modelFile.write(item)
                    dataPointer = None
                if '[' in line:
                    writeIntoModelFile = True
                if ']' in line:
                    writeIntoModelFile = False

                # writing the items which should be inserted
                if "RefinableVariables" in line:
                    dataPointer = RefinableVariables
                elif "Correlations" in line:
                    dataPointer = Correlations
                elif "Modes" in line:
                    dataPointer = Modes
                elif "Scale" in line: # needs special treatment as it is not enclosed in brackets
                    for item in Preamble:
                        modelFile.write(item)
                elif "UnitCell" in line:
                    dataPointer = Variants

                # skip the input command and write from the original file
                if "input" not in line:
                    modelFile.write(line)

            modelFile.write("\n")
            for item in Print:
                modelFile.write(item)
    if not silentMode:
        print("Creation of a yell file was successful!")
    # finished with writing the yell model file

    # determine whether another run is necessary to generate additional model files with a different set of refinable variables
    if offset < len(Other) + len(RefinableVariables) and splitLength > 0:
        extend_yell_file(baseFileName, splitLength, offset, outputFolder, runNumber=runNumber+1)
Example #14
def SavGol_filter(pathToFrames, nameTemplate, frameRange, pathToFiltered,
                  namePrefix, maskFrame, subsize, windowLength, polyOrder):
    """Subtracts a flux normalized frame from a dataset.
    pathToFrames ... string location of the folder which contains the frames
    pathToSubtracted ... string location where the processed frames should be saved
    namePrefix ... string short text that is added to each newly calculated frame
    single ... fabio.frame this frame will be substracted from the dataset
    maskFrame ... fabio.frame frame which contains all pixel that should be masked
    """
    helping_tools.check_folder(pathToFiltered)
    print("Reading masks, please wait!")
    # maskUntrusted, maskDefective, maskHot = cbf_tools.generate_all_unwanted_pixel(maskFrame, 1000000)
    print("starting filtering\n")
    # generating frame paths and names for reading
    frameset = cbf_tools.Frameset(pathToFrames, nameTemplate)
    frameset.setSize = frameRange
    fileNames = frameset.generate_frame_names_from_template()
    # generating frame paths and names for writing
    frameset = cbf_tools.Frameset(pathToFiltered, namePrefix + nameTemplate)
    frameset.setSize = frameRange
    newFiles = frameset.generate_frame_names_from_template()

    templateFrame = fabio.open(fileNames[0])
    # determination of how many tiles are necessary for the subdivision of the frames
    tilesx = int(templateFrame.data.shape[0] / subsize) + 1
    tilesy = int(templateFrame.data.shape[1] / subsize) + 1
    for subx in range(tilesx):
        for suby in range(tilesy):
            print("\nWorking on sub %i of %i" %
                  (subx * tilesy + suby + 1, tilesx * tilesy))
            # generation of the subframe size taking the border regions into account
            if (subx + 2) > tilesx:
                width = templateFrame.data.shape[0] - subx * subsize
            else:
                width = subsize
            if (suby + 2) > tilesy:
                height = templateFrame.data.shape[1] - suby * subsize
            else:
                height = subsize
            print("Width %i, height %i" % (width, height))
            subFrame = np.zeros((width, height, frameRange))
            for i in range(frameRange):
                print("Reading frame " + fileNames[i])  #, end="\r")
                frame = fabio.open(fileNames[i])
                subFrame[:, :,
                         i] = frame.data[subx * subsize:subx * subsize + width,
                                         suby * subsize:suby * subsize +
                                         height].copy()
                del frame  # cleaning memory
            print("\nApplying SavGol filter...")
            for x in range(subFrame.shape[0]):
                for y in range(subFrame.shape[1]):
                    print(x, y, end="\r")
                    filterLine = subFrame[x, y, :]
                    subFrame[x,
                             y, :] = scipy.signal.savgol_filter(
                                 filterLine,
                                 windowLength,
                                 polyOrder,
                                 mode='wrap').copy()
            subFrame = subFrame.astype(np.int32)  # cast to integer counts before writing
            for i in range(frameRange):
                print("Writing frame " + newFiles[i])  #, end="\r")
                frame = fabio.open(newFiles[i])
                frame.data[subx * subsize:subx * subsize + width, suby *
                           subsize:suby * subsize + height] = subFrame[:, :, i]
                frame.save(
                    os.path.join(pathToFiltered, namePrefix +
                                 os.path.basename(frame.filename)))
                del frame  # cleaning memory
    print("\nDone!")
Example #15
def generate_subframe_background_percentile(pathToFrames,
                                            pathToBackground,
                                            nameTemplate,
                                            frameRange,
                                            subsize,
                                            percentile,
                                            outputName,
                                            outputModifiers=None):
    """Creates a background by only reading in parts of frames and puzzeling these parts together.
    pathToFrames ... string location of the folder which contains the frames
    pathToBackground ... string location where the background frame should be placed
    nameTemplate ... string format of the frame names, allows percent substitution
    frameRange ... int maximum number of frames over which to run the algorithm
    subsize ... int number of pixels in x and y directions to determine the subframe size
                this is used to save memory
    percentile ... numeric the percentile of the frames which should be considered as background
    outputName ... string name of the finished background frame, allows percent substitution
    outputModifiers ... string plus-sign separated string list; these modifiers are used to substitute outputName
    """
    # parse the modifiers
    outputModifiers = helping_tools.parse_substition_modifiers(outputModifiers)
    fileNames = []
    frameset = cbf_tools.Frameset(pathToFrames, nameTemplate)
    fileNames = frameset.generate_frame_names_from_template()

    templateFrame = fabio.open(fileNames[0])  # just a prototype
    bg = np.zeros((templateFrame.data.shape[0], templateFrame.data.shape[1]))

    # determination of how many tiles are necessary for the subdivision of the frames
    tilesx = int(templateFrame.data.shape[0] / subsize) + 1
    tilesy = int(templateFrame.data.shape[1] / subsize) + 1
    for subx in range(tilesx):
        for suby in range(tilesy):
            print("\nWorking on sub %i of %i" % (subx * tilesy + suby + 1,
                                                 tilesx * tilesy))
            # generation of the subframe size taking the border regions into account
            if (subx + 2) > tilesx:
                width = templateFrame.data.shape[0] - subx * subsize
            else:
                width = subsize
            if (suby + 2) > tilesy:
                height = templateFrame.data.shape[1] - suby * subsize
            else:
                height = subsize
            print("Width %i, height %i" % (width, height))
            subFrame = np.zeros((width, height, frameRange))
            for i in range(frameRange):
                print("Reading frame " + fileNames[i], end="\r")
                frame = fabio.open(fileNames[i])
                subFrame[:, :,
                         i] = frame.data[subx * subsize:subx * subsize + width,
                                         suby * subsize:suby * subsize +
                                         height].copy()
                del frame  # cleaning memory
            print("\nCalculating percentile")
            bg[subx * subsize:subx * subsize + width,
               suby * subsize:suby * subsize + height] = get_percentile(
                   subFrame, subFrame.shape, percentile)

    helping_tools.check_folder(pathToBackground)
    # create and write the flux monitor
    fluxFileName = "fluxmonitor_" + outputName + ".csv"
    flux = cbf_tools.average_flux(pathToFrames, nameTemplate, pathToBackground,
                                  fluxFileName % outputModifiers, frameRange)
    # writing the background file
    templateFrame.data = bg.astype(np.int32)
    fileName, fileExtension = os.path.splitext(outputName)
    # splicing the average flux into the file name and prepare the extension
    outputName = fileName + "_flux_" + str(flux) + ".cbf"
    # write the cbf file
    templateFrame.write(
        os.path.join(pathToBackground, outputName % outputModifiers))
    # write the h5 file
    cbf_tools.frame_to_h5(templateFrame,
                          os.path.join(pathToBackground, outputName + ".h5"),
                          outputModifiers)