Example 1
def test_metadata_save(self):
    local = path.dirname(__file__)
    t = np.arange(12).reshape(3, 4)  # set up a test data file with mixed metadata
    t = Data(t)
    t.column_headers = ["1", "2", "3", "4"]
    metitems = [True, 1, 0.2, {"a": 1, "b": "abc"}, (1, 2), np.arange(3), [1, 2, 3], "abc",  # all types accepted
                r"\\abc\cde", 1e-20,  # extra tests
                [1, (1, 2), "abc"],  # list with different types
                [[[1]]],  # nested list
                None,  # None value
                ]
    metnames = ["t" + str(i) for i in range(len(metitems))]
    for k, v in zip(metnames, metitems):
        t[k] = v
    t.save(path.join(local, "mixedmetatest.dat"))
    tl = Data(path.join(local, "mixedmetatest.txt"))  # will change extension to txt if not txt or tdi, is this what we want?
    t2 = self.d4.clone  # check that python tdi save is the same as labview tdi save
    t2.save(path.join(local, "mixedmetatest2.txt"))
    t2l = Data(path.join(local, "mixedmetatest2.txt"))
    for orig, load in [(t, tl), (t2, t2l)]:
        for k in ["Loaded as", "TDI Format"]:
            orig[k] = load[k]
        self.assertTrue(np.allclose(orig.data, load.data))
        self.assertTrue(orig.column_headers == load.column_headers)
        self.res = load.metadata ^ orig.metadata
        self.assertTrue(load.metadata == orig.metadata, "Metadata not the same on round tripping to disc")
    os.remove(path.join(local, "mixedmetatest.txt"))  # clear up
    os.remove(path.join(local, "mixedmetatest2.txt"))
Example 2
def norm_group(pos, _, **kargs):
    """Take the drain current for each file in the group, build an analysis file and work out the mean drain."""
    if "signal" in kargs:
        signal = kargs["signal"]
    else:
        signal = "fluo"
    lfit = kargs["lfit"]
    rfit = kargs["rfit"]

    posfile = Data()
    posfile.metadata = pos[0].metadata
    posfile = posfile & pos[0].column(0)
    posfile.column_headers = ["Energy"]
    for f in pos:
        print(str(f["run"]) + str(f.find_col(signal)))
        posfile = posfile & f.column(signal)
    posfile.add_column(lambda r: np.mean(r[1:]), "mean drain")
    ec = posfile.find_col("Energy")
    md = posfile.find_col("mean drain")
    linearfit = scipy.poly1d(
        posfile.polyfit(ec, md, 1, lambda x, y: lfit[0] <= x <= lfit[1]))
    posfile.add_column(lambda r: r[md] - linearfit(r[ec]), "minus linear")
    highend = posfile.mean("minus", lambda r: rfit[0] <= r[ec] <= rfit[1])
    ml = posfile.find_col("minus linear")
    posfile.add_column(lambda r: r[ml] / highend, "normalised")
    if "group_key" in kargs:
        posfile[kargs["group_key"]] = pos.key
    return posfile
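A possible driver for this walker (an illustrative sketch, not part of the original example): in the Stoner package a function with this (group, trail, **kargs) signature is typically handed to DataFolder.walk_groups. The file pattern, grouping key and fit windows below are assumptions.

# Hypothetical usage sketch -- pattern, grouping key and fit windows are illustrative.
from Stoner import DataFolder

fldr = DataFolder(".", pattern="*.dat")   # load the scan files (assumed layout)
fldr.group("pos")                         # one sub-group per position (assumed key)
results = fldr.walk_groups(norm_group, group=True, replace_terminal=True,
                           walker_args={"lfit": (690.0, 700.0),
                                        "rfit": (730.0, 740.0),
                                        "signal": "fluo"})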
Example 3
    def hysteresis(self, mask=None):
        """Make a hysteresis loop of the average intensity in the given images

        Keyword Argument:
            mask(ndarray or list):
                boolean array of same size as an image or imarray or list of
                masks for each image. If True then don't include that area in
                the intensity averaging.

        Returns:
            hyst(Data):
                'Field', 'Intensity', 2 column array
        """
        hyst = np.column_stack((self.fields, np.zeros(len(self))))
        for i in range(len(self)):
            im = self[i]
            if isinstance(mask, np.ndarray) and len(mask.shape) == 2:
                hyst[i, 1] = np.average(im[np.invert(mask.astype(bool))])
            elif isinstance(mask, np.ndarray) and len(mask.shape) == 3:
                hyst[i, 1] = np.average(im[np.invert(mask[i, :, :].astype(bool))])
            elif isinstance(mask, (tuple, list)):
                hyst[i, 1] = np.average(im[np.invert(mask[i])])
            else:
                hyst[i, 1] = np.average(im)
        d = Data(hyst, setas="xy")
        d.column_headers = ["Field", "Intensity"]
        return d
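A minimal usage sketch, assuming `stack` is an ImageStack-like object that provides this method and a `fields` attribute; the masked corner is purely illustrative.

# Hypothetical usage -- `stack` stands in for a real image stack.
import numpy as np

mask = np.zeros(stack[0].shape, dtype=bool)
mask[:10, :10] = True              # True pixels are excluded from the average
loop = stack.hysteresis(mask=mask)
loop.plot()                        # setas="xy" gives Field vs Intensity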
Example 4
def hist(im, *args, **kargs):
    """Pass through to :py:func:`matplotlib.pyplot.hist` function."""
    counts, edges = np.histogram(im.ravel(), *args, **kargs)
    centres = (edges[1:] + edges[:-1]) / 2
    new = Data(np.column_stack((centres, counts)))
    new.column_headers = ["Intensity", "Frequency"]
    new.setas = "xy"
    return new
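A small usage sketch, assuming the extra positional/keyword arguments (e.g. bins) are forwarded unchanged to np.histogram, as the body shows; the random array is a stand-in for a real image.

# Hypothetical usage -- a random array stands in for an ImageArray.
import numpy as np

im = np.random.rand(64, 64)
h = hist(im, bins=32)   # Data with "Intensity" (bin centres) and "Frequency" columns
h.plot()                # setas="xy" plots Frequency against Intensity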
Example 5
def profile_line(img, src=None, dst=None, linewidth=1, order=1, mode="constant", cval=0.0, constrain=True, **kargs):
    """Wrapper for sckit-image method of the same name to get a line_profile.

    Parameters:
        img(ImageArray): Image data to take line section of
        src, dst (2-tuple of int or float): start and end of line profile. If the co-ordinates
            are given as intergers then they are assumed to be pxiel co-ordinates, floats are
            assumed to be real-space co-ordinates using the embedded metadata.
        linewidth (int): the wideth of the profile to be taken.
        order (int 1-3): Order of interpolation used to find image data when not aligned to a point
        mode (str): How to handle data outside of the image.
        cval (float): The constant value to assume for data outside of the image is mode is "constant"
        constrain (bool): Ensure the src and dst are within the image (default True).

    Returns:
        A :py:class:`Stoner.Data` object containing the line profile data and the metadata from the image.
    """
    scale = img.get("MicronsPerPixel", 1.0)
    r, c = img.shape
    if src is None and dst is None:
        if "x" in kargs:
            src = (kargs["x"], 0)
            dst = (kargs["x"], r)
        if "y" in kargs:
            src = (0, kargs["y"])
            dst = (c, kargs["y"])
    if isinstance(src, float):
        src = (src, src)
    if isinstance(dst, float):
        dst = (dst, dst)
    dst = _scale(dst, scale)
    src = _scale(src, scale)
    if not istuple(src, int, int):
        raise ValueError("src co-ordinates are not a 2-tuple of ints.")
    if not istuple(dst, int, int):
        raise ValueError("dst co-ordinates are not a 2-tuple of ints.")

    if constrain:
        fix = lambda x, mx: int(round(sorted([0, x, mx])[1]))
        r, c = img.shape
        src = (fix(src[0], r), fix(src[1], c))
        dst = (fix(dst[0], r), fix(dst[1], c))

    result = measure.profile_line(img, src, dst, linewidth, order, mode, cval)
    points = measure.profile._line_profile_coordinates(src, dst, linewidth)[:, :, 0]
    ret = Data()
    ret.data = points.T
    ret.setas = "xy"
    ret &= np.sqrt(ret.x ** 2 + ret.y ** 2) * scale
    ret &= result
    ret.column_headers = ["X", "Y", "Distance", "Intensity"]
    ret.setas = "..xy"
    ret.metadata = img.metadata.copy()
    return ret
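A short usage sketch, assuming `img` is an ImageArray as the docstring describes: integer tuples are taken as pixel co-ordinates, floats as real-space values scaled by the "MicronsPerPixel" metadata.

# Hypothetical usage -- `img` is assumed to be an ImageArray.
section = profile_line(img, src=(0, 0), dst=(63, 63), linewidth=3)
section.plot()   # setas="..xy" plots Intensity against Distance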
Example 6
def test_metadata_save(self):
    local = path.dirname(__file__)
    t = np.arange(12).reshape(3, 4)  # set up a test data file with mixed metadata
    t = Data(t)
    t.column_headers = ["1", "2", "3", "4"]
    metitems = [
        True,
        1,
        0.2,
        {"a": 1, "b": "abc"},
        (1, 2),
        np.arange(3),
        [1, 2, 3],
        "abc",  # all types accepted
        r"\\abc\cde",
        1e-20,  # extra tests
        [1, (1, 2), "abc"],  # list with different types
        [[[1]]],  # nested list
    ]
    metnames = ["t" + str(i) for i in range(len(metitems))]
    for k, v in zip(metnames, metitems):
        t[k] = v
    t.save(path.join(local, "mixedmetatest.dat"))
    tl = Data(path.join(local, "mixedmetatest.txt"))  # will change extension to txt if not txt or tdi, is this what we want?
    t2 = self.d4.clone  # check that python tdi save is the same as labview tdi save
    t2.save(path.join(local, "mixedmetatest2.txt"))
    t2l = Data(path.join(local, "mixedmetatest2.txt"))
    for orig, load in [(t, tl), (t2, t2l)]:
        self.assertTrue(np.allclose(orig.data, load.data))
        self.assertTrue(orig.column_headers == load.column_headers)
        self.assertTrue(all([i in load.metadata.keys() for i in orig.metadata.keys()]))
        for k in orig.metadata.keys():
            if isinstance(orig[k], np.ndarray):
                self.assertTrue(np.allclose(load[k], orig[k]))
            elif isinstance(orig[k], float) and np.isnan(orig[k]):
                self.assertTrue(np.isnan(load[k]))
            else:
                self.assertTrue(load[k] == orig[k], "Not equal for metadata: {}".format(load[k]))
    os.remove(path.join(local, "mixedmetatest.txt"))  # clear up
    os.remove(path.join(local, "mixedmetatest2.txt"))
Example 7
def profile_line(img, src, dst, linewidth=1, order=1, mode="constant", cval=0.0):
    """Wrapper for the scikit-image function of the same name to get a line profile.

    Parameters:
        img(ImageArray): Image data to take the line section of.
        src, dst (2-tuple of int or float): start and end of the line profile. If the co-ordinates
            are given as integers then they are assumed to be pixel co-ordinates; floats are
            assumed to be real-space co-ordinates using the embedded metadata.
        linewidth (int): the width of the profile to be taken.
        order (int 1-3): Order of interpolation used to find image data when not aligned to a point.
        mode (str): How to handle data outside of the image.
        cval (float): The constant value to assume for data outside of the image if mode is "constant".

    Returns:
        A :py:class:`Stoner.Data` object containing the line profile data and the metadata from the image.
    """
    scale = img.get("MicronsPerPixel", 1.0)
    if isinstance(src[0], float):
        src = (int(src[0] / scale), int(src[1] / scale))
    if isinstance(dst[0], float):
        dst = (int(dst[0] / scale), int(dst[1] / scale))

    result = measure.profile_line(img, src, dst, linewidth, order, mode, cval)
    points = measure.profile._line_profile_coordinates(src, dst, linewidth)[:, :, 0]
    ret = Data()
    ret.data = points.T
    ret.setas = "xy"
    ret &= np.sqrt(ret.x**2 + ret.y**2) * scale
    ret &= result
    ret.column_headers = ["X", "Y", "Distance", "Intensity"]
    ret.setas = "..xy"
    ret.metadata = img.metadata.copy()
    return ret
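A usage sketch for this older variant, assuming `img` carries a "MicronsPerPixel" key: float co-ordinates are divided by that scale to get pixel indices, so the values below are real-space microns.

# Hypothetical usage -- `img` is assumed to be an ImageArray with scale metadata.
section = profile_line(img, (0.0, 0.0), (12.8, 12.8), linewidth=1)
section.plot()   # setas="..xy" plots Intensity against Distance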
Example 8
# Now get the section of the data file that has the peak positions
# This is really doing the hard work
# We differentiate the data using a Savitzky-Golay filter with a 5 point window fitting quartics.
# This has proved most successful for me looking at some MdV data.
# We then threshold for zero crossing of the derivative
# And check the second derivative to see whether we like the peak as significant. This is the significance parameter
# and seems to be largely empirical
# Finally we interpolate back to the complete data set to make sure we get the angle as well as the counts.
d.lmfit(ExponentialModel, result=True, replace=False, header="Envelope")
d.subtract("Counts", "Envelope", replace=False, header="peaks")
d.setas = "xy"
t = Data(d.interpolate(d.peaks(significance=sensitivity, width=8, poly=4)))

t.column_headers = copy(d.column_headers)
d %= "peaks"
t %= "peaks"
d.setas = "xy"
d.labels[d.find_col("Angle")] = r"Reflection Angle $\theta$"
t.del_rows(0, lambda x, y: x < critical_edge)
t.setas = "xy"
t.template.fig_width = 7.0
t.template.fig_height = 5.0
t.plot(fmt="go", plotter=pyplot.semilogy)
main_fig = d.plot(figure=t.fig, plotter=pyplot.semilogy)
d.show()
# Now convert the angle to sin^2
t.apply(lambda x: np.sin(np.radians(x[0] / 2.0)) ** 2, 0, header=r"$sin^2\theta$")
# Now create the m^2 order
m = np.arange(len(t)) + fringe_offset
Example 9
from numpy import linspace, meshgrid, column_stack
from numpy.random import normal
import matplotlib.cm as cmap
import matplotlib.pyplot as plt
from Stoner import Data


def plane(coord, a, b, c):
    """Function to define a plane."""
    return c - (coord[0] * a + coord[1] * b)


coeffs = [1, -0.5, -1]
col = linspace(-10, 10, 6)
X, Y = meshgrid(col, col)
Z = plane((X, Y), *coeffs) + normal(size=X.shape, scale=7.0)
d = Data(
    column_stack((X.ravel(), Y.ravel(), Z.ravel())),
    filename="Fitting a Plane",
    setas="xyz",
)

d.column_headers = ["X", "Y", "Z"]
d.figure(projection="3d")
d.plot_xyz(plotter="scatter")

popt, pcov = d.curve_fit(plane, [0, 1], 2, result=True)
d.setas = "xy.z"

d.plot_xyz(linewidth=0, cmap=cmap.jet)

txt = "$z=c-ax-by$\n"
txt += "\n".join([d.format("plane:{}".format(k), latex=True) for k in ["a", "b", "c"]])

ax = plt.gca()  # the current axes are already 3D from d.figure(projection="3d")
ax.text(15, 5, -50, txt)
d.draw()
Example 10
    def slice(self, key=None, values_only=False, output=None):  # pylint: disable=arguments-differ
        """Return a list of the metadata dictionaries for each item/file in the top level group

        Keyword Arguments:
            key(string or list of strings):
                if given then only return the item(s) requested from the metadata
            values_only(bool):
                if given and *output* is not set, only return tuples of the dictionary values. Mostly useful
                when given a single key string
            output (str or type):
                Controls the output format from slice_metadata. Possible values are

                - "dict" or dict - return a list of dictionary subsets of the metadata from each image
                - "list" or list - return a list of values of each item pf the metadata
                - "array" or np.array - return a single array - like list above, but returns as a numpy array. This can create a 2D array from multiple keys
                - "Data" or Stoner.Data - returns the metadata in a Stoner.Data object where the column headers are the metadata keys.
                - "smart" - switch between *dict* and *list* depending whether there is one or more keys.

        Returns:
            ret(list of dict, tuple of values or :py:class:`Stoner.Data`):
                depending on *values_only* and *output*, returns the sliced dictionaries or tuples/
                values of the items

        To do:
            this should probably be a func in baseFolder and should use have
            recursive options (build a dictionary of metadata values). And probably
            options to extract other parts of objects (first row or whatever).
        """
        if output is None:  # Sort out a definitive value of output
            output = "dict" if not values_only else "smart"
        if output not in [
                "dict",
                "list",
                "array",
                "Data",
                "smart",
                dict,
                list,
                _np_.ndarray,
                DataFile,
        ]:  # Check for good output value
            raise SyntaxError(
                "output of slice metadata must be either dict, list, or array not {}"
                .format(output))
        metadata = [
            k.metadata for k in self._folder
        ]  # this can take some time if it's loading in the contents of the folder
        if isinstance(key, string_types):  # Single key given
            key = metadata[0].__lookup__(key, multiple=True)
            key = [key] if not islike_list(key) else key
        # Expand all keys in case of multiple metadata matches
        newkey = []
        for k in key:
            newkey.extend(metadata[0].__lookup__(k, multiple=True))
        key = newkey
        if len(set(key) - set(self.common_keys)):  # Is the key in the common keys? # TODO: implement __getitem__'s masked array logic?
            raise KeyError("{} are missing from some items in the Folder.".format(set(key) - set(self.common_keys)))
        results = []
        for i, met in enumerate(metadata):  # Assemble a list of dictionaries of values
            results.append({k: v for k, v in met.items() if k in key})
        if output in [
                "list", "array", "Data", list, _np_.ndarray, DataFile
        ] or (output == "smart" and len(results[0]) == 1):  # Reformat output
            cols = []
            for k in key:  # Expand the columns of data we're going to need if some values are not scalar
                if islike_list(metadata[0][k]):
                    for i, _ in enumerate(metadata[0][k]):
                        cols.append("{}_{}".format(k, i))
                else:
                    cols.append(k)

            for i, met in enumerate(results):  # For each object in the Folder
                results[i] = []
                for k in key:  # and for each key in out list
                    v = met[k]
                    if islike_list(v):  # extend or append depending on whether the value is scalar. # TODO: This will blow up for values with more than 1 dimension!
                        results[i].extend(v)
                    else:
                        results[i].append(v)
                if output in ["array", "Data", _np_.ndarray, DataFile]:  # Convert each row to an array
                    results[i] = _np_.array(results[i])
            if len(cols) == 1:  # single key
                results = [m[0] for m in results]
            if output in ["array", _np_.ndarray]:
                results = _np_.array(results)
            if output in ["Data", DataFile]:  # Build our Data object
                from Stoner import Data

                tmp = Data()
                tmp.data = _np_.array(results)
                tmp.column_headers = cols
                results = tmp
        return results
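A hedged sketch of the output modes listed in the docstring, assuming `fldr` is a DataFolder whose files all carry "Temperature" and "Field" metadata, and that this method is reached through the folder's metadata proxy (the object that defines it here).

# Hypothetical usage -- the folder and metadata keys are assumptions.
temps = fldr.metadata.slice("Temperature", output="list")       # one value per file
table = fldr.metadata.slice(["Temperature", "Field"], output="Data")
print(table.column_headers)   # -> ["Temperature", "Field"]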
Example 11
"""Plot data defined on a matrix."""
from Stoner import Data
import numpy as np

x, y = np.meshgrid(np.linspace(-2, 2, 101), np.linspace(-2, 2, 101))
z = np.cos(4 * np.pi * np.sqrt(x**2 + y**2)) * np.exp(-np.sqrt(x**2 + y**2))

p = Data()
p = p & np.linspace(-2, 2, 101) & z
p.column_headers = ["X"]
for i, v in enumerate(np.linspace(-2, 2, 101)):
    p.column_headers[i + 1] = str(v)

p.plot_matrix(xlabel="x", ylabel="y", title="Data as Matrix")
Example 12
        data.del_rows(isnan(data.y))

        # Normalise data on y axis between +/- 1
        data.normalise(base=(-1.0, 1.0), replace=True)

        # Swap x and y axes around so that R is x and T is y
        data = ~data

        # Curve fit a straight line, using only the central 90% of the resistance transition
        data.curve_fit(linear, bounds=lambda x, r: -threshold < x < threshold, result=True, p0=[7.0, 0.0])  # result=True to record fit into metadata

        # Plot the results
        data.setas[-1] = "y"
        data.subplot(1, len(r_cols), i + 1)
        data.plot(fmt=["k.", "r-"])
        data.annotate_fit(linear, x=-1.0, y=7.3, fontsize="small")
        data.title = "Ramp {}".format(data[iterator][0])
        row.extend([data["linear:intercept"], data["linear:intercept err"]])
    data.tight_layout()
    result += np.array(row)

result.column_headers = ["Ramp", "Sample 4 R", "dR", "Sample 7 R", "dR"]
result.setas = "xyeye"
result.plot(fmt=["k.", "r."])
Example 13
    for s in fldr.groups:  # Fit each FMR spectrum
        subfldr = fldr[s]
        subfldr.metadata["Field Sign"] = s

        print("s={}".format(s))
        result = []
        for ix, res in enumerate(subfldr.each.iter(do_fit)):
            result.append(res)

        data, headers = zip(*result)
        new_data = data[0]
        for r in data[1:]:
            new_data = append(new_data, r, axis=0)
        result = Data(new_data)
        result.column_headers = headers[0]

        # Now plot all the fits

        subfldr.plots_per_page = 6  # Plot results
        subfldr.plot(figsize=(8, 8), extra=extra)

        # Work with the overall results
        result.setas(y="H_res", e="H_res.stderr", x="Freq")
        result.y = result.y / mu_0  # Convert to A/m
        result.e = result.e / mu_0

        resfldr += result  # Stash the results

    # Merge the two field signs into a single file, taking care of the error columns too
    result = resfldr[0].clone