Example #1
import unittest
import warnings

from shabanipy.labber import LabberData


class TestWarnNotConstant(unittest.TestCase):
    def setUp(self):
        """Monkey patch LabberData to return synthetic data."""
        self.labberdata = LabberData("")
        self.labberdata.get_data = lambda *_: self.data

    def tearDown(self):
        del self.labberdata
        del self.data

    def assert_1_userwarning(self, warnings):
        self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0].category, UserWarning)

    def test_default_1_percent(self):
        with warnings.catch_warnings(record=True) as w:
            self.data = [99] * 100 + [100]
            self.labberdata.warn_not_constant("")
            self.assertEqual(len(w), 0)
            self.data += [99]
            self.labberdata.warn_not_constant("")
            self.assert_1_userwarning(w)

    def test_user_specified_threshold(self):
        self.data = [1, 2]
        with warnings.catch_warnings(record=True) as w:
            self.labberdata.warn_not_constant("", 0.51)
            self.assertEqual(len(w), 0)
            self.labberdata.warn_not_constant("", 0.49)
            self.assert_1_userwarning(w)
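From these tests one can infer the contract of warn_not_constant: it emits a
UserWarning when the named channel varies by more than a relative threshold
(1% by default). A minimal sketch consistent with the tests above, though not
necessarily the library's actual implementation:

import warnings

import numpy as np

class LabberDataSketch:
    """Stand-in illustrating only the method under test."""

    def warn_not_constant(self, name_or_index, relative_threshold=0.01):
        data = np.asarray(self.get_data(name_or_index))
        # Fraction of consecutive samples at which the value changes.
        changes = np.count_nonzero(np.diff(data)) / len(data)
        if changes > relative_threshold:
            # warnings.warn issues a UserWarning by default.
            warnings.warn(f"Channel {name_or_index!r} changes at "
                          f"{changes:.1%} of consecutive points.")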
Example #2
    def classify_datasets(self):
        """Find the classifier values in each identified dataset."""
        logger.info("classifying datasets...")
        if not self._datasets:
            raise RuntimeError(
                "No identified datasets to work on. Run `identify_datasets` or"
                " load an existing list of datasets using `load_dataset_list`."
            )

        classified_datasets = {p.name: {} for p in self.patterns}
        patterns = {p.name: p for p in self.patterns}
        for name, datafiles in self._datasets.items():
            logger.debug(f"  classifying '{name}' measurements")
            classified = classified_datasets[name]
            for path in datafiles:
                logger.debug(f"    classifying {path}")
                with LabberData(path) as f:
                    classifiers = patterns[name].extract_classifiers(f)
                classified[path] = classifiers

            for path, clsf in classified.items():
                for other_path, other_clsf in classified.items():
                    if other_path != path and clsf == other_clsf:
                        raise RuntimeError(
                            f"{path} and {other_path} have identical classifiers")

        self._classified_datasets = classified_datasets
        logger.debug(
            f"Classified datasets:\n{pprint.pformat(classified_datasets)}")
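For reference, the resulting mapping is keyed by pattern name and then by file
path. A made-up sketch of the structure (paths and classifier values are
illustrative only):

classified_datasets = {
    "fraunhofer": {
        "/data/scan-071.hdf5": {"gate": (-4.5,)},
        "/data/scan-072.hdf5": {"gate": (-3.0,)},
    },
}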
Example #3
    def match_dataset(self, path: str) -> Optional[MeasurementPattern]:
        """Match a single file and return the first matching pattern, if any.

        This function is mostly included for debugging purposes.

        """
        with LabberData(path) as f:
            for p in self.patterns:
                if p.match(f):
                    return p
        return None
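A hypothetical call (the path is made up, and `aggregator` stands for the
owning object, whose class is not shown in this excerpt):

pattern = aggregator.match_dataset("/data/2021/10/scan-071.hdf5")
print(pattern.name if pattern is not None else "no match")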
Example #4
    def identify_datasets(self, folders):
        """Identify the relevant datasets by scanning the content of a folder."""
        logger.info("identifying datasets...")
        datasets = {p.name: [] for p in self.patterns}
        for folder in folders:
            if not Path(folder).exists():
                logger.warning(f"{folder} does not exist")
                continue
            logger.debug(f"Walking {folder}")
            for root, dirs, files in os.walk(folder):
                for datafile in (f for f in files if f.endswith(".hdf5")):
                    path = os.path.join(root, datafile)
                    logger.debug(f"matching file {datafile}")
                    try:
                        with LabberData(path) as f:
                            logger.debug(f"steps: {[s.name for s in f.steps]}")
                            logger.debug(f"logs:  {[l.name for l in f.logs]}")
                            logger.debug(
                                f"instrument configs: {[c.name for c in f.instrument_configs]}"
                            )
                            for p in self.patterns:
                                logger.debug(
                                    f"matching measurement pattern '{p.name}'")
                                if p.match_file(f):
                                    datasets[p.name].append(path)
                                    logger.debug(
                                        f"- accepted {datafile} "
                                        f"for measurement pattern '{p.name}'")
                    except OSError:
                        logger.debug(
                            f"- rejected {datafile}: file is corrupted")

        self._datasets = datasets
        logger.info(
            f"identified datasets:\n{pprint.pformat({k: str(len(v)) + ' files' for k, v in datasets.items()})}"
        )
        logger.debug(f"identified datasets:\n{pprint.pformat(datasets)}")
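A minimal invocation sketch (folder names are made up; `aggregator` again
stands for the owning object):

aggregator.identify_datasets(["/data/2021/10", "/data/2021/11"])
# aggregator._datasets now maps each pattern name to a list of .hdf5 paths.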
Example #5
    def extract(self, dataset: LabberData, config: StepConfig) -> tuple:
        """Extract the classification values associated with that step."""
        if self.ramps and not config.is_ramped:
            raise ValueError(
                "Pattern expects a ramp but the step is not ramped. "
                f"Step: {config}, pattern: {self}")
        if config.is_ramped:
            # Retrieve the classifier data directly from the log file to avoid
            # considering values that were not acquired because the measurement
            # was stopped.
            data = np.unique(dataset.get_data(config.name))
            # Remove NaNs.
            return tuple(data[~np.isnan(data)])
        else:
            if config.relation:
                steps = {
                    "Step values" if s.name == config.name else s.name: s.value
                    for s in dataset.steps
                }
                locs = {k: steps[v] for k, v in config.relation[1].items()}
                # XXX should provide some math functions
                return (eval(config.relation[0], locs),)
            else:
                return (config.value,)
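The config.relation mechanism evaluates an expression string with other step
values in scope. A stand-alone illustration (all names are made up):

# relation = (expression, {local_name: step_name})
expression, name_map = ("2 * vg + 0.1", {"vg": "Gate - Voltage"})
steps = {"Gate - Voltage": -1.2}  # step name -> step value
locs = {k: steps[v] for k, v in name_map.items()}
print(eval(expression, locs))  # -2.3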
Example #6
if not DATA_ROOT_FOLDER:
    raise ValueError('No root path for the data was specified.')

gates_number = {}
datasets = {}
datasets_color = {}

# Load and filter all the datasets
for f, ppath in DATA_PATHS.items():

    datasets[f] = {}
    datasets_color[f] = {}

    with LabberData(os.path.join(DATA_ROOT_FOLDER, ppath)) as data:
        frange = FIELD_RANGES[f]
        if GATE_COLUMN is not None:
            gates = [
                g for g in np.unique(data.get_data(GATE_COLUMN))
                if g not in EXCLUDED_GATES and not np.isnan(g)
            ]
        else:
            gates = [-4.5]
        gates_number[f] = len(gates)

        if PLOT_EXTRACTED_SWITCHING_CURRENT:
            fig, axes = plt.subplots(gates_number[f],
                                     sharex=True,
                                     figsize=(10, 15),
                                     constrained_layout=True)
Example #7
# Coil current to B-field conversion factor.
# The new sample holder is perpendicular to the old one;
# the conversion factor along the new axis is 30mA to 1mT.
CURR_TO_FIELD = 1 / 30

# constants
PHI0 = cs.h / (2 * cs.e)  # magnetic flux quantum
JJ_WIDTH = 4e-6
# The effective junction length is largely unknown due to thin-film penetration depth
# and flux focusing effects; nominally 100nm.
JJ_LENGTH = 1200e-9
FIELD_TO_WAVENUM = 2 * np.pi * JJ_LENGTH / PHI0  # B-field to beta wavenumber
PERIOD = 2 * np.pi / (FIELD_TO_WAVENUM * JJ_WIDTH)
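# Sanity check (illustrative arithmetic): the period reduces to
# PHI0 / (JJ_LENGTH * JJ_WIDTH) ≈ 2.068e-15 / (1.2e-6 * 4e-6) ≈ 0.43 mT.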

with LabberData(DATA_FILE_PATH) as f:
    # NOTE: The use of np.unique assumes the gate, field, and bias values are identical
    # for each sweep. This is true for the current datafile but may not hold in general.
    # NOTE: Also, in this case we have to manually correct some Labber shenanigans by
    # flipping some data.
    gate_3 = np.flip(np.unique(f.get_data(CH_GATE_3)))
    gate_2_4 = np.flip(np.unique(f.get_data(CH_GATE_2_4)))
    field = np.unique(f.get_data(CH_MAGNET)) * CURR_TO_FIELD

    # Bias current from the custom Labber driver VICurveTracer isn't available via
    # LabberData methods.
    bias = np.unique(f._file["/Traces/VITracer - VI curve"][:, 1, :])

    resist = f.get_data(CH_RESIST)

# extract_switching_current chokes on 1D arrays. Construct the ndarray of bias sweeps
Example #8
            diffusion_constant_from_mobility_density)
from shabanipy.quantum_hall.density import extract_density
from shabanipy.quantum_hall.mobility import extract_mobility
from shabanipy.quantum_hall.wal.fitting import (extract_soi_from_wal,
                                                estimate_parameters)
from shabanipy.quantum_hall.wal.utils import (flip_field_axis,
                                              recenter_wal_data,
                                              symmetrize_wal_data,
                                              compute_linear_soi,
                                              compute_dephasing_time)
from shabanipy.labber import LabberData

plt.rcParams.update({'font.size': 14})
plt.rcParams.update({'pdf.fonttype': 42})

with LabberData(PATH) as data:

    names = data.channel_names
    shape = data.compute_shape((GATE_COLUMN, FIELD_COLUMN))

    gate = data.get_data(GATE_COLUMN)

    # Handle interruptions in the last scan.
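    # E.g. (illustrative numbers): a scan programmed as 10 x 201 points but
    # interrupted after 1950 points is trimmed to 10 x 195 before reshaping.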
    while len(gate) < shape[0] * shape[1]:
        shape[1] -= 1

    length = shape[0] * shape[1]

    # Truncate any partially-recorded final sweep before reshaping.
    gate = gate[:length].reshape(shape).T[:-1]
    field = data.get_data(FIELD_COLUMN)[:length].reshape(shape).T[:-1]
    res = dict.fromkeys(('xx', 'yy', 'xy'))
Example #9
        "Ic_cold(µA)",
        "Ic_hot(µA)",
        "I_exe_cold(µA)",
        "I_exe_hot(µA)",
        "RnIc_cold(meV)",
        "RnI_exe_cold(meV)",
    ]) + "\n")

results = defaultdict(list)

for sample, parameters in SAMPLES.items():

    # Superconducting gap in meV (BCS ratio: gap ≈ 1.764 * kB * Tc)
    gap = 1.764 * constants.Boltzmann / constants.e * 1000 * parameters["Tc"]
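    # E.g. (illustrative): Tc = 1.4 K gives
    # gap ≈ 1.764 * 0.08617 meV/K * 1.4 K ≈ 0.21 meV.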

    with LabberData(os.path.join(BASE_FOLDER, parameters["path"])) as data:

        print(data.channel_names)
        filters = {}
        counter = get_value(sample, COUNTER_NAME)
        if counter is not None:
            val = data.get_data(counter)[0]
            filters[counter] = val

        gate_col = get_value(sample, GATE_NAME)
        if gate_col:
            gates = np.unique(data.get_data(gate_col))[::-1]
        else:
            gates = [None]

        offset_corr = get_value(sample, CORRECT_VOLTAGE_OFFSET)
Example #11
def plot_labberdata(path: Union[str, Path],
                    x: str,
                    y: str,
                    z: str,
                    xlabel: Optional[str] = None,
                    ylabel: Optional[str] = None,
                    zlabel: Optional[str] = None,
                    transform: Optional[Callable] = None,
                    xlim: Optional[Tuple[float, float]] = None,
                    ylim: Optional[Tuple[float, float]] = None,
                    title: Optional[str] = None,
                    ax: Optional[plt.Axes] = None,
                    style: Union[str, Dict, Path, List] = "default",
                    **kwargs) -> Tuple[plt.Figure, plt.Axes]:
    """Plot data from a Labber .hdf5 log file.

    Parameters
    ----------
    path
        The path to data file. If not absolute, it should be relative to the output of
        `shabanipy.labber.get_data_dir`.
    x, y, z
        Channel names or vector channel `x_name`s specifying which data to plot.
    xlabel, ylabel, zlabel
        Axes labels.  If None, the names from `x`, `y`, and `z` will be used.
    transform
        Function with the signature `Tuple[np.ndarray] -> Tuple[np.ndarray]`,
        i.e. (x, y, z) -> (x_transformed, y_transformed, z_transformed) used to
        transform the data.
    xlim, ylim
        x- and y-axis limits, in the form (min, max), referring to the transformed data
        if `transform` is given.  If either min or max is None, the limit is left
        unchanged.
    title
        Plot title.
    ax
        The matplotlib Axes in which to plot.
    style
        Matplotlib style specifications passed to `matplotlib.pyplot.style.use`.
    **kwargs
        Additional keyword arguments passed to the plotting function
        (`shabanipy.plotting.plot2d` in the case of 2d color plots).

    Returns
    -------
    The figure and axes where the data were plotted.
    """
    path = Path(path)
    if not path.is_absolute():
        path = get_data_dir() / path

    # get the data
    data = []
    with LabberData(path) as f:
        for name in (x, y, z):
            try:
                data.append(f.get_data(name))
            except ValueError:
                # TODO refactor LabberData.get_data to normalize data access
                for log in (l for l in f.logs if l.x_name == name):
                    vdata, _ = f.get_data(log.name, get_x=True)
                    data.append(vdata)
                    break

    # normalize shapes against potentially vectorial data
    dims = [d.ndim for d in data]
    max_dim = max(dims)
    for i, d in enumerate(data):
        if d.ndim < max_dim:
            data[i] = np.expand_dims(
                d, axis=tuple(-np.arange(1, 1 + max_dim - d.ndim)))
    data = np.broadcast_arrays(*data)

    # apply transformations
    if transform is not None:
        data = transform(*data)

    # plot
    plt.style.use(style)
    fig, ax = plot2d(
        *data,
        # TODO: automatically add units in the default case
        xlabel=xlabel if xlabel is not None else x,
        ylabel=ylabel if ylabel is not None else y,
        zlabel=zlabel if zlabel is not None else z,
        title=title,
        **kwargs)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)

    return fig, ax
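A minimal usage sketch (file and channel names are made up; the relative path
resolves against shabanipy.labber.get_data_dir, and numpy is assumed to be
imported as np):

fig, ax = plot_labberdata(
    "2021/10/Data_1030/scan-071.hdf5",
    x="Magnet - Field",
    y="VITracer - VI curve",
    z="DMM - Voltage",
    xlabel="B (mT)",
    ylabel="I bias (uA)",
    zlabel="dV/dI (Ohm)",
    transform=lambda x, y, z: (x / 1e-3, y / 1e-6, np.abs(z)),
)
fig.savefig("scan-071.png")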
Example #12
    def _extract_datasets(
        self,
        storage: Group,
        path: str,
        meas_pattern: MeasurementPattern,
        filters: Dict[str, float],
    ):
        """Extract the data corresponding to the specified filters."""
        vector_data_names = []
        x_vector_data_names = []
        to_store = {}

        with LabberData(path) as f:
            # Find and extract the relevant step channels (i.e. those not used in classification)
            for i, stepcf in [(i, s) for i, s in enumerate(f.steps)
                              if s.is_ramped]:
                # Collect all ramps except those used for classification
                should_skip = False
                name = stepcf.name

                # Check if a pattern matches and, if so, determine whether we
                # need to extract this parameter and under what name
                for pattern in (p for p in meas_pattern.patterns
                                if isinstance(p, StepPattern)):
                    if pattern.match(i, stepcf):
                        if pattern.use_in_classification:
                            should_skip = True
                            break
                        else:
                            name = pattern.name

                # Skip steps used in classification or explicitly excluded
                if should_skip or meas_pattern.match_excluded_steps(i, stepcf):
                    continue

                # Get the data which are already in a meaningful shape (see
                # LabberData.get_data)
                to_store[name] = f.get_data(stepcf.name, filters=filters)

            # Find and extract the relevant log channels
            n_matched_logs = 0
            log_patterns = [
                p for p in meas_pattern.patterns if isinstance(p, LogPattern)
            ]
            for i, entry in enumerate(f.logs):
                # Collect only requested log entries.
                should_skip = True
                name = entry.name
                x_name = None

                # Check if a pattern matches and, if so, determine whether we
                # need to extract this log entry and under what name
                for lpattern in log_patterns:
                    if lpattern.match(i, entry):
                        n_matched_logs += 1
                        should_skip = False
                        name = lpattern.name
                        x_name = lpattern.x_name

                # Log entry was not requested
                if should_skip:
                    continue

                data = f.get_data(entry.name,
                                  filters=filters,
                                  get_x=x_name is not None)  # type: ignore

                if entry.is_vector:
                    vector_data_names.append(name)
                    if x_name:
                        x_vector_data_names.append(x_name)
                        to_store[x_name] = data[0]
                        to_store[name] = data[1]
                    else:
                        to_store[name] = data
                else:
                    to_store[name] = data

            if n_matched_logs > len(log_patterns):
                log_names = [log.name for log in log_patterns]
                raise RuntimeError(
                    "More logs were matched than there are log patterns. "
                    f"The matched logs are {[l for l in to_store if l in log_names]}"
                )

            # In the presence of vector data do the following:
            # - for a single vector, or several vectors of the same length
            #   sharing a single x_name, add a dummy dimension to all other
            #   data sets to get something reminiscent of a normal scan
            # - for two or more vectors with different x, do nothing special
            if len(vector_data_names) == 1 or (
                    len(vector_data_names) > 1
                    and all(to_store[vector_data_names[0]].shape[-1] ==
                            to_store[n].shape[-1]
                            for n in vector_data_names[1:])
                    and len(set(x_vector_data_names)) < 2):
                vec_dim = to_store[vector_data_names[0]].shape[-1]

                for n, d in list(to_store.items()):
                    if n not in vector_data_names and n not in x_vector_data_names:
                        # Create a new array with an extra dimension
                        new_data = np.empty(list(d.shape) + [vec_dim],
                                            dtype=d.dtype)

                        # Create a view that makes it easy to assign the same
                        # value to all elements of the last dimension.
                        v = np.moveaxis(new_data, -1, 0)
                        v[:] = d

                        # Store the data
                        to_store[n] = new_data

            # If the data are empty, do not store anything.
            if any(v.shape == (0, ) for v in to_store.values()):
                return

            # Store the data
            for n, d in to_store.items():
                # If data with the same classifiers are already there, check
                # whether they have the same shape. If so, keep whichever set
                # contains fewer NaNs (the most complete one); otherwise log
                # the issue and do not store the new data.
                if n in storage:
                    dset = storage[n]
                    if dset.shape == d.shape:
                        ex_count = np.count_nonzero(np.isnan(dset))
                        new_count = np.count_nonzero(np.isnan(d))
                        if new_count < ex_count:
                            dset = storage[n] = d
                        else:
                            logger.info(
                                f"Ignoring {n} in {path} since more complete "
                                "data already exists (fewer NaNs)")
                    elif dset.shape[1:] == d.shape[1:]:
                        if dset.shape[0] < d.shape[0]:
                            # Delete the existing dataset and use the more complete one.
                            del storage[n]
                            dset = storage.create_dataset(
                                n,
                                data=d,
                                compression="gzip",
                                compression_opts=6,
                            )
                        else:
                            logger.info(
                                f"Ignoring {n} in {path} since more complete "
                                "data already exists, larger outer dimension")
                    else:
                        logger.info(
                            f"Ignoring {n} in {path} since data of a different "
                            "shape already exists. "
                            f"Existing {dset.shape}, new {d.shape}")
                else:
                    dset = storage.create_dataset(n,
                                                  data=d,
                                                  compression="gzip",
                                                  compression_opts=6)

                # Store the origin of the data alongside the data.
                dset.attrs["__file__"] = f.filename
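The dummy-dimension trick used above can be seen in isolation; a minimal
stand-alone sketch with illustrative shapes:

import numpy as np

scalar = np.array([1.0, 2.0, 3.0])  # one value per point, shape (3,)
vec_dim = 5                         # length of the vector data

# Append a vector dimension and fill it with the repeated scalar value,
# mirroring the np.moveaxis view assignment above.
new_data = np.empty(scalar.shape + (vec_dim,), dtype=scalar.dtype)
v = np.moveaxis(new_data, -1, 0)  # shape (5, 3): writes span the last axis
v[:] = scalar
assert (new_data[1] == 2.0).all()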
Example #13
    action="store_true",
    default=False,
    help="plot the initial guess along with the best fit",
)
args = parser.parse_args()

# load the config file
with open(Path(__file__).parent / args.config_path) as f:
    print(f"Using config file `{f.name}`")
    exec(f.read())

Path(OUTDIR).mkdir(parents=True, exist_ok=True)
plt.style.use(["jy_pink", "fullscreen13"])

# load the data
with LabberData(DATAPATH) as f:
    bfield = f.get_data(CHAN_FIELD_PERP) / AMPS_PER_T
    ibias, lockin = f.get_data(CHAN_LOCKIN, get_x=True)
    dvdi = np.abs(lockin)
    temp_meas = f.get_data(CHAN_TEMP_MEAS)

# check for significant temperature deviations
MAX_TEMPERATURE_STD = 1e-3
temp_std = np.std(temp_meas)
if temp_std > MAX_TEMPERATURE_STD:
    warnings.warn(
        f"Temperature standard deviation {temp_std} K > {MAX_TEMPERATURE_STD} K"
    )

# plot the raw data
fig, ax = plot2d(
Example #14
CH_RESIST = "SRS - Value"

# conversion factors
CURR_TO_FIELD = 1e3 / 18.2  # coil current to B-field (in mT)
VOLT_TO_RESIST = 1 / 10e-9  # lock-in voltage to resistance (inverse current)

# constants
PHI0 = cs.h / (2 * cs.e)  # magnetic flux quantum
jj_width = 4e-6
jj_length = 1800e-9  # includes London penetration depths
b2beta = 2 * np.pi * jj_length / PHI0  # B-field to beta factor
fraunhofer_period = 2 * np.pi / (b2beta * jj_width)

resist = []
ic = []
with LabberData(str(DATA_FILE_PATH)) as f:
    channels = f.channel_names

    field = np.unique(f.get_data(CH_MAGNET))[:-10] * CURR_TO_FIELD

    gate = np.unique(f.get_data(CH_GATE))
    for g in gate:
        bias = f.get_data(CH_BIAS, filters={CH_GATE: g})[:-10]
        resist.append(
            np.abs(
                np.real(VOLT_TO_RESIST *
                        f.get_data(CH_RESIST, filters={CH_GATE: g})[:-10])))
        ic.append(extract_switching_current(bias, resist[-1], 5, "positive"))
resist = np.array(resist)
ic = np.array(ic)
# bias sweeps should be the same for all gate values
Example #15
    warnings.warn(
        f"I can't double check that {config['DATAPATH']} is from {config['FRIDGE']}"
    )

# set up plot styles
jy_pink.register()
plt.style.use(["jy_pink", "fullscreen13"])

# set up output directory and filename prefix
OUTDIR = f"{__file__.split('.py')[0].replace('_', '-')}-results/{config['DEVICE']}"
print(f"All output will be saved to `{OUTDIR}`")
Path(OUTDIR).mkdir(parents=True, exist_ok=True)
OUTPATH = Path(OUTDIR) / f"{config['COOLDOWN']}-{config['SCAN']}"

# load the data
with LabberData(INPATH) as f:
    bfield = f.get_data(config["CH_FIELD_PERP"]) / AMPS_PER_T
    ibias, lockin = f.get_data(config["CH_LOCKIN"], get_x=True)
    dvdi = np.abs(lockin)
    temp_meas = f.get_data(config["CH_TEMP_MEAS"])
    f.warn_not_constant(config["CH_TEMP_MEAS"])

# plot the raw data
fig, ax = plot2d(
    *np.broadcast_arrays(bfield[..., np.newaxis] / 1e-3, ibias / 1e-6, dvdi),
    xlabel="x coil field (mT)",
    ylabel="dc bias (μA)",
    zlabel="dV/dI (Ω)",
    title="raw data",
    stamp=config["COOLDOWN"] + "_" + config["SCAN"],
)
Example #16
from shabanipy.jj.utils import extract_switching_current
from shabanipy.labber import LabberData, get_data_dir
from shabanipy.plotting import jy_pink, plot, plot2d

jy_pink.register()
plt.style.use(["fullscreen13", "jy_pink"])

OUTDIR = Path("plots")
OUTDIR.mkdir(exist_ok=True)
print(f"Output will be saved to {str(OUTDIR)}/")

# from vector9 fridge
FILENAME = "JS602-SE1_4xFlQpcSq-v1_N_WFSBHE01-071"
PATH = get_data_dir() / f"2021/10/Data_1030/{FILENAME}.hdf5"
with LabberData(PATH) as f:
    bfield = f.get_data("Vector Magnet - Field X")
    ibias, dc_volts = f.get_data("VITracer - VI curve", get_x=True)
    dc_volts /= 100  # amplifier gain 100x
    iflux = f.get_data("circleFL 6 - Source current")

# plot extracted switching current for a few flux current values
ic = extract_switching_current(ibias, dc_volts, threshold=3.5e-5)
fig, ax = plot(
    np.unique(bfield) / 1e-6,
    ic[::2].T / 1e-6,
    label=[f"{int(i / 1e-6)} μA" for i in np.unique(iflux[::2])],
    xlabel="Vector Magnet Field (μT)",
    ylabel="Current Bias (μA)",
)
fig.savefig(OUTDIR / f"{FILENAME}_Ic.png")