Example #1
    def init_streams(self):

        # Since Mux sweeps over channels itself, channel number must be added explicitly as a data axis to each measurement
        self.sheet_res.add_axis(DataAxis("channel", CHAN_LIST))
        self.temp_A.add_axis(DataAxis("channel", CHAN_LIST))
        self.temp_B.add_axis(DataAxis("channel", CHAN_LIST))
        self.sys_time.add_axis(DataAxis("channel", CHAN_LIST))
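A minimal sketch of the context this snippet assumes (values are hypothetical): CHAN_LIST is a module-level list of the channel numbers the mux sweeps over, and sheet_res, temp_A, temp_B and sys_time are the experiment's output connectors.

    # Hypothetical setup assumed by the snippet above
    CHAN_LIST = [1, 2, 3, 4]   # channels the mux steps through internally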
Example #2
    def load_segment_sweeps(experiment):
        # Load the active sweeps from the sweep ordering
        for name in experiment.sweep_settings['sweepOrder']:
            par = experiment.sweep_settings['sweepDict'][name]

            # Treat segment sweeps separately since they are DataAxes rather than SweepAxes
            if par['x__class__'] == 'SegmentNum':
                data_axis = par['meta_info']['axis_descriptor'][0]

                # See if there are multiple partitions, and therefore metadata
                if len(par['meta_info']['axis_descriptor']) > 1:

                    meta_axis = par['meta_info']['axis_descriptor'][1]

                    # There should be metadata for each cal describing what it is
                    metadata = ['data']*len(data_axis['points']) + meta_axis['points']

                    # Pad the data axis with dummy equidistant x-points for the extra calibration points
                    avg_step = (data_axis['points'][-1] - data_axis['points'][0])/(len(data_axis['points'])-1)
                    points = np.append(data_axis['points'], data_axis['points'][-1] + (np.arange(len(meta_axis['points']))+1)*avg_step)

                    # If there's only one segment we can probably ignore this axis
                    if len(points) > 1:
                        experiment.segment_axis = DataAxis(data_axis['name'], points, unit=data_axis['unit'], metadata=metadata)

                else:
                    if len(data_axis['points']) > 1:
                        experiment.segment_axis = DataAxis(data_axis['name'], data_axis['points'], unit=data_axis['unit'])
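A worked sketch of the calibration padding above, with hypothetical numbers: three data points plus two calibration points give avg_step = 1.0, so the padded axis becomes [0., 1., 2., 3., 4.].

    import numpy as np
    pts = np.array([0.0, 1.0, 2.0])    # data_axis['points']
    cal_labels = ['cal0', 'cal1']      # meta_axis['points'] (assumed labels)
    avg_step = (pts[-1] - pts[0]) / (len(pts) - 1)
    points = np.append(pts, pts[-1] + (np.arange(len(cal_labels)) + 1) * avg_step)
    metadata = ['data'] * len(pts) + cal_labels
    # points   -> [0. 1. 2. 3. 4.]
    # metadata -> ['data', 'data', 'data', 'cal0', 'cal1']

Example #3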
 def init_streams(self):
     # Baked in data axes
     descrip = DataStreamDescriptor()
     descrip.add_axis(DataAxis("sample", range(self.samps_per_trig)))
     descrip.add_axis(DataAxis("state", range(2)))
     descrip.add_axis(DataAxis("attempt", range(self.attempts.value)))
     self.voltage.set_descriptor(descrip)
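The three stacked axes define the stream shape implicitly: one voltage point is expected per (attempt, state, sample) tuple. A sketch with assumed sizes:

    # assumed: samps_per_trig = 100, attempts.value = 50
    expected_points = 50 * 2 * 100   # attempt x state x sample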
Example #4
def load_from_HDF5_legacy(filename_or_fileobject):
    data = {}
    if isinstance(filename_or_fileobject, h5py.File):
        f = filename_or_fileobject
    else:
        f = h5py.File(filename_or_fileobject, 'r')
    for groupname in f:
        # Reconstruct the descriptor
        descriptor = DataStreamDescriptor()
        g = f[groupname]
        axis_refs = g['descriptor']
        for ref in reversed(axis_refs):
            ax = g[ref]
            if 'unit' not in ax.attrs:
                # Unstructured

                names = [k for k in ax.dtype.fields.keys()]
                units = [ax.attrs['unit_'+name] for name in names]
                points = ax[:]
                points = points.view(np.float32).reshape(points.shape + (-1,))
                descriptor.add_axis(DataAxis(names, points=points, unit=units))
            else:
                # Structured
                name = ax.attrs['NAME'].decode('UTF-8')
                unit = ax.attrs['unit']
                points = ax[:]
                descriptor.add_axis(DataAxis(name, points=points, unit=unit))

        for attr_name in axis_refs.attrs.keys():
            descriptor.metadata[attr_name] = axis_refs.attrs[attr_name]

        data[groupname] = g['data'][:]

    # Only close file handles we opened ourselves
    if not isinstance(filename_or_fileobject, h5py.File):
        f.close()
    return data, descriptor
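A hedged usage sketch (the file name is hypothetical). Note that the legacy loader returns a single descriptor, the one reconstructed for the last group visited:

    data, descriptor = load_from_HDF5_legacy("old_run.h5")
    for group, arr in data.items():
        print(group, arr.shape)
    print([ax.name for ax in descriptor.axes])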
Example #5
    def get_descriptor(self, source_instr_settings, channel_settings):
        # Create a channel
        channel = X6Channel(channel_settings)

        descrip = DataStreamDescriptor()
        # If it's an integrated stream, then the time axis has already been eliminated.
        # Otherwise, add the time axis.
        if channel_settings['stream_type'] == 'Raw':
            samp_time = 4.0e-9
            descrip.add_axis(
                DataAxis(
                    "time",
                    samp_time *
                    np.arange(source_instr_settings['record_length'] // 4)))
            descrip.dtype = np.float64
        elif channel_settings['stream_type'] == 'Demodulated':
            samp_time = 32.0e-9
            descrip.add_axis(
                DataAxis(
                    "time",
                    samp_time *
                    np.arange(source_instr_settings['record_length'] // 32)))
            descrip.dtype = np.complex128
        elif channel_settings['stream_type'] == 'Integrated':
            descrip.dtype = np.complex128
        elif channel_settings['stream_type'] == 'Correlated':  # Same as integrated
            descrip.dtype = np.complex128
        elif channel_settings['stream_type'] == 'State':
            descrip.dtype = np.complex128
        return channel, descrip
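A sketch of the time axes this produces, assuming record_length = 1024: the 'Raw' stream is decimated by 4 and the 'Demodulated' stream by 32, matching the sample spacings hard-coded above:

    import numpy as np
    record_length = 1024                                     # assumed value
    raw_axis = 4.0e-9 * np.arange(record_length // 4)        # 256 points, 4 ns apart
    demod_axis = 32.0e-9 * np.arange(record_length // 32)    # 32 points, 32 ns apart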
Example #6
 def init_streams(self):
     descrip = DataStreamDescriptor()
     descrip.data_name = 'voltage'
     descrip.add_axis(DataAxis("sample", range(self.samps_per_trig)))
     descrip.add_axis(DataAxis("state", range(2)))
     descrip.add_axis(DataAxis("attempt", range(self.attempts)))
     self.voltage.set_descriptor(descrip)
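Example #7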
    def init_streams_custom(self):

        # Since Scope "sweeps" time on its own, time base must be added explicitly as a data axis for each wf measurement
        # Time base is manually adjusted by experimenter
        TIMEBASE = np.linspace(0, self.scope.record_duration,
                               self.scope.record_length)
        self.demod_wf.add_axis(DataAxis("timebase", TIMEBASE))
        self.raw_wf.add_axis(DataAxis("timebase", TIMEBASE))
Example #8
 def init_streams(self):
     # Baked in data axes
     descrip = DataStreamDescriptor()
     descrip.data_name = 'voltage'
     descrip.add_axis(DataAxis("sample", range(self.samps_per_trig)))
     descrip.add_axis(DataAxis("amplitude", self.amplitudes))
     descrip.add_axis(DataAxis("repeat", range(self.repeats)))
     self.voltage.set_descriptor(descrip)
Example #9
 def init_streams(self):
     descrip = DataStreamDescriptor()
     descrip.add_axis(
         DataAxis("samples", 2e-9 * np.arange(self.num_samples)))
     descrip.add_axis(DataAxis("delay", self.delays))
     descrip.add_axis(DataAxis("round_robins",
                               np.arange(self.round_robins)))
     self.voltage.set_descriptor(descrip)
Example #10
 def init_streams(self):
     # Baked in data axes
     descrip = DataStreamDescriptor()
     descrip.data_name = 'voltage'
     descrip.add_axis(
         DataAxis("sample",
                  range(int(self.integration_time * self.ai_clock))))
     descrip.add_axis(DataAxis("attempt", range(self.attempts)))
     self.voltage.set_descriptor(descrip)
Example #11
    def init_streams(self):
        # Baked in data axes
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("time", 1e-9 * np.arange(self.samples)))
        if len(self.gate_durs) > 1:
            descrip.add_axis(DataAxis("gate_pulse_duration", self.gate_durs))
        descrip.add_axis(DataAxis("gate_pulse_amplitude", self.gate_amps))
        descrip.add_axis(DataAxis("attempt", range(self.attempts)))

        self.voltage.set_descriptor(descrip)
Example #12
    def init_streams(self):
        descrip = DataStreamDescriptor()
        descrip.data_name='current_input'
        descrip.add_axis(DataAxis("time", np.arange(int(self.sample_rate*self.num_bursts/self.frequency))/self.sample_rate))
        self.current_input.set_descriptor(descrip)

        descrip = DataStreamDescriptor()
        descrip.data_name='voltage_sample'
        descrip.add_axis(DataAxis("time", np.arange(int(self.sample_rate*self.num_bursts/self.frequency))/self.sample_rate))
        self.voltage_sample.set_descriptor(descrip)
Example #13
def load_from_HDF5(filename_or_fileobject,
                   reshape=True,
                   return_structured_array=True):
    data = {}
    descriptors = {}
    if isinstance(filename_or_fileobject, h5py.File):
        f = filename_or_fileobject
    else:
        f = h5py.File(filename_or_fileobject, 'r')
    for groupname in f:
        # Reconstruct the descriptor
        descriptor = DataStreamDescriptor()
        g = f[groupname]
        axis_refs = g['descriptor']
        for ref in reversed(axis_refs):
            ax = g[ref]
            if ax.attrs['unstructured']:
                # The entry for the main unstructured axis contains
                # references to the constituent axes.

                # The DataAxis expects points as tuples coordinates
                # in the form [(x1, y1), (x2, y2), ...].
                points = np.vstack([g[e] for e in ax[:]]).T
                names = [g[e].attrs["name"] for e in ax[:]]
                units = [g[e].attrs["unit"] for e in ax[:]]
                descriptor.add_axis(DataAxis(names, points=points, unit=units))
            else:
                name = ax.attrs['name']
                unit = ax.attrs['unit']
                points = ax[:]
                descriptor.add_axis(DataAxis(name, points=points, unit=unit))

        for attr_name in axis_refs.attrs.keys():
            descriptor.metadata[attr_name] = axis_refs.attrs[attr_name]

        col_names = list(g['data'].keys())
        if return_structured_array:
            dtype = [(g['data'][n].attrs['name'], g['data'][n].dtype.char)
                     for n in col_names]
            length = g['data'][col_names[0]].shape[0]
            group_data = np.empty((length, ), dtype=dtype)
            for cn in col_names:
                group_data[cn] = g['data'][cn]
        else:
            group_data = {n: g['data'][n][:] for n in col_names}

        if reshape:
            group_data = group_data.reshape(descriptor.dims())

        data[groupname] = group_data
        descriptors[groupname] = descriptor
    if not isinstance(filename_or_fileobject, h5py.File):
        f.close()
    return data, descriptors
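A hedged usage sketch of this loader (the file name is hypothetical); with reshape=True each group's array comes back shaped according to its descriptor:

    data, descriptors = load_from_HDF5("run.h5")
    for group in data:
        print(group, data[group].shape,
              [ax.name for ax in descriptors[group].axes])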
Example #14
 def test_copy_descriptor(self):
     dsd = DataStreamDescriptor()
     dsd.add_axis(DataAxis("One", [1, 2, 3, 4]))
     dsd.add_axis(DataAxis("Two", [1, 2, 3, 4, 5]))
     self.assertTrue(len(dsd.axes) == 2)
     self.assertTrue("One" in [a.name for a in dsd.axes])
     dsdc = copy(dsd)
     self.assertTrue(dsd.axes == dsdc.axes)
     ax = dsdc.pop_axis("One")
     self.assertTrue(ax.name == "One")
     self.assertTrue(len(dsdc.axes) == 1)
     self.assertTrue(dsdc.axes[0].name == "Two")
Example #15
    def init_streams(self):
        descrip = DataStreamDescriptor()
        descrip.data_name = 'voltage_input'
        descrip.add_axis(DataAxis("index", np.arange(self.num_points + 2)))
        descrip.add_axis(DataAxis("repeat", np.arange(self.repeat)))
        self.voltage_input.set_descriptor(descrip)

        descrip = DataStreamDescriptor()
        descrip.data_name = 'voltage_sample'
        descrip.add_axis(DataAxis("index", np.arange(self.num_points + 2)))
        descrip.add_axis(DataAxis("repeat", np.arange(self.repeat)))
        self.voltage_sample.set_descriptor(descrip)
Example #16
    def update_descriptors(self):

        logger.debug("Updating Plotter %s descriptors based on input descriptor %s", self.filter_name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor
        try:
            self.time_pts = self.descriptor.axes[self.descriptor.axis_num("time")].points
            self.record_length = len(self.time_pts)
        except ValueError:
            raise ValueError("Single shot filter sink does not appear to have a time axis!")
        self.num_averages = len(self.sink.descriptor.axes[self.descriptor.axis_num("averages")].points)
        self.num_segments = len(self.sink.descriptor.axes[self.descriptor.axis_num("segment")].points)
        self.ground_data = np.zeros((self.record_length, self.num_averages), dtype=np.complex128)
        self.excited_data = np.zeros((self.record_length, self.num_averages), dtype=np.complex128)
        self.total_points = self.num_segments*self.record_length*self.num_averages # Total points BEFORE sweep axes

        output_descriptor = DataStreamDescriptor()
        output_descriptor.axes = [ax for ax in self.descriptor.axes if type(ax) is SweepAxis]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128

        if len(output_descriptor.axes) == 0:
            output_descriptor.add_axis(DataAxis("Fidelity", [1]))

        for os in self.fidelity.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()
Example #17
    def get_descriptor(self, source_instr_settings, channel_settings):
        channel = AlazarChannel(channel_settings)

        # Add the time axis
        samp_time = 1.0/source_instr_settings['sampling_rate']
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("time", samp_time*np.arange(source_instr_settings['record_length'])))
        return channel, descrip
Example #18
 def init_streams(self):
     # Add a "base" data axis: say we are averaging 5 samples per trigger
     descrip = DataStreamDescriptor()
     descrip.data_name = 'voltage'
     if self.is_complex:
         descrip.dtype = np.complex128
     descrip.add_axis(DataAxis("samples", list(range(self.samples))))
     self.voltage.set_descriptor(descrip)
Example #19
 def get_descriptor(self, stream_selector, receiver_channel):
     """Get the axis descriptor corresponding to this stream selector. For the Alazar cards this
     is always just a time axis."""
     samp_time = 1.0 / receiver_channel.receiver.sampling_rate
     descrip = DataStreamDescriptor()
     descrip.add_axis(
         DataAxis(
             "time",
             samp_time *
             np.arange(receiver_channel.receiver.record_length)))
     return descrip
Example #20
 def get_descriptor(self, stream_selector, receiver_channel):
     """Get the axis descriptor corresponding to this stream selector. If it's an integrated stream,
     then the time axis has already been eliminated. Otherwise, add the time axis."""
     descrip = DataStreamDescriptor()
     if stream_selector.stream_type == 'raw':
         samp_time = 4.0e-9
         descrip.add_axis(
             DataAxis(
                 "time",
                 samp_time *
                 np.arange(receiver_channel.receiver.record_length // 4)))
         descrip.dtype = np.float64
     elif stream_selector.stream_type == 'demodulated':
         samp_time = 32.0e-9
         descrip.add_axis(
             DataAxis(
                 "time",
                 samp_time *
                 np.arange(receiver_channel.receiver.record_length // 32)))
         descrip.dtype = np.complex128
     else:  # Integrated
         descrip.dtype = np.complex128
     return descrip
Example #21
 def init_streams(self):
     # Add "base" data axes
     self.chan1.add_axis(DataAxis("samples", list(range(self.samples))))
     self.chan2.add_axis(DataAxis("trials", list(range(self.num_trials))))
Example #22
 def init_streams(self):
     self.chan1.add_axis(DataAxis("samples", list(range(self.samples))))
     self.chan1.add_axis(DataAxis("trials", list(range(self.trials))))
     self.chan1.add_axis(DataAxis("repeats", list(range(self.repeats))))
Example #23
    def load_meta_info(experiment, meta_file):
        """If we get a meta_file, modify the configurations accordingly. Enable only instruments
        on the graph that connect the relevant *ReceiverChannel* objects to *Writer* or *Plotter*
        objects."""

        calibration = experiment.calibration
        save_data = experiment.save_data

        # Create a mapping from qubits to data writers and inverse
        qubit_to_writer = {}
        writer_to_qubit = {}
        qubit_to_stream_sel = {}
        stream_sel_to_qubit = {}

        # shortcuts
        instruments = experiment.settings['instruments']
        filters = experiment.settings['filters']
        qubits = experiment.settings['qubits']
        if 'sweeps' in experiment.settings:
            sweeps = experiment.settings['sweeps']

        # Use the meta info to modify the parameters
        # loaded from the human-friendly yaml configuration.
        with open(meta_file, 'r') as FID:
            meta_info = json.load(FID)

        # Construct a graph of all instruments in order to properly enable those
        # associated with the meta_file. We only need to use string representations
        # here, not actual filter and instrument objects.

        # Strip any spaces, since we only care about the general flow, and not any
        # named connectors.
        def strip_conn_name(text):
            val_list = []
            # multiple sources are separated by commas
            all_vals = text.strip().split(',')
            for vals in all_vals:
                val = vals.strip().split()
                if len(val) == 0:
                    raise ValueError(
                        "Please disable filters with missing source.")
                elif len(val) > 2:
                    raise ValueError(
                        "Spaces are reserved to separate filters and connectors. Please rename {}."
                        .format(vals))
                val_list.append(val[0])
            return val_list
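        # e.g. strip_conn_name("Demod1 source, Demod2 source") returns
        # ['Demod1', 'Demod2']: only the node name before the space is kept
        # (the filter names here are hypothetical).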

        # Graph edges for the measurement filters
        # switch stream selector to raw (by default) before building the graph
        if experiment.__class__.__name__ == "SingleShotFidelityExperiment":
            receivers = [s for s in meta_info['receivers'].items()]
            if len(receivers) > 1:
                raise NotImplementedError(
                    "Single shot fidelity for more than one qubit is not yet implemented."
                )
            stream_sel_name_orig = receivers[0][0].replace('RecvChan-', '')

            stream_selectors = [
                k for k, v in filters.items()
                if "AlazarStreamSelector" in v["type"]
                and v["source"] == filters[stream_sel_name_orig]['source']
            ]
            stream_selectors += [
                k for k, v in filters.items()
                if v["type"] == 'X6StreamSelector'
                and v["source"] == filters[stream_sel_name_orig]['source']
                and v['channel'] == filters[stream_sel_name_orig]['channel']
                and np.mod(v["dsp_channel"] - 1, 5) + 1 ==
                    np.mod(filters[stream_sel_name_orig]["dsp_channel"] - 1, 5) + 1
            ]

            for s in stream_selectors:
                if filters[s]['stream_type'] == experiment.ss_stream_type:
                    filters[s]['enabled'] = True
                    stream_sel_name = s
                else:
                    filters[s]['enabled'] = False
        edges = [[(s, k) for s in strip_conn_name(v["source"])]
                 for k, v in filters.items()
                 if ("enabled" not in v.keys()) or v["enabled"]]
        edges = [e for edge in edges for e in edge]
        dag = nx.DiGraph()
        dag.add_edges_from(edges)

        inst_to_enable = []
        filt_to_enable = set()

        # Find any writer endpoints of the receiver channels
        for receiver_name, num_segments in meta_info['receivers'].items():
            # Receiver channel name format: RecvChan-StreamSelectorName
            if not experiment.__class__.__name__ == "SingleShotFidelityExperiment":
                stream_sel_name = receiver_name.replace('RecvChan-', '')
                stream_sel_name_orig = stream_sel_name
            dig_name = filters[stream_sel_name]['source']
            chan_name = filters[stream_sel_name]['channel']

            if experiment.repeats is not None:
                num_segments *= experiment.repeats

            # Set the correct number of segments for the digitizer
            instruments[dig_name]['nbr_segments'] = num_segments

            # Enable the digitizer
            inst_to_enable.append(dig_name)

            # Find the enabled X6 stream selectors with the same channel as the receiver, allowing raw/demod/int streams belonging to the same receiver to be plotted or saved
            if calibration:
                stream_selectors = []
            else:
                stream_selectors = [
                    k for k, v in filters.items()
                    if ("AlazarStreamSelector" in v["type"]) and (
                        v["source"] == filters[stream_sel_name_orig]['source'])
                ]
                stream_selectors += [
                    k for k, v in filters.items()
                    if (v["type"] == 'X6StreamSelector'
                        and v["source"] == filters[stream_sel_name]['source']
                        and v["enabled"] == True
                        and v["channel"] == filters[stream_sel_name]["channel"]
                        and (np.mod(v["dsp_channel"] - 1, 5) + 1 == np.mod(
                            filters[stream_sel_name_orig]["dsp_channel"] -
                            1, 5) + 1 or v["dsp_channel"] > 5))
                ]
            # Enable the tree for single-shot fidelity experiment. Change stream_sel_name to raw (by default)
            writers = []
            plotters = []
            singleshot = []
            buffers = []

            def check_endpoint(endpoint_name, endpoint_type):
                source_type = filters[filters[endpoint_name]['source'].split(
                    ' ')[0]]['type']
                # Filter entries are dicts, so check key membership rather
                # than hasattr (which never finds dict keys)
                return filters[endpoint_name]['type'] == endpoint_type and (
                    'enabled' not in filters[endpoint_name]
                    or filters[endpoint_name]['enabled']
                ) and not (calibration and source_type == 'Correlator') and (
                    source_type != 'SingleShotMeasurement'
                    or experiment.__class__.__name__
                    == 'SingleShotFidelityExperiment')

            for filt_name, filt in filters.items():
                if filt['enabled'] == False:
                    continue
                if filt_name in [stream_sel_name] + stream_selectors:
                    # Find descendants of the channel selector
                    chan_descendants = nx.descendants(dag, filt_name)
                    # Find endpoints within the descendants
                    endpoints = [
                        n for n in chan_descendants
                        if dag.in_degree(n) == 1 and dag.out_degree(n) == 0
                    ]
                    # Find endpoints which are enabled writers, plotters or singleshot filters without an output. Disable outputs of single-shot filters when not used.
                    writers += [
                        e for e in endpoints
                        if check_endpoint(e, "WriteToHDF5")
                    ]
                    plotters += [
                        e for e in endpoints if check_endpoint(e, "Plotter")
                    ]
                    buffers += [
                        e for e in endpoints
                        if check_endpoint(e, "DataBuffer")
                    ]
                    singleshot += [
                        e for e in endpoints
                        if check_endpoint(e, "SingleShotMeasurement")
                        and experiment.__class__.__name__ ==
                        "SingleShotFidelityExperiment"
                    ]
            filt_to_enable.update(set().union(writers, plotters, singleshot,
                                              buffers, stream_selectors))
            if calibration:
                # For calibrations the user should only have one writer enabled, otherwise we will be confused.
                if len(writers) > 1:
                    raise Exception(
                        "More than one viable data writer was found for a receiver channel {}. Please enable only one!"
                        .format(receiver_name))
                if len(writers) == 0 and len(plotters) == 0 and len(
                        singleshot) == 0 and len(buffers) == 0:
                    raise Exception(
                        "No viable data writer, plotter or single-shot filter was found for receiver channel {}. Please enable one!"
                        .format(receiver_name))

            if writers and not save_data:
                # If we are calibrating we don't care about storing data, use buffers instead
                buffers = []
                for w in writers:
                    source_filt = filters[w]["source"].split(" ")[0]
                    if filters[source_filt]["type"] == "Averager":
                        sources = ", ".join([
                            source_filt + " final_average",
                            source_filt + " final_variance"
                        ])
                    else:
                        sources = filters[w]["source"]
                    buff = {
                        "source": sources,
                        "enabled": True,
                        "type": "DataBuffer",
                    }
                    # Remove the writer
                    filters.pop(w)
                    # Substitute the buffer
                    filters[w] = buff
                    # Store buffer name for local use
                    buffers.append(w)
                writers = buffers

            # For now we assume a single qubit, not a big change for multiple qubits
            qubit_name = next(
                k for k, v in qubits.items()
                if v["measure"]["receiver"] in (stream_sel_name,
                                                stream_sel_name_orig))
            if calibration:
                if len(writers) == 1:
                    qubit_to_writer[qubit_name] = writers[0]
            else:
                qubit_to_writer[qubit_name] = writers

            writer_ancestors = []
            plotter_ancestors = []
            singleshot_ancestors = []
            buffer_ancestors = []
            # Trace back our ancestors, using plotters if no writers are available
            if writers:
                writer_ancestors = set().union(
                    *[nx.ancestors(dag, wr) for wr in writers])
            if plotters:
                plotter_ancestors = set().union(
                    *[nx.ancestors(dag, pl) for pl in plotters])
            if singleshot:
                singleshot_ancestors = set().union(
                    *[nx.ancestors(dag, ss) for ss in singleshot])
            if buffers:
                buffer_ancestors = set().union(
                    *[nx.ancestors(dag, bf) for bf in buffers])
            filt_to_enable.update(set().union(writer_ancestors,
                                              plotter_ancestors,
                                              singleshot_ancestors,
                                              buffer_ancestors))
            # remove all the digitizers, which are already taken care of
            filt_to_enable.difference_update(
                [f for f in filt_to_enable if dag.in_degree()[f] == 0])

        if calibration:
            # One to one writers to qubits
            writer_to_qubit = {v: [k] for k, v in qubit_to_writer.items()}
        else:
            # Many-to-one writers to qubits or vice versa
            writer_to_qubit = {}
            for q, ws in qubit_to_writer.items():
                for w in ws:
                    if w not in writer_to_qubit:
                        writer_to_qubit[w] = []
                    writer_to_qubit[w].append(q)
        # Disable digitizers and APSs and then build ourself back up with the relevant nodes
        for instr_name in instruments.keys():
            if 'tx_channels' in instruments[instr_name].keys(
            ) or 'rx_channels' in instruments[instr_name].keys():
                instruments[instr_name]['enabled'] = False
        for instr_name in inst_to_enable:
            instruments[instr_name]['enabled'] = True

        for meas_name in filters.keys():
            filters[meas_name]['enabled'] = False
        for meas_name in filt_to_enable:
            filters[meas_name]['enabled'] = True

        # Label measurements with the qubit name (assuming the convention "M-"+qubit_name)
        for meas_name in filt_to_enable:
            if filters[meas_name]["type"] == "WriteToHDF5":
                filters[meas_name]['groupname'] = ''.join(writer_to_qubit[meas_name]) \
                    + "-" + filters[meas_name]['groupname']

        for instr_name, chan_data in meta_info['instruments'].items():
            instruments[instr_name]['enabled'] = True
            if isinstance(chan_data, str):
                instruments[instr_name][
                    'seq_file'] = chan_data  # Per-instrument seq file
            elif isinstance(chan_data, dict):
                for chan_name, seq_file in chan_data.items():
                    if "tx_channels" in instruments[
                            instr_name] and chan_name in instruments[
                                instr_name]["tx_channels"].keys():
                        instruments[instr_name]["tx_channels"][chan_name][
                            'seq_file'] = seq_file
                    elif "rx_channels" in instruments[
                            instr_name] and chan_name in instruments[
                                instr_name]["rx_channels"].keys():
                        instruments[instr_name]["rx_channels"][chan_name][
                            'seq_file'] = seq_file
                    else:
                        raise ValueError(
                            "Could not find channel {} in of instrument {}.".
                            format(chan_name, instr_name))

        # Now we will construct the DataAxis from the meta_info
        desc = meta_info["axis_descriptor"]
        data_axis = desc[0]  # Data will always be the first axis

        if experiment.repeats is not None:
            # Override data axis with repeated number of segments
            data_axis['points'] = np.tile(data_axis['points'],
                                          experiment.repeats)

        # Search for calibration axis, i.e., metadata
        axis_names = [d['name'] for d in desc]
        if 'calibration' in axis_names:
            meta_axis = desc[axis_names.index('calibration')]
            # There should be metadata for each cal describing what it is
            if len(desc) > 1:
                metadata = ['data'] * len(
                    data_axis['points']) + meta_axis['points']
                # Pad the data axis with dummy equidistant x-points for the extra calibration points
                avg_step = (data_axis['points'][-1] - data_axis['points'][0]
                            ) / (len(data_axis['points']) - 1)
                points = np.append(
                    data_axis['points'], data_axis['points'][-1] +
                    (np.arange(len(meta_axis['points'])) + 1) * avg_step)
            else:
                metadata = meta_axis[
                    'points']  # data may consist of calibration points only
                points = np.arange(
                    len(metadata))  # dummy axis for plotting purposes
            # If there's only one segment we can ignore this axis
            if len(points) > 1:
                experiment.segment_axis = DataAxis(data_axis['name'],
                                                   points,
                                                   unit=data_axis['unit'],
                                                   metadata=metadata)

        else:
            if len(data_axis['points']) > 1:
                experiment.segment_axis = DataAxis(data_axis['name'],
                                                   data_axis['points'],
                                                   unit=data_axis['unit'])

        experiment.qubit_to_writer = qubit_to_writer
        experiment.writer_to_qubit = writer_to_qubit
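A sketch of the writer-to-qubit inversion performed above (names are hypothetical):

    qubit_to_writer = {"q1": ["w_raw", "w_demod"]}   # non-calibration case
    writer_to_qubit = {}
    for q, ws in qubit_to_writer.items():
        for w in ws:
            writer_to_qubit.setdefault(w, []).append(q)
    # writer_to_qubit -> {'w_raw': ['q1'], 'w_demod': ['q1']}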
Example #24
    def create_from_meta(self, meta_file, averages):
        """Method called during creation. Implementing a subclass of `QubitExperiment` this method
        may be overridden to provide additional functionality. However, this is a complex method, and
        it is recommended that the user instead override the `modify_graph` method to provide
        custom subclass behavior.
        """
        try:
            with open(meta_file, 'r') as FID:
                meta_info = json.load(FID)
        except Exception:
            raise Exception(
                f"Could not process meta info from file {meta_file}")

        # Load ChannelLibrary and database information
        db_provider = meta_info['database_info']['db_provider']
        db_resource_name = meta_info['database_info']['db_resource_name']
        library_name = meta_info['database_info']['library_name']
        library_id = meta_info['database_info']['library_id']

        # Respect separate sessions for channel library and pipeline
        self.cl_session = bbndb.get_cl_session()
        self.pl_session = bbndb.get_pl_session()

        # Load the channel library by ID
        self.chan_db = self.cl_session.query(
            bbndb.qgl.ChannelDatabase).filter_by(id=library_id).first()
        all_channels = self.chan_db.channels
        all_generators = self.chan_db.generators
        all_transmitters = self.chan_db.transmitters
        all_receivers = self.chan_db.receivers
        all_transceivers = self.chan_db.transceivers
        all_qubits = [
            c for c in all_channels if isinstance(c, bbndb.qgl.Qubit)
        ]
        all_measurements = [
            c for c in all_channels if isinstance(c, bbndb.qgl.Measurement)
        ]
        # Restrict to current qubits, channels, etc. involved in this actual experiment
        self.controlled_qubits = [
            c for c in self.chan_db.channels if c.label in meta_info["qubits"]
        ]
        self.measurements = [
            c for c in self.chan_db.channels
            if c.label in meta_info["measurements"]
        ]
        self.measured_qubits = [
            c for c in self.chan_db.channels
            if "M-" + c.label in meta_info["measurements"]
        ]
        if 'edges' in meta_info:
            self.edges = [
                c for c in self.chan_db.channels
                if c.label in meta_info["edges"]
            ]
        else:
            self.edges = []
        self.phys_chans = list(
            set([
                e.phys_chan for e in self.controlled_qubits +
                self.measurements + self.edges
            ]))
        self.receiver_chans = list(
            set([e.receiver_chan for e in self.measurements]))
        self.slave_trigs = [
            c for c in self.chan_db.channels if c.label == 'slave_trig'
        ]
        self.trig_chans = list(
            set([e.trig_chan.phys_chan for e in self.measurements
                 ])) + [c.phys_chan for c in self.slave_trigs]
        self.transmitters = list(
            set([
                e.phys_chan.transmitter for e in self.controlled_qubits +
                self.measurements + self.edges + self.slave_trigs
            ]))
        self.receivers = list(
            set([e.receiver_chan.receiver for e in self.measurements]))
        self.generators = list(
            set([
                q.phys_chan.generator for q in self.measured_qubits +
                self.controlled_qubits + self.measurements
                if q.phys_chan.generator
            ]))
        self.qubits_by_name = {
            q.label: q
            for q in self.measured_qubits + self.controlled_qubits
        }

        # Load the relevant stream selectors from the pipeline.
        self.stream_selectors = pipeline.pipelineMgr.get_current_stream_selectors(
        )
        if len(self.stream_selectors) == 0:
            raise Exception(
                "No filter pipeline has been created. You can try running the create_default_pipeline() method of the Pipeline Manager"
            )
        org_stream_selectors = self.stream_selectors
        for ss in org_stream_selectors:
            labels = ss.label.split('-')
            for l in labels:
                if l in self.qubits_by_name.keys(
                ) and ss not in self.stream_selectors:
                    self.stream_selectors.append(ss)
                    continue

        # Locate transmitters relying on processors
        self.transceivers = list(
            set([
                t.transceiver for t in self.transmitters + self.receivers
                if t.transceiver
            ]))
        self.processors = list(
            set([p for t in self.transceivers for p in t.processors]))

        # Determine if the digitizer trigger lives on another transmitter that isn't included already
        self.transmitters = list(
            set([
                mq.measure_chan.trig_chan.phys_chan.transmitter
                for mq in self.measured_qubits
            ] + self.transmitters))

        # The exception being any instruments that are declared as standalone
        self.all_standalone = [
            i for i in self.chan_db.all_instruments()
            if i.standalone and i not in self.transmitters + self.receivers +
            self.generators
        ]

        # In case we need to access more detailed foundational information
        self.factory = self

        # If no pipeline is defined, stop and ask the user to create one
        if not pipeline.pipelineMgr.meas_graph:
            raise Exception(
                "No pipeline has been created, do so automatically using exp_factory.create_default_pipeline()"
            )
            #self.create_default_pipeline(self.measured_qubits)

        # Add the waveform file info to the qubits
        output_chans = self.transmitters + self.transceivers + self.phys_chans + self.trig_chans
        for xmit, fname in meta_info['instruments'].items():
            awg = [c for c in output_chans if c.label == xmit][0]
            awg.sequence_file = fname

        # Construct the DataAxis from the meta_info
        desc = meta_info["axis_descriptor"]
        data_axis = desc[0]  # Data will always be the first axis

        # Override data axis with repeated number of segments
        if hasattr(self, "repeats") and self.repeats is not None:
            data_axis['points'] = np.tile(data_axis['points'], self.repeats)

        # Search for calibration axis, i.e., metadata
        axis_names = [d['name'] for d in desc]
        if 'calibration' in axis_names:
            meta_axis = desc[axis_names.index('calibration')]
            # There should be metadata for each cal describing what it is
            if len(desc) > 1:
                metadata = ['data'] * len(
                    data_axis['points']) + meta_axis['points']
                # Pad the data axis with dummy equidistant x-points for the extra calibration points
                avg_step = (data_axis['points'][-1] - data_axis['points'][0]
                            ) / (len(data_axis['points']) - 1)
                points = np.append(
                    data_axis['points'], data_axis['points'][-1] +
                    (np.arange(len(meta_axis['points'])) + 1) * avg_step)
            else:
                metadata = meta_axis[
                    'points']  # data may consist of calibration points only
                points = np.arange(
                    len(metadata))  # dummy axis for plotting purposes
            # If there's only one segment we can ignore this axis
            if len(points) > 1:
                self.segment_axis = DataAxis(data_axis['name'],
                                             points,
                                             unit=data_axis['unit'],
                                             metadata=metadata)
        else:
            # No calibration data, just add a segment axis as long as there is more than one segment
            if len(data_axis['points']) > 1:
                self.segment_axis = DataAxis(data_axis['name'],
                                             data_axis['points'],
                                             unit=data_axis['unit'])

        # Build a mapping of qubits to self.receivers, construct qubit proxies
        # We map by the unique database ID since that is much safer
        receiver_chans_by_qubit_label = {}
        for m in self.measurements:
            q = [c for c in self.chan_db.channels if c.label == m.label[2:]][0]
            receiver_chans_by_qubit_label[q.label] = m.receiver_chan

        # Now a pipeline exists, so we create Auspex filters from the proxy filters in the db
        self.proxy_to_filter = {}
        self.filters = []
        self.connector_by_sel = {}
        self.chan_to_dig = {}
        self.chan_to_oc = {}
        self.qubit_to_dig = {}
        self.qubits_by_output = {}
        self.proxy_name_to_instrument = {}

        # Create microwave sources and receiver instruments from the database objects.
        # We configure the self.receivers later after adding channels.
        self.instrument_proxies = self.generators + self.receivers + self.transmitters + self.transceivers + self.all_standalone + self.processors
        for t in self.transceivers:
            if t.initialize_separately:
                self.instrument_proxies.remove(t)
            else:
                for el in t.transmitters + t.receivers:
                    self.instrument_proxies.remove(el)

        self.instruments = []
        for instrument in self.instrument_proxies:
            if (hasattr(instrument, 'serial_port')
                    and instrument.serial_port is not None
                    and hasattr(instrument, 'dac')
                    and instrument.dac is not None):
                address = (instrument.address, instrument.serial_port,
                           instrument.dac)
            else:
                address = instrument.address
            instr = instrument_map[instrument.model](
                address, instrument.label)  # Instantiate
            # For easy lookup
            instr.proxy_obj = instrument

            instrument._locked = False
            instrument.instr = instr  # This shouldn't be relied upon
            instrument._locked = True

            self.proxy_name_to_instrument[instrument.label] = instr

            # Add to the experiment's instrument list
            self._instruments[instrument.label] = instr
            self.instruments.append(instr)
            # Add to class dictionary for convenience
            if not hasattr(self, instrument.label):
                setattr(self, instrument.label, instr)

        mq_all_stream_sels = []
        for mq in self.measured_qubits:

            # Stream selectors from the pipeline database:
            # These contain all information except for the physical channel
            mq_stream_sels = [
                ss for ss in self.stream_selectors
                if mq.label in ss.label.split("-")
                and ss not in mq_all_stream_sels
            ]
            mq_all_stream_sels.append(mq_stream_sels)

            # The receiver channel only specifies the physical channel
            rcv = receiver_chans_by_qubit_label[mq.label]

            # Create the auspex stream selectors
            transcvr = rcv.receiver.transceiver
            if transcvr is not None and transcvr.initialize_separately == False:
                dig = rcv.receiver.transceiver
                stream_sel_class = stream_sel_map[rcv.receiver.stream_sel]
            else:
                dig = rcv.receiver
                stream_sel_class = stream_sel_map[dig.stream_sel]

            for mq_stream_sel in mq_stream_sels:
                auspex_stream_sel = stream_sel_class(
                    name=f"{rcv.label}-{mq_stream_sel.stream_type}-stream_sel")
                mq_stream_sel.channel = rcv.channel
                auspex_stream_sel.configure_with_proxy(mq_stream_sel)
                auspex_stream_sel.receiver = auspex_stream_sel.proxy = mq_stream_sel

                # Construct the channel from the receiver channel
                channel = auspex_stream_sel.get_channel(mq_stream_sel)
                # Manually set the physical channel
                channel.phys_channel = rcv.channel

                # Get the base descriptor from the channel
                descriptor = auspex_stream_sel.get_descriptor(
                    mq_stream_sel, rcv)

                # Update the descriptor based on the number of segments
                # The segment axis should already be defined if the sequence
                # is greater than length 1
                if hasattr(self, "segment_axis"):
                    descriptor.add_axis(self.segment_axis)

                # Add averaging if necessary
                if averages > 1:
                    descriptor.add_axis(DataAxis("averages", range(averages)))

                # Add the output connectors to the experiment and set their base descriptor
                self.connector_by_sel[mq_stream_sel] = self.add_connector(
                    mq_stream_sel)
                self.connector_by_sel[mq_stream_sel].set_descriptor(descriptor)

                # Add the channel to the instrument
                dig.instr.add_channel(channel)
                self.chan_to_dig[channel] = dig.instr
                self.chan_to_oc[channel] = self.connector_by_sel[mq_stream_sel]
                self.qubit_to_dig[mq.id] = dig

        # Find the number of segments for each receiver
        segments_per_dig = {
            receiver_chan.receiver: meta_info["receivers"][receiver_chan.label]
            for receiver_chan in self.receiver_chans
            if receiver_chan.label in meta_info["receivers"].keys()
        }

        # Configure receiver instruments from the database objects
        # this must be done after adding channels.
        for dig in self.receivers:
            if dig.transceiver is not None and not dig.transceiver.initialize_separately:
                dig.transceiver.number_averages = averages
                dig.transceiver.number_waveforms = 1
                dig.transceiver.number_segments = segments_per_dig[dig]
            else:
                dig.number_averages = averages
                dig.number_waveforms = 1
                dig.number_segments = segments_per_dig[dig]
                dig.instr.proxy_obj = dig

        # Restrict the graph to the relevant qubits
        self.measured_qubit_names = [q.label for q in self.measured_qubits]
        self.pl_session.commit()

        # Any modifications to be done by subclasses, just a passthrough here
        self.modified_graph = self.modify_graph(
            pipeline.pipelineMgr.meas_graph)

        # Compartmentalize the instantiation
        self.instantiate_filters(self.modified_graph)
Example #25
    def load_filters(experiment):
        # These store any filters we create as well as their connections
        filters = {}
        graph   = []

        # ============================================
        # Find all of the filter modules by inspection
        # ============================================

        modules = (
            importlib.import_module('auspex.filters.' + name)
            for loader, name, is_pkg in pkgutil.iter_modules(auspex.filters.__path__)
        )

        module_map = {}
        for mod in modules:
            filts = (_ for _ in inspect.getmembers(mod) if inspect.isclass(_[1]) and
                                                            issubclass(_[1], Filter) and
                                                            _[1] != Filter)
            module_map.update(dict(filts))

        # ==================================================
        # Find out which output connectors we need to create
        # ==================================================

        # Get the enabled measurements
        enabled_meas = {k: v for k, v in experiment.measurement_settings['filterDict'].items() if v['enabled']}

        # First look for digitizer streams (Alazar or X6)
        dig_settings    = {k: v for k, v in enabled_meas.items() if "StreamSelector" in v['x__class__']}

        # These stream selectors are really just a convenience
        # Remove them from the list of "real" filters
        for k in dig_settings.keys():
            enabled_meas.pop(k)

        # Map from Channel -> OutputConnector
        # and from Channel -> Digitizer for future lookup
        chan_to_oc  = {}
        chan_to_dig = {}

        for name, settings in dig_settings.items():

            # Create and add the OutputConnector
            logger.debug("Adding %s output connector to experiment.", name)
            oc = OutputConnector(name=name, parent=experiment)
            experiment._output_connectors[name] = oc
            experiment.output_connectors[name] = oc
            setattr(experiment, name, oc)

            # Find the digitizer instrument and settings
            source_instr          = experiment._instruments[settings['data_source']]
            source_instr_settings = experiment.instrument_settings['instrDict'][settings['data_source']]

            # Construct the descriptor from the stream
            stream_type = settings['x__class__']
            stream = module_map[stream_type](name=name)
            channel, descrip = stream.get_descriptor(source_instr_settings, settings)

            # Add the channel to the instrument
            source_instr.add_channel(channel)

            # Add the segment axis, which should already be defined...
            if hasattr(experiment, 'segment_axis'):
                # This should contain the proper range and units based on the sweep descriptor
                descrip.add_axis(experiment.segment_axis)
            else:
                # This is the generic axis based on the instrument parameters
                # If there is only one segment, we should omit this axis.
                if source_instr_settings['nbr_segments'] > 1:
                    descrip.add_axis(DataAxis("segments", range(source_instr_settings['nbr_segments'])))

            # Digitizer mode preserves round_robins, while averager mode collapses along them:
            if source_instr_settings['acquire_mode'] == 'digitizer':
                descrip.add_axis(DataAxis("round_robins", range(source_instr_settings['nbr_round_robins'])))

            oc.set_descriptor(descrip)

            # Add to our mappings
            chan_to_oc[channel]    = oc
            chan_to_dig[channel]   = source_instr

        # ========================
        # Process the measurements
        # ========================

        for name, settings in enabled_meas.items():
            filt_type = settings['x__class__']

            if filt_type in module_map:
                filt = module_map[filt_type](**settings)
                filt.name = name
                filters[name] = filt
                logger.debug("Found filter class %s for '%s' when loading experiment settings.", filt_type, name)
            else:
                logger.error("Could not find filter class %s for '%s' when loading experiment settings.", filt_type, name)

        # ====================================
        # Establish all of the connections
        # ====================================

        for name, filt in filters.items():

            # Multiple data sources are comma separated, with optional whitespace.
            # If there is a colon in the name, then we are to hook up to a specific connector
            # Otherwise we can safely assume that the name is "source"

            data_sources = [s.strip() for s in experiment.measurement_settings['filterDict'][name]['data_source'].split(",")]

            for data_source in data_sources:
                source = data_source.split(":")
                node_name = source[0]
                conn_name = "source"
                if len(source) == 2:
                    conn_name = source[1]

                if node_name in filters:
                    source = filters[node_name].output_connectors[conn_name]
                elif node_name in experiment.output_connectors:
                    source = experiment.output_connectors[node_name]
                else:
                    raise ValueError("Couldn't find anywhere to attach the source of the specified filter {}".format(name))

                logger.debug("Connecting %s@%s ---> %s", node_name, conn_name, filt)
                graph.append([source, filt.sink])

        experiment.chan_to_oc  = chan_to_oc
        experiment.chan_to_dig = chan_to_dig
        experiment.set_graph(graph)
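A sketch of the data_source convention parsed above (filter names are hypothetical): a bare name attaches to the default "source" connector, while "node:connector" selects a named output connector:

    for ds in ["avg", "avg:final_average"]:
        node, _, conn = ds.partition(":")
        print(node, conn or "source")
    # avg source
    # avg final_average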
Example #26
    def update_descriptors(self):
        logger.debug(
            'Updating averager "%s" descriptors based on input descriptor: %s.',
            self.filter_name, self.sink.descriptor)
        descriptor_in = self.sink.descriptor
        names = [a.name for a in descriptor_in.axes]

        self.axis.allowed_values = names

        if self.axis.value is None:
            self.axis.value = descriptor_in.axes[0].name

        # Convert named axes to an index
        if self.axis.value not in names:
            raise ValueError(
                "Could not find axis {} within the DataStreamDescriptor {}".
                format(self.axis.value, descriptor_in))
        self.axis_num = descriptor_in.axis_num(self.axis.value)
        logger.debug("Averaging over axis #%d: %s", self.axis_num,
                     self.axis.value)

        self.data_dims = descriptor_in.data_dims()
        # If we only have a single point along this axis, then just pass the data straight through
        if self.data_dims[self.axis_num] == 1:
            logger.debug("Averaging over a singleton axis")
            self.passthrough = True

        if self.axis_num == len(descriptor_in.axes) - 1:
            logger.debug("Performing scalar average!")
            self.points_before_partial_average = 1
            self.avg_dims = [1]
        else:
            self.points_before_partial_average = descriptor_in.num_points_through_axis(
                self.axis_num + 1)
            self.avg_dims = self.data_dims[self.axis_num + 1:]

        # If we get multiple final averages simultaneously
        self.reshape_dims = self.data_dims[self.axis_num:]
        if self.axis_num > 0:
            self.reshape_dims = [-1] + self.reshape_dims
        self.mean_axis = self.axis_num - len(self.data_dims)

        self.points_before_final_average = descriptor_in.num_points_through_axis(
            self.axis_num)
        logger.debug("Points before partial average: %s.",
                     self.points_before_partial_average)
        logger.debug("Points before final average: %s.",
                     self.points_before_final_average)
        logger.debug("Data dimensions are %s", self.data_dims)
        logger.debug("Averaging dimensions are %s", self.avg_dims)

        # Define final axis descriptor
        descriptor = descriptor_in.copy()
        self.num_averages = descriptor.pop_axis(self.axis.value).num_points()
        logger.debug("Number of partial averages is %d", self.num_averages)

        if len(descriptor.axes) == 0:
            # We will be left with only a single point here!
            descriptor.add_axis(DataAxis("result", [0]))

        self.sum_so_far = np.zeros(self.avg_dims, dtype=descriptor.dtype)
        self.current_avg_frame = np.zeros(self.points_before_final_average,
                                          dtype=descriptor.dtype)
        self.partial_average.descriptor = descriptor
        self.source.descriptor = descriptor
        self.excited_counts = np.zeros(self.data_dims, dtype=np.int64)

        # We can update the visited_tuples upfront if none
        # of the sweeps are adaptive...
        desc_out_dtype = descriptor_in.axis_data_type(
            with_metadata=True, excluding_axis=self.axis.value)
        if not descriptor_in.is_adaptive():
            vals = [
                a.points_with_metadata() for a in descriptor_in.axes
                if a.name != self.axis.value
            ]
            nested_list = list(itertools.product(*vals))
            flattened_list = [
                tuple((val for sublist in line for val in sublist))
                for line in nested_list
            ]
            descriptor.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.partial_average.output_streams:
            stream.set_descriptor(descriptor)
            stream.descriptor.buffer_mult_factor = 20
            stream.end_connector.update_descriptors()

        for stream in self.source.output_streams:
            stream.set_descriptor(descriptor)
            stream.end_connector.update_descriptors()

        # Define variance axis descriptor
        descriptor_var = descriptor_in.copy()
        descriptor_var.data_name = "Variance"
        descriptor_var.pop_axis(self.axis.value)
        if descriptor_var.unit:
            descriptor_var.unit = descriptor_var.unit + "^2"
        descriptor_var.metadata["num_averages"] = self.num_averages
        self.final_variance.descriptor = descriptor_var

        # Define counts axis descriptor
        descriptor_count = descriptor_in.copy()
        descriptor_count.data_name = "Counts"
        descriptor_count.dtype = np.float64
        descriptor_count.pop_axis(self.axis.value)
        descriptor_count.add_axis(DataAxis("state", [0, 1]), position=0)
        if descriptor_count.unit:
            descriptor_count.unit = "counts"
        descriptor_count.metadata["num_counts"] = self.num_averages
        self.final_counts.descriptor = descriptor_count

        if not descriptor_in.is_adaptive():
            descriptor_var.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor_var.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.final_variance.output_streams:
            stream.set_descriptor(descriptor_var)
            stream.end_connector.update_descriptors()

        for stream in self.final_counts.output_streams:
            stream.set_descriptor(descriptor_count)
            stream.end_connector.update_descriptors()
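A worked sketch of the averaging bookkeeping above, assuming data_dims = [10, 5, 100] (sweep, averages, time) and averaging over the "averages" axis (axis_num = 1):

    # points_before_partial_average = 100      (points through axis 2)
    # points_before_final_average   = 5 * 100  (points through axis 1)
    # avg_dims = [100], num_averages = 5: every frame of 500 incoming
    # points collapses to one averaged 100-point record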
Example #27
 def init_streams(self):
     # Add a "base" data axis: say we are averaging 5 samples per trigger
     self.voltage.add_axis(DataAxis("trials", list(range(self.samples))))
Example #28
 def init_streams(self):
     self.voltage.add_axis(DataAxis("xs", np.arange(100)))
     self.voltage.add_axis(DataAxis("ys", np.arange(100)))
     self.voltage.add_axis(DataAxis("repeats", np.arange(500)))
Example #29
 def init_streams(self):
     # Add a "base" data axis: say we are averaging 5 samples per trigger
     self.voltage.add_axis(
         DataAxis("samples",
                  np.array([0, 1, 2, np.nan, np.nan]),
                  metadata=["data", "data", "data", "0", "1"]))
Example #30
 def init_streams(self):
     self.chan1.add_axis(DataAxis("samples", list(range(self.samples))))
     self.chan2.add_axis(DataAxis("samples", list(range(self.samples))))