Example #1
class X6StreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()

    channel = IntParameter(value_range=(1, 3), snap=1)
    dsp_channel = IntParameter(value_range=(0, 4), snap=1)
    stream_type = Parameter(
        allowed_values=["raw", "demodulated", "integrated"],
        default='demodulated')

    # def __init__(self, name=""):
    #     super(X6StreamSelector, self).__init__(name=name)
    # self.stream_type.value = "Raw" # One of Raw, Demodulated, Integrated
    # self.quince_parameters = [self.channel, self.dsp_channel, self.stream_type]

    def get_channel(self, channel_proxy):
        """Create and return a channel object corresponding to this stream selector"""
        return X6Channel(channel_proxy)

    def get_descriptor(self, stream_selector, receiver_channel):
        """Get the axis descriptor corresponding to this stream selector. If it's an integrated stream,
        then the time axis has already been eliminated. Otherwise, add the time axis."""
        descrip = DataStreamDescriptor()
        if stream_selector.stream_type == 'raw':
            samp_time = 4.0e-9
            descrip.add_axis(
                DataAxis(
                    "time",
                    samp_time *
                    np.arange(receiver_channel.receiver.record_length // 4)))
            descrip.dtype = np.float64
        elif stream_selector.stream_type == 'demodulated':
            samp_time = 32.0e-9
            descrip.add_axis(
                DataAxis(
                    "time",
                    samp_time *
                    np.arange(receiver_channel.receiver.record_length // 32)))
            descrip.dtype = np.complex128
        else:  # Integrated
            descrip.dtype = np.complex128
        return descrip
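A quick standalone check (pure numpy, no auspex required) of the time-axis arithmetic above: "raw" streams are sampled every 4 ns with the record length decimated by 4, "demodulated" streams every 32 ns decimated by 32, so both axes span the same record duration at different resolutions, while "integrated" streams carry no time axis at all. The record length below is an assumed example value.

import numpy as np

record_length = 1024  # assumed example; normally receiver_channel.receiver.record_length

raw_axis = 4.0e-9 * np.arange(record_length // 4)      # 256 points, 4 ns apart
demod_axis = 32.0e-9 * np.arange(record_length // 32)  # 32 points, 32 ns apart

# Both axes cover the same 1.024 us of record time
print(raw_axis.size * 4.0e-9, demod_axis.size * 32.0e-9)  # 1.024e-06 1.024e-06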
Example #2
class TestExperiment(Experiment):
    """Here the run loop merely spews data until it fills up the stream. """

    # Parameters
    amplitude = FloatParameter(unit="V")

    # DataStreams
    voltage = OutputConnector()

    def init_instruments(self):
        pass

    def init_streams(self):
        pass

    async def run(self):
        r = np.power(self.amplitude.value, 2) + 0.1 * np.random.random()
        await self.voltage.push(r)
        await asyncio.sleep(0.01)
Example #3
class AlazarStreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink    = InputConnector()
    source  = OutputConnector()
    channel = IntParameter(value_range=(1,2), snap=1)

    def __init__(self, name=""):
        super(AlazarStreamSelector, self).__init__(name=name)
        self.channel.value = 1 # Either 1 or 2
        self.quince_parameters = [self.channel]

    def get_descriptor(self, source_instr_settings, channel_settings):
        channel = AlazarChannel(channel_settings)

        # Add the time axis
        samp_time = 1.0/source_instr_settings['sampling_rate']
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("time", samp_time*np.arange(source_instr_settings['record_length'])))
        return channel, descrip
Example #4
class SweptTestExperiment(Experiment):
    """Here the run loop merely spews data until it fills up the stream. """

    # Parameters
    field = FloatParameter(unit="Oe")
    freq = FloatParameter(unit="Hz")
    dur = FloatParameter(default=5, unit="ns")

    # DataStreams
    voltage = OutputConnector()

    # Constants
    samples = 5
    time_val = 0

    def init_instruments(self):
        self.field.assign_method(
            lambda x: logger.debug("Field got value " + str(x)))
        self.freq.assign_method(
            lambda x: logger.debug("Freq got value " + str(x)))
        self.dur.assign_method(
            lambda x: logger.debug("Duration got value " + str(x)))

    def init_streams(self):
        # Add a "base" data axis: say we are averaging 5 samples per trigger
        self.voltage.add_axis(DataAxis("trials", list(range(self.samples))))

    def __repr__(self):
        return "<SweptTestExperiment>"

    def run(self):
        logger.debug("Data taker running (inner loop)")
        time_step = 0.1
        time.sleep(0.002)
        data_row = np.sin(
            2 * np.pi * self.time_val) * np.ones(5) + 0.1 * np.random.random(5)
        self.time_val += time_step
        self.voltage.push(data_row)
        logger.debug("Stream pushed points {}.".format(data_row))
        logger.debug("Stream has filled {} of {} points".format(
            self.voltage.points_taken, self.voltage.num_points()))
Example #5
class DummydigStreamSelector(Filter):

    sink = InputConnector()
    source = OutputConnector()
    channel = IntParameter(value_range=(1, 2), snap=1)

    def __init__(self, name=""):
        super(DummydigStreamSelector, self).__init__(name=name)
        self.channel.value = 1  # Either 1 or 2
        self.quince_parameters = [self.channel]

    def get_descriptor(self, source_instr_settings, channel_settings):
        channel = DummydigChannel(channel_settings)

        # Add the time axis
        samp_time = 1.0 / source_instr_settings['sampling_rate']
        descrip = DataStreamDescriptor()
        descrip.add_axis(
            DataAxis(
                "time",
                samp_time * np.arange(source_instr_settings['record_length'])))
        return channel, descrip
Example #6
    def __init__(self, name=None, **kwargs):
        super(Filter, self).__init__()
        self.filter_name = name
        self.input_connectors = {}
        self.output_connectors = {}
        self.parameters = {}
        self.qubit_name = ""

        # Event for killing the filter properly
        self.exit = Event()
        self.done = Event()

        # Keep track of data throughput
        self.processed = 0

        # For objectively measuring doneness
        self.finished_processing = Event()
        self.finished_processing.clear()

        for ic in self._input_connectors:
            a = InputConnector(name=ic, parent=self)
            a.parent = self
            self.input_connectors[ic] = a
            setattr(self, ic, a)
        for oc in self._output_connectors:
            a = OutputConnector(name=oc, parent=self)
            a.parent = self
            self.output_connectors[oc] = a
            setattr(self, oc, a)
        for param in self._parameters:
            a = copy.deepcopy(param)
            a.parent = self
            self.parameters[param.name] = a
            setattr(self, param.name, a)

        # For sending performance information
        self.last_performance_update = datetime.datetime.now()
        self.beginning = datetime.datetime.now()
        self.perf_queue = None
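The loops above implement a declarative pattern: connectors and parameters are declared as class attributes, collected into _input_connectors, _output_connectors, and _parameters at class-creation time (auspex does this with a metaclass), and then re-instantiated per instance so that two filters never share state. A stripped-down, self-contained sketch of the same idea, using toy classes rather than the real auspex ones:

import copy

class Param:
    def __init__(self, name=None, default=None):
        self.name, self.value, self.parent = name, default, None

class CollectParams(type):
    def __init__(cls, name, bases, dct):
        super().__init__(name, bases, dct)
        # Gather declared Param class attributes, as auspex's metaclass does
        cls._parameters = [v for v in dct.values() if isinstance(v, Param)]

class ToyFilter(metaclass=CollectParams):
    def __init__(self):
        self.parameters = {}
        for param in self._parameters:
            a = copy.deepcopy(param)  # per-instance copy: no shared state
            a.parent = self
            self.parameters[param.name] = a
            setattr(self, param.name, a)

class ToyAverager(ToyFilter):
    axis = Param(name="axis", default=0)

f1, f2 = ToyAverager(), ToyAverager()
f1.axis.value = 2
print(f2.axis.value)  # 0 -- parameter state is per-instance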
Example #7
class TestExperiment(Experiment):
    """Here the run loop merely spews data until it fills up the stream. """

    # Parameters
    amplitude = FloatParameter(unit="V")
    duration = FloatParameter(unit="s")

    # DataStreams
    voltage = OutputConnector()

    def init_instruments(self):
        pass

    def init_streams(self):
        pass

    async def run(self):
        r = np.sqrt(
            np.power(self.amplitude.value, 2) +
            np.power(self.duration.value, 2))
        val = 1.0 / (1.0 + np.exp(-10.0 * (r - 5.0)))
        await self.voltage.push(val)
        await asyncio.sleep(0.01)
Example #8
class SweptTestExperiment(Experiment):
    """Here the run loop merely spews data until it fills up the stream. """

    # Parameters
    temperature = FloatParameter(unit="K")

    # DataStreams
    resistance = OutputConnector()

    # Constants
    samples = 5
    time_val = 0

    def __repr__(self):
        return "<SweptTestExperiment>"

    async def run(self):
        logger.debug("Data taker running (inner loop)")
        await asyncio.sleep(0.002)

        def ideal_tc(t, tc=9.0, k=20.0):
            return t*1.0/(1.0 + np.exp(-k*(t-tc)))

        await self.resistance.push(ideal_tc(self.temperature.value))
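ideal_tc above models an idealized superconducting transition: a logistic step that is essentially zero well below tc and approaches t above it. A quick standalone evaluation of its limiting behavior:

import numpy as np

def ideal_tc(t, tc=9.0, k=20.0):
    # ~0 well below tc, rising through the transition to ~t above it
    return t * 1.0 / (1.0 + np.exp(-k * (t - tc)))

for t in (4.0, 8.5, 9.5, 12.0):
    print(t, round(float(ideal_tc(t)), 4))
# 4.0 -> 0.0, 8.5 -> ~0.0004, 9.5 -> ~9.4996, 12.0 -> ~12.0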
Example #9
class IVExperiment(Experiment):

    awg = Agilent33220A("192.168.5.198")

    amplitude = FloatParameter(default=0.1, unit="V")
    frequency  = 167.0 # FloatParameter(default=167.0, unit="Hz")

    sample_rate = 5e5
    num_bursts  = 10

    preamp_gain = 1
    r_ref       = 1e3

    current_input  = OutputConnector(unit="V")
    voltage_sample = OutputConnector(unit="V")

    def init_streams(self):
        descrip = DataStreamDescriptor()
        descrip.data_name='current_input'
        descrip.add_axis(DataAxis("time", np.arange(int(self.sample_rate*self.num_bursts/self.frequency))/self.sample_rate))
        self.current_input.set_descriptor(descrip)

        descrip = DataStreamDescriptor()
        descrip.data_name='voltage_sample'
        descrip.add_axis(DataAxis("time", np.arange(int(self.sample_rate*self.num_bursts/self.frequency))/self.sample_rate))
        self.voltage_sample.set_descriptor(descrip)

    def init_instruments(self):
        # Configure the AWG
        self.awg.output         = False
        self.awg.function       = 'Sine'
        self.awg.load_resistance = self.r_ref
        self.awg.auto_range     = True
        self.awg.amplitude      = self.amplitude.value # Preset to avoid danger
        self.awg.dc_offset      = 0.0
        self.awg.frequency      = self.frequency
        self.awg.burst_state    = True
        self.awg.burst_cycles   = self.num_bursts + 2
        self.awg.trigger_source = "Bus"
        self.awg.output         = True

        self.amplitude.assign_method(self.awg.set_amplitude)
        # self.frequency.assign_method(self.awg.set_frequency)

        # Setup the NIDAQ
        max_voltage = 2.0 #self.amplitude.value*2.0
        self.num_samples_total = int(self.sample_rate*(self.num_bursts+2)/self.frequency)
        self.num_samples_trimmed = int(self.sample_rate*(self.num_bursts)/self.frequency)
        self.trim_len = int(self.sample_rate/self.frequency)
        self.analog_input = Task()
        self.read = int32()
        self.analog_input.CreateAIVoltageChan("Dev1/ai0", "", DAQmx_Val_Diff,
            -max_voltage, max_voltage, DAQmx_Val_Volts, None)
        self.analog_input.CreateAIVoltageChan("Dev1/ai1", "", DAQmx_Val_Diff,
            -max_voltage, max_voltage, DAQmx_Val_Volts, None)
        self.analog_input.CfgSampClkTiming("", self.sample_rate, DAQmx_Val_Rising,
            DAQmx_Val_FiniteSamps , self.num_samples_total)
        self.analog_input.CfgInputBuffer(2*self.num_samples_total)
        self.analog_input.CfgDigEdgeStartTrig("/Dev1/PFI0", DAQmx_Val_Rising)
        self.analog_input.StartTask()
        # self.analog_input.SetStartTrigRetriggerable(1)

    def shutdown_instruments(self):
        self.awg.output     = False
        # self.awg.auto_range = True
        try:
            self.analog_input.StopTask()
            self.analog_input.ClearTask()
        except Exception as e:
            logger.warning("Failed to clear DAQ task!")

    async def run(self):
        """This is run for each step in a sweep."""

        self.awg.trigger()

        buf = np.empty(2*self.num_samples_total)
        self.analog_input.ReadAnalogF64(self.num_samples_total, -1, DAQmx_Val_GroupByChannel,
                                        buf, 2*self.num_samples_total, byref(self.read), None)
        await self.current_input.push(buf[self.num_samples_total+self.trim_len:self.num_samples_total+self.trim_len+self.num_samples_trimmed]/self.r_ref)
        await self.voltage_sample.push(buf[self.trim_len:self.trim_len+self.num_samples_trimmed]/self.preamp_gain)
        await asyncio.sleep(0.02)
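With DAQmx_Val_GroupByChannel, the two channels arrive back-to-back in one flat buffer: the first num_samples_total points belong to the first channel added to the task, the rest to the second. The slices above then drop trim_len samples (one burst period of settling) from the start of each channel's block. A pure-numpy sketch of that layout and slicing, with small assumed sizes:

import numpy as np

# Assumed toy sizes standing in for the experiment's derived values
num_samples_total = 12    # samples per channel in the DAQ buffer
trim_len = 2              # one burst period to discard
num_samples_trimmed = 8   # samples kept per channel

# GroupByChannel layout: all of channel 0, then all of channel 1
ch0 = np.arange(num_samples_total)        # stand-in for Dev1/ai0
ch1 = 100 + np.arange(num_samples_total)  # stand-in for Dev1/ai1
buf = np.concatenate([ch0, ch1])

first = buf[trim_len:trim_len + num_samples_trimmed]
second = buf[num_samples_total + trim_len:
             num_samples_total + trim_len + num_samples_trimmed]
print(first)   # [2 3 4 5 6 7 8 9]
print(second)  # [102 103 104 105 106 107 108 109]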
Example #10
class IVExperiment(Experiment):

    awg = Agilent33500B("192.168.5.117")
    lecroy = HDO6104("TCPIP0::192.168.5.118::INSTR")

    amplitude = FloatParameter(default=0.1, unit="V")
    frequency = 167.0  # FloatParameter(default=167.0, unit="Hz")

    # Parameters for the Lecroy
    sample_rate = 1e9
    num_points = 2.5e6  # Number of points per repeat
    num_bursts = 10
    repeat = 1
    delay = 1  # Delay between repeats

    awg_amplification = 5
    preamp_gain = 1
    r_ref = 10.0e3

    voltage_input = OutputConnector(unit="V")
    voltage_sample = OutputConnector(unit="V")

    def init_streams(self):
        descrip = DataStreamDescriptor()
        descrip.data_name = 'voltage_input'
        descrip.add_axis(DataAxis("index", np.arange(self.num_points + 2)))
        descrip.add_axis(DataAxis("repeat", np.arange(self.repeat)))
        self.voltage_input.set_descriptor(descrip)

        descrip = DataStreamDescriptor()
        descrip.data_name = 'voltage_sample'
        descrip.add_axis(DataAxis("index", np.arange(self.num_points + 2)))
        descrip.add_axis(DataAxis("repeat", np.arange(self.repeat)))
        self.voltage_sample.set_descriptor(descrip)

    def init_instruments(self):
        # Configure the AWG
        self.awg.set_output(False, channel=1)
        self.awg.set_function('Triangle', channel=1)
        self.awg.set_load(50.0, channel=1)
        self.awg.set_auto_range(True, channel=1)
        self.awg.set_amplitude(self.amplitude.value / self.awg_amplification,
                               channel=1)  # Preset to avoid danger
        self.awg.set_dc_offset(0.0, channel=1)
        self.awg.set_frequency(self.frequency, channel=1)
        self.awg.set_burst_state(True, channel=1)
        self.awg.set_burst_cycles(self.num_bursts, channel=1)
        self.awg.set_trigger_source("Bus")
        self.awg.set_output_trigger_source(1)
        self.awg.set_output(True, channel=1)
        self.lecroy.set_channel_enabled(True, channel=1)
        self.lecroy.set_channel_enabled(True, channel=2)
        self.lecroy.sample_points = self.num_points

        self.amplitude.assign_method(lambda x: self.awg.set_amplitude(
            x / self.awg_amplification, channel=1))

    def shutdown_instruments(self):
        self.awg.set_output(False, channel=1)
        self.lecroy.set_channel_enabled(False, channel=1)
        self.lecroy.set_channel_enabled(False, channel=2)

    async def run(self):
        """This is run for each step in a sweep."""
        for rep in range(self.repeat):
            self.awg.trigger()
            while not self.lecroy.interface.query("*OPC?") == "1":
                time.sleep(1)
                print("waiting")
            await self.voltage_input.push(self.lecroy.fetch_waveform(1)[1])
            await self.voltage_sample.push(self.lecroy.fetch_waveform(2)[1])
            await asyncio.sleep(self.delay)
Example #11
class Averager(Filter):
    """Takes data and collapses along the specified axis."""

    sink = InputConnector()
    partial_average = OutputConnector()
    final_average = OutputConnector()
    final_variance = OutputConnector()
    axis = Parameter()

    def __init__(self, axis=None, **kwargs):
        super(Averager, self).__init__(**kwargs)
        self.axis.value = axis
        self.points_before_final_average = None
        self.points_before_partial_average = None
        self.sum_so_far = None
        self.num_averages = None

        self.quince_parameters = [self.axis]

        # Rate limiting for partial averages
        self.last_update = time.time()
        self.update_interval = 0.5

    def update_descriptors(self):
        logger.debug(
            'Updating averager "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)
        descriptor_in = self.sink.descriptor
        names = [a.name for a in descriptor_in.axes]

        self.axis.allowed_values = names

        if self.axis.value is None:
            self.axis.value = descriptor_in.axes[0].name

        # Convert named axes to an index
        if self.axis.value not in names:
            raise ValueError(
                "Could not find axis {} within the DataStreamDescriptor {}".
                format(self.axis.value, descriptor_in))
        self.axis_num = descriptor_in.axis_num(self.axis.value)
        logger.debug("Averaging over axis #%d: %s", self.axis_num,
                     self.axis.value)

        self.data_dims = descriptor_in.data_dims()
        if self.axis_num == len(descriptor_in.axes) - 1:
            logger.debug("Performing scalar average!")
            self.points_before_partial_average = 1
            self.avg_dims = [1]
        else:
            self.points_before_partial_average = descriptor_in.num_points_through_axis(
                self.axis_num + 1)
            self.avg_dims = self.data_dims[self.axis_num + 1:]

        # If we get multiple final average simultaneously
        self.reshape_dims = self.data_dims[self.axis_num:]
        if self.axis_num > 0:
            self.reshape_dims = [-1] + self.reshape_dims
        self.mean_axis = self.axis_num - len(self.data_dims)

        self.points_before_final_average = descriptor_in.num_points_through_axis(
            self.axis_num)
        logger.debug("Points before partial average: %s.",
                     self.points_before_partial_average)
        logger.debug("Points before final average: %s.",
                     self.points_before_final_average)
        logger.debug("Data dimensions are %s", self.data_dims)
        logger.debug("Averaging dimensions are %s", self.avg_dims)

        # Define final axis descriptor
        descriptor = descriptor_in.copy()
        self.num_averages = descriptor.pop_axis(self.axis.value).num_points()
        logger.debug("Number of partial averages is %d", self.num_averages)

        self.sum_so_far = np.zeros(self.avg_dims, dtype=descriptor.dtype)
        self.current_avg_frame = np.zeros(self.points_before_final_average,
                                          dtype=descriptor.dtype)
        self.partial_average.descriptor = descriptor
        self.final_average.descriptor = descriptor

        # We can update the visited_tuples upfront if none
        # of the sweeps are adaptive...
        desc_out_dtype = descriptor_in.axis_data_type(
            with_metadata=True, excluding_axis=self.axis.value)
        if not descriptor_in.is_adaptive():
            vals = [
                a.points_with_metadata() for a in descriptor_in.axes
                if a.name != self.axis.value
            ]
            nested_list = list(itertools.product(*vals))
            flattened_list = [
                tuple((val for sublist in line for val in sublist))
                for line in nested_list
            ]
            descriptor.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.partial_average.output_streams + self.final_average.output_streams:
            stream.set_descriptor(descriptor)
            stream.end_connector.update_descriptors()

        # Define variance axis descriptor
        descriptor_var = descriptor_in.copy()
        descriptor_var.data_name = "Variance"
        descriptor_var.pop_axis(self.axis.value)
        if descriptor_var.unit:
            descriptor_var.unit = descriptor_var.unit + "^2"
        descriptor_var.metadata["num_averages"] = self.num_averages
        self.final_variance.descriptor = descriptor_var

        if not descriptor_in.is_adaptive():
            descriptor_var.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor_var.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.final_variance.output_streams:
            stream.set_descriptor(descriptor_var)
            stream.end_connector.update_descriptors()

    def final_init(self):
        if self.points_before_final_average is None:
            raise Exception(
                "Average has not been initialized. Run 'update_descriptors'")

        self.completed_averages = 0
        self.idx_frame = 0
        self.idx_global = 0
        # We only need to accumulate up to the averaging axis
        # BUT we may get something longer at any given time!
        self.carry = np.zeros(0, dtype=self.final_average.descriptor.dtype)

    async def process_data(self, data):

        # TODO: handle unflattened data separately
        if len(data.shape) > 1:
            data = data.flatten()
        #handle single points
        elif not isinstance(data, np.ndarray) and (data.size == 1):
            data = np.array([data])

        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))
            self.carry = np.zeros(0, dtype=self.final_average.descriptor.dtype)

        idx = 0
        while idx < data.size:
            #check whether we have enough data to fill an averaging frame
            if data.size - idx >= self.points_before_final_average:
                # How many chunks can we process at once?
                num_chunks = int(
                    (data.size - idx) / self.points_before_final_average)
                new_points = num_chunks * self.points_before_final_average
                reshaped = data[idx:idx + new_points].reshape(
                    self.reshape_dims)
                averaged = reshaped.mean(axis=self.mean_axis)
                idx += new_points

                if self.sink.descriptor.is_adaptive():
                    new_tuples = self.sink.descriptor.tuples(
                    )[self.idx_global:self.idx_global + new_points]
                    new_tuples_stripped = remove_fields(
                        new_tuples, self.axis.value)
                    take_axis = -1 if self.axis_num > 0 else 0
                    reduced_tuples = new_tuples_stripped.reshape(
                        self.reshape_dims).take((0, ), axis=take_axis)
                    self.idx_global += new_points

                # Add to Visited tuples
                if self.sink.descriptor.is_adaptive():
                    for os in self.final_average.output_streams + self.final_variance.output_streams + self.partial_average.output_streams:
                        os.descriptor.visited_tuples = np.append(
                            os.descriptor.visited_tuples, reduced_tuples)

                for os in self.final_average.output_streams:
                    await os.push(averaged)

                for os in self.final_variance.output_streams:
                    await os.push(reshaped.var(axis=self.mean_axis, ddof=1)
                                  )  # N-1 in the denominator

                for os in self.partial_average.output_streams:
                    await os.push(averaged)

            # Maybe we can fill a partial frame
            elif data.size - idx >= self.points_before_partial_average:
                # How many chunks can we process at once?
                num_chunks = int(
                    (data.size - idx) / self.points_before_partial_average)
                new_points = num_chunks * self.points_before_partial_average

                # Find the appropriate dimensions for the partial
                partial_reshape_dims = self.reshape_dims[:]
                partial_reshape_dims[self.mean_axis] = -1
                partial_reshape_dims = partial_reshape_dims[self.mean_axis:]

                reshaped = data[idx:idx +
                                new_points].reshape(partial_reshape_dims)
                summed = reshaped.sum(axis=self.mean_axis)
                self.sum_so_far += summed

                self.current_avg_frame[self.idx_frame:self.idx_frame +
                                       new_points] = data[idx:idx + new_points]
                idx += new_points
                self.idx_frame += new_points

                self.completed_averages += num_chunks

                # If we now have enough for the final average, push to both partial and final...
                if self.completed_averages == self.num_averages:
                    reshaped = self.current_avg_frame.reshape(
                        partial_reshape_dims)
                    for os in self.final_average.output_streams + self.partial_average.output_streams:
                        await os.push(reshaped.mean(axis=self.mean_axis))
                    for os in self.final_variance.output_streams:
                        await os.push(reshaped.var(axis=self.mean_axis, ddof=1)
                                      )  # N-1 in the denominator
                    self.sum_so_far[:] = 0.0
                    self.current_avg_frame[:] = 0.0
                    self.completed_averages = 0
                    self.idx_frame = 0
                else:
                    # Emit a partial average since we've accumulated enough data
                    if (time.time() - self.last_update >=
                            self.update_interval):
                        for os in self.partial_average.output_streams:
                            await os.push(self.sum_so_far /
                                          self.completed_averages)
                        self.last_update = time.time()

            # otherwise just add it to the carry
            else:
                self.carry = data[idx:]
                break
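The core of the full-frame branch above is just a reshape followed by a mean (and a variance with ddof=1) along the averaged axis. A self-contained numpy sketch of that step, with assumed dimensions: averaging over 4 "trials", each trial a 3-point record, two complete frames arriving in one flat buffer.

import numpy as np

num_averages, record_len = 4, 3
reshape_dims = [-1, num_averages, record_len]
mean_axis = -2  # the averaged axis, counted from the end as in the filter

data = np.arange(2 * num_averages * record_len, dtype=float)

averaged = data.reshape(reshape_dims).mean(axis=mean_axis)
variance = data.reshape(reshape_dims).var(axis=mean_axis, ddof=1)  # N-1 denominator
print(averaged.shape)  # (2, 3): one averaged record per frame
print(averaged[0])     # [4.5 5.5 6.5]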
Example #12
    def load_filters(experiment):
        # These store any filters we create as well as their connections
        filters = {}
        graph   = []

        # ============================================
        # Find all of the filter modules by inspection
        # ============================================

        modules = (
            importlib.import_module('auspex.filters.' + name)
            for loader, name, is_pkg in pkgutil.iter_modules(auspex.filters.__path__)
        )

        module_map = {}
        for mod in modules:
            filts = (_ for _ in inspect.getmembers(mod) if inspect.isclass(_[1]) and
                                                            issubclass(_[1], Filter) and
                                                            _[1] != Filter)
            module_map.update(dict(filts))

        # ==================================================
        # Find out which output connectors we need to create
        # ==================================================

        # Get the enabled measurements
        enabled_meas = {k: v for k, v in experiment.measurement_settings['filterDict'].items() if v['enabled']}

        # First look for digitizer streams (Alazar or X6)
        dig_settings    = {k: v for k, v in enabled_meas.items() if "StreamSelector" in v['x__class__']}

        # These stream selectors are really just a convenience
        # Remove them from the list of "real" filters
        for k in dig_settings.keys():
            enabled_meas.pop(k)

        # Map from Channel -> OutputConnector
        # and from Channel -> Digitizer for future lookup
        chan_to_oc  = {}
        chan_to_dig = {}

        for name, settings in dig_settings.items():

            # Create and add the OutputConnector
            logger.debug("Adding %s output connector to experiment.", name)
            oc = OutputConnector(name=name, parent=experiment)
            experiment._output_connectors[name] = oc
            experiment.output_connectors[name] = oc
            setattr(experiment, name, oc)

            # Find the digitizer instrument and settings
            source_instr          = experiment._instruments[settings['data_source']]
            source_instr_settings = experiment.instrument_settings['instrDict'][settings['data_source']]

            # Construct the descriptor from the stream
            stream_type = settings['x__class__']
            stream = module_map[stream_type](name=name)
            channel, descrip = stream.get_descriptor(source_instr_settings, settings)

            # Add the channel to the instrument
            source_instr.add_channel(channel)

            # Add the segment axis, which should already be defined...
            if hasattr(experiment, 'segment_axis'):
                # This should contain the proper range and units based on the sweep descriptor
                descrip.add_axis(experiment.segment_axis)
            else:
                # This is the generic axis based on the instrument parameters
                # If there is only one segment, we should omit this axis.
                if source_instr_settings['nbr_segments'] > 1:
                    descrip.add_axis(DataAxis("segments", range(source_instr_settings['nbr_segments'])))

            # Digitizer mode preserves round_robins, while averager mode collapses along them:
            if source_instr_settings['acquire_mode'] == 'digitizer':
                descrip.add_axis(DataAxis("round_robins", range(source_instr_settings['nbr_round_robins'])))

            oc.set_descriptor(descrip)

            # Add to our mappings
            chan_to_oc[channel]    = oc
            chan_to_dig[channel]   = source_instr

        # ========================
        # Process the measurements
        # ========================

        for name, settings in enabled_meas.items():
            filt_type = settings['x__class__']

            if filt_type in module_map:
                filt = module_map[filt_type](**settings)
                filt.name = name
                filters[name] = filt
                logger.debug("Found filter class %s for '%s' when loading experiment settings.", filt_type, name)
            else:
                logger.error("Could not find filter class %s for '%s' when loading experiment settings.", filt_type, name)

        # ====================================
        # Establish all of the connections
        # ====================================

        for name, filt in filters.items():

            # Multiple data sources are comma separated, with optional whitespace.
            # If there is a colon in the name, then we are to hook up to a specific connector
            # Otherwise we can safely assume that the name is "source"

            data_sources = [s.strip() for s in experiment.measurement_settings['filterDict'][name]['data_source'].split(",")]

            for data_source in data_sources:
                source = data_source.split(":")
                node_name = source[0]
                conn_name = "source"
                if len(source) == 2:
                    conn_name = source[1]

                if node_name in filters:
                    source = filters[node_name].output_connectors[conn_name]
                elif node_name in experiment.output_connectors:
                    source = experiment.output_connectors[node_name]
                else:
                    raise ValueError("Couldn't find anywhere to attach the source of the specified filter {}".format(name))

                logger.debug("Connecting %s@%s ---> %s", node_name, conn_name, filt)
                graph.append([source, filt.sink])

        experiment.chan_to_oc  = chan_to_oc
        experiment.chan_to_dig = chan_to_dig
        experiment.set_graph(graph)
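The discovery step at the top of load_filters is a standard plugin idiom: walk a package with pkgutil, import each module, and collect every subclass of a base class with inspect. A self-contained version of that idiom, usable against any package:

import importlib
import inspect
import pkgutil

def discover_subclasses(package, base_cls):
    """Map class name -> class for every strict subclass of base_cls in package."""
    found = {}
    for _, name, _ in pkgutil.iter_modules(package.__path__):
        mod = importlib.import_module(package.__name__ + '.' + name)
        for cls_name, cls in inspect.getmembers(mod, inspect.isclass):
            if issubclass(cls, base_cls) and cls is not base_cls:
                found[cls_name] = cls
    return found

# Mirroring the code above (the module path for Filter is an assumption):
#   import auspex.filters
#   from auspex.filters.filter import Filter
#   module_map = discover_subclasses(auspex.filters, Filter)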
Example #13
class Framer(Filter):
    """Mete out data in increments defined by the specified axis."""

    sink   = InputConnector()
    source = OutputConnector()
    axis   = Parameter()

    def __init__(self, axis=None, **kwargs):
        super(Framer, self).__init__(**kwargs)
        self.axis.value = axis
        self.points_before_final_average   = None
        self.points_before_partial_average = None
        self.sum_so_far = None
        self.num_averages = None

        self.quince_parameters = [self.axis]

    def final_init(self):
        descriptor_in = self.sink.descriptor
        names = [a.name for a in descriptor_in.axes]

        self.axis.allowed_values = names

        if self.axis.value is None:
            self.axis.value = descriptor_in.axes[0].name

        # Convert named axes to an index
        if self.axis.value not in names:
            raise ValueError("Could not find axis {} within the DataStreamDescriptor {}".format(self.axis.value, descriptor_in))
        self.axis_num = descriptor_in.axis_num(self.axis.value)
        logger.debug("Framing on axis #%d: %s", self.axis_num, self.axis.value)

        # Find how many points we want to spit out at a time
        self.data_dims = descriptor_in.data_dims()
        if self.axis_num == len(descriptor_in.axes) - 1:
            raise Exception("Framer has refused to frame along single points.")
        else:
            self.frame_points = descriptor_in.num_points_through_axis(self.axis_num+1)

        logger.debug("Points before emitting frame: %s.", self.frame_points)

        # For storing carryover if getting uneven buffers
        self.idx = 0
        self.carry = np.zeros(0, dtype=self.sink.descriptor.dtype)

    def process_data(self, data):
        # Append any data carried from the last run
        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))

        # This is the largest number of frames we can emit for the time being
        num_frames = data.size // self.frame_points

        # This is the carryover that we'll store until next round.
        # If nothing is left then reset the carryover.
        remaining_points = data.size % self.frame_points
        if remaining_points > 0:
            if num_frames > 0:
                self.carry = data[-remaining_points:]
                data = data[:-remaining_points]
            else:
                self.carry = data
        else:
            self.carry = np.zeros(0, dtype=self.sink.descriptor.dtype)

        if num_frames > 0:
            for i in range(num_frames):
                for os in self.source.output_streams:
                    os.push(data[i*self.frame_points:(i+1)*self.frame_points])
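The carry logic in process_data handles buffers that do not divide evenly into frames: whole frames are emitted now, and the remainder waits for the next call. A self-contained numpy sketch of that bookkeeping, with an assumed frame size of 4 points:

import numpy as np

frame_points = 4     # assumed frame size
carry = np.zeros(0)  # leftover samples from the previous buffer

def frame(data):
    global carry
    data = np.concatenate((carry, data))
    num_frames = data.size // frame_points
    remaining = data.size % frame_points
    carry = data[data.size - remaining:] if remaining else np.zeros(0)
    return [data[i * frame_points:(i + 1) * frame_points] for i in range(num_frames)]

print(frame(np.arange(6)))  # emits [0 1 2 3]; carries [4 5]
print(frame(np.arange(5)))  # emits [4 5 0 1]; carries [2 3 4]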
Example #14
class MixerCalibrationExperiment(Experiment):

    SSB_FREQ = 10e6

    amplitude = OutputConnector(unit='dBc')

    I_offset = FloatParameter(default=0.0, unit="V")
    Q_offset = FloatParameter(default=0.0, unit="V")
    amplitude_factor = FloatParameter(default=1.0)
    phase_skew = FloatParameter(default=0.0, unit="rad")

    sideband_modulation = False

    def __init__(self, qubit, mixer="control"):
        """Initialize MixerCalibrationExperiment Experiment.
            Args:
                qubit: Qubit identifier string.
                mixer: One of 'control', 'measure' to select which mixer to cal.
        """
        if mixer not in ("measure", "control"):
            raise ValueError("Unknown mixer {}: must be either 'measure' or 'control'.".format(mixer))
        self.mixer = mixer
        self.settings = config.yaml_load(config.configFile)
        sa = [name for name, settings in self.settings['instruments'].items() if settings['type'] == 'SpectrumAnalyzer']
        if len(sa) > 1:
            raise ValueError("More than one spectrum analyzer is defined in the configuration file.")
        if len(sa) == 0:
            raise ValueError("No spectrum analyzer is defined in the configuration file.")
        self.sa = sa[0]
        logger.debug("Found spectrum analyzer: {}.".format(self.sa))
        if "LO" not in self.settings['instruments'][self.sa].keys():
            raise ValueError("No local oscillator is defined for spectrum analyzer {}.".format(self.sa))
        try:
            self.LO = self.settings['instruments'][self.sa]['LO']
        except KeyError:
            raise ValueError("LO for spectrum analyzer {} not found in instrument configuration file!".format(self.sa))
        try:
            self.qubit = qubit
            self.qubit_settings = self.settings['qubits'][qubit]
        except KeyError as ex:
            raise ValueError("Could not find qubit {} in the qubit configuration file.".format(qubit)) from ex
        self.AWG = self.settings['qubits'][qubit][mixer]['AWG'].split(" ")[0]
        self.chan = self.settings['qubits'][qubit][mixer]['AWG'].split(" ")[1]
        if self.settings['instruments'][self.AWG]['type'] != 'APS2':
            raise ValueError("Mixer calibration only supported for APS2.")
        self.source = self.settings['qubits'][qubit][mixer]['generator']

        self.instruments_to_enable = [self.sa, self.LO, self.AWG, self.source]
        self.instrs_connected = False
        super(MixerCalibrationExperiment, self).__init__()

    def write_to_file(self):
        awg_settings = self.settings['instruments'][self.AWG]
        awg_settings['tx_channels'][self.chan]['amp_factor'] = round(self.amplitude_factor.value, 5)
        awg_settings['tx_channels'][self.chan]['phase_skew'] = round(self.phase_skew.value, 5)
        awg_settings['tx_channels'][self.chan][self.chan[0]]['offset'] = round(self.I_offset.value, 5)
        awg_settings['tx_channels'][self.chan][self.chan[1]]['offset'] = round(self.Q_offset.value, 5)
        self.settings['instruments'][self.AWG] = awg_settings
        config.yaml_dump(self.settings, config.configFile)
        logger.info("Mixer calibration for {}-{} written to experiment file.".format(self.AWG, self.chan))

    def _set_mixer_phase(self, phase):
        self._instruments[self.AWG].set_mixer_phase_skew(phase) 

    def connect_instruments(self):
        """Extend connect_instruments to reset I,Q offsets and amplitude and phase
        imbalance."""
        super(MixerCalibrationExperiment, self).connect_instruments()
        self._instruments[self.AWG].set_offset(0, 0.0)
        self._instruments[self.AWG].set_offset(1, 0.0)
        self._instruments[self.AWG].set_mixer_amplitude_imbalance(0.0)
        self._instruments[self.AWG].set_mixer_phase_skew(0.0)

    def init_instruments(self):
        self.I_offset.assign_method(lambda x: self._instruments[self.AWG].set_offset(0, x))
        self.Q_offset.assign_method(lambda x: self._instruments[self.AWG].set_offset(1, x))
        self.amplitude_factor.assign_method(self._instruments[self.AWG].set_mixer_amplitude_imbalance)
        self.phase_skew.assign_method(self._set_mixer_phase)

        self.I_offset.add_post_push_hook(lambda: time.sleep(0.1))
        self.Q_offset.add_post_push_hook(lambda: time.sleep(0.1))
        self.amplitude_factor.add_post_push_hook(lambda: time.sleep(0.1))
        self.phase_skew.add_post_push_hook(lambda: time.sleep(0.1))

        for name, instr in self._instruments.items():
            instr_par = self.settings['instruments'][name]
            if instr_par['type'] == 'APS2':
                instr_par['seq_file'] = None
            logger.debug("Setting instr %s with params %s.", name, instr_par)
            instr.set_all(instr_par)

        #make sure the microwave generators are set up properly
        self._instruments[self.source].output = True
        LO_freq = self._instruments[self.source].frequency - self._instruments[self.sa].IF_FREQ
        if self.sideband_modulation:
            LO_freq -= self.SSB_FREQ
        self._instruments[self.LO].frequency = LO_freq
        self._instruments[self.LO].output = True
        self._setup_awg_ssb()
        time.sleep(0.1)

    def reset_calibration(self):
        try:
            self._instruments[self.AWG].set_mixer_amplitude_imbalance(1.0)
            self._instruments[self.AWG].set_mixer_phase_skew(0.0)
            self._instruments[self.AWG].set_offset(0, 0.0)
            self._instruments[self.AWG].set_offset(1, 0.0)
        except Exception as ex:
            raise Exception("Could not reset APS2 mixer calibration. Is the AWG connected?") from ex


    def _setup_awg_ssb(self):
        #set up single sideband modulation IQ playback on the AWG
        self._instruments[self.AWG].stop()
        self._instruments[self.AWG].load_waveform(1, 0.5*np.ones(1200, dtype=np.float64))
        self._instruments[self.AWG].load_waveform(2, np.zeros(1200, dtype=np.float64))
        self._instruments[self.AWG].waveform_frequency = -self.SSB_FREQ
        self._instruments[self.AWG].run_mode = "CW_WAVEFORM"
        #start playback
        self._instruments[self.AWG].run()
        logger.debug("Playing SSB CW IQ modulation on {} at frequency: {} MHz".format(self.AWG, self.SSB_FREQ/1e6))

    def shutdown_instruments(self):
        #reset the APS2, just in case.
        self._instruments[self.LO].output = False
        self._instruments[self.source].output = False
        self._instruments[self.AWG].stop()


    def init_streams(self):
        pass

    async def run(self):
        await self.amplitude.push(self._instruments[self.sa].peak_amplitude())
Example #15
class SwitchSearchLockinExperiment(Experiment):
    voltage = OutputConnector()

    sample = "CSHE2"
    comment = "Search PSPL Switch Voltage"
    # PARAMETERS: Confirm these before running
    field = FloatParameter(default=0.0, unit="T")
    pulse_voltage  = FloatParameter(default=0, unit="V")
    pulse_duration = FloatParameter(default=5.0e-9, unit="s")
    measure_current = 3e-6

    circuit_attenuation = 20.0
    pspl_base_attenuation = 30.0
    settle_delay = 100e-6

    attempts = 1 << 8 # Number of attempts
    samps_per_trig = 10 # Samples per trigger

    # Instruments
    arb   = KeysightM8190A("192.168.5.108")
    pspl  = Picosecond10070A("GPIB0::24::INSTR")
    mag   = AMI430("192.168.5.109")
    # keith = Keithley2400("GPIB0::25::INSTR")
    lock  = SR865("USB0::0xB506::0x2000::002638::INSTR")
    atten = Attenuator("calibration/RFSA2113SB_HPD_20160901.csv", lock.set_ao2, lock.set_ao3)

    min_daq_voltage = -10
    max_daq_voltage = 10

    def init_instruments(self):
        # ===================
        #    Setup the Lockin
        # ===================
        self.lock.tc = 30e-6
        time.sleep(0.5)

        # self.keith.triad()
        # self.keith.conf_meas_res(res_range=1e5)
        # self.keith.conf_src_curr(comp_voltage=0.5, curr_range=1.0e-5)
        # self.keith.current = self.measure_current
        self.mag.ramp()

        # ===================
        #    Setup the AWG
        # ===================

        self.arb.set_output(True, channel=1)
        self.arb.set_output(False, channel=2)
        self.arb.sample_freq = 12.0e9
        self.arb.waveform_output_mode = "WSPEED"
        self.arb.set_output_route("DC", channel=1)
        self.arb.voltage_amplitude = 1.0
        self.arb.set_marker_level_low(0.0, channel=1, marker_type="sync")
        self.arb.set_marker_level_high(1.5, channel=1, marker_type="sync")
        self.arb.continuous_mode = False
        self.arb.gate_mode = False

        # ===================
        #   Setup the PSPL
        # ===================

        self.pspl.amplitude = 7.5*np.power(10, -self.pspl_base_attenuation/20.0)
        self.pspl.trigger_source = "EXT"
        self.pspl.trigger_level = 0.1
        self.pspl.output = True

        self.setup_daq()

        def set_voltage(voltage):
            # Calculate the voltage controller attenuator setting
            self.pspl.amplitude = np.sign(voltage)*7.5*np.power(10, -self.pspl_base_attenuation/20.0)
            vc_atten = abs(20.0 * np.log10(abs(voltage)/7.5)) - self.pspl_base_attenuation - self.circuit_attenuation
            if vc_atten <= 6.0:
                logger.error("Voltage controlled attenuation under range (6dB).")
                raise ValueError("Voltage controlled attenuation under range (6dB).")
            self.atten.set_attenuation(vc_atten)
            time.sleep(0.02)

        # Assign methods
        self.field.assign_method(self.mag.set_field)
        self.pulse_duration.assign_method(self.pspl.set_duration)
        self.pulse_voltage.assign_method(set_voltage)

        # Create hooks for relevant delays
        self.pulse_duration.add_post_push_hook(lambda: time.sleep(0.1))

    def setup_daq(self):
        self.arb.abort()
        self.arb.delete_all_waveforms()
        self.arb.reset_sequence_table()

        # Picosecond trigger waveform
        pspl_trig_wf = KeysightM8190A.create_binary_wf_data(np.zeros(3200), samp_mkr=1)
        pspl_trig_segment_id = self.arb.define_waveform(len(pspl_trig_wf))
        self.arb.upload_waveform(pspl_trig_wf, pspl_trig_segment_id)

        # NIDAQ trigger waveform
        nidaq_trig_wf = KeysightM8190A.create_binary_wf_data(np.zeros(3200), sync_mkr=1)
        nidaq_trig_segment_id = self.arb.define_waveform(len(nidaq_trig_wf))
        self.arb.upload_waveform(nidaq_trig_wf, nidaq_trig_segment_id)

        settle_pts = int(640*np.ceil(self.settle_delay * 12e9 / 640))
        scenario = Scenario()
        seq = Sequence(sequence_loop_ct=int(self.attempts))
        seq.add_waveform(pspl_trig_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0) # bonus non-contiguous memory delay
        scenario.sequences.append(seq)
        self.arb.upload_scenario(scenario, start_idx=0)
        self.arb.sequence_mode = "SCENARIO"
        self.arb.scenario_advance_mode = "REPEAT"
        self.arb.scenario_start_index = 0
        self.arb.run()

        # ===================
        #   Setup the NIDAQ
        # ===================
        self.analog_input = Task()
        self.read = int32()
        self.buf_points = self.samps_per_trig*self.attempts
        self.analog_input.CreateAIVoltageChan("Dev1/ai0", "", DAQmx_Val_Diff,
            self.min_daq_voltage, self.max_daq_voltage, DAQmx_Val_Volts, None)
        self.analog_input.CfgSampClkTiming("", 1e6, DAQmx_Val_Rising, DAQmx_Val_FiniteSamps , self.samps_per_trig)
        self.analog_input.CfgInputBuffer(self.buf_points)
        self.analog_input.CfgDigEdgeStartTrig("/Dev1/PFI0", DAQmx_Val_Rising)
        self.analog_input.SetStartTrigRetriggerable(1)
        self.analog_input.StartTask()

    def init_streams(self):
        # Baked in data axes
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("sample", range(self.samps_per_trig)))
        descrip.add_axis(DataAxis("attempts", range(self.attempts)))
        self.voltage.set_descriptor(descrip)

    async def run(self):
        self.arb.advance()
        self.arb.trigger()
        buf = np.empty(self.buf_points)
        self.analog_input.ReadAnalogF64(self.buf_points, -1, DAQmx_Val_GroupByChannel,
                                        buf, self.buf_points, byref(self.read), None)
        logger.debug("Read a buffer of {} points".format(buf.size))
        await self.voltage.push(buf)
        # Seemingly we need to give the filters some time to catch up here...
        await asyncio.sleep(0.02)
        logger.debug("Stream has filled {} of {} points".format(self.voltage.points_taken, self.voltage.num_points()))

    def shutdown_instruments(self):
        try:
            self.analog_input.StopTask()
        except Exception as e:
            logger.warning("Warning failed to stop task, which is quite typical (!)")

        self.arb.stop()
        # self.keith.current = 0.0
        # self.mag.zero()
        self.pspl.output = False
Example #16
class SWERExperiment(Experiment):
    """ Experiment class for Switching probability measurment
    Determine switching probability for V << V0
    with varying V (and durations?)
    """

    field          = FloatParameter(default=0.0, unit="T")
    pulse_duration = FloatParameter(default=1.0e-9, unit="s")
    pulse_voltage  = FloatParameter(default=0.1, unit="V")
    repeats        = IntParameter(default = 1) # Dummy parameter for repeating
    voltage     = OutputConnector()

    attempts        = 1 << 12
    settle_delay    = 100e-6
    measure_current = 3.0e-6
    samps_per_trig  = 5

    polarity        = 1

    min_daq_voltage = 0.0
    max_daq_voltage = 0.4

    reset_amplitude = 0.1
    reset_duration  = 5.0e-9

    mag   = AMI430("192.168.5.109")
    lock  = SR865("USB0::0xB506::0x2000::002638::INSTR")
    # pspl  = Picosecond10070A("GPIB0::24::INSTR")
    arb   = KeysightM8190A("192.168.5.108")
    keith = Keithley2400("GPIB0::25::INSTR")

    def init_streams(self):
        # Baked in data axes
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("sample", range(self.samps_per_trig)))
        descrip.add_axis(DataAxis("state", range(2)))
        descrip.add_axis(DataAxis("attempt", range(self.attempts)))
        self.voltage.set_descriptor(descrip)

    def init_instruments(self):

        # ===================
        #    Setup the Keithley
        # ===================

        self.keith.triad()
        self.keith.conf_meas_res(res_range=1e5)
        self.keith.conf_src_curr(comp_voltage=0.5, curr_range=1.0e-5)
        self.keith.current = self.measure_current
        self.mag.ramp()

        # ===================
        #    Setup the AWG
        # ===================

        self.arb.set_output(True, channel=1)
        self.arb.set_output(False, channel=2)
        self.arb.sample_freq = 12.0e9
        self.arb.waveform_output_mode = "WSPEED"
        self.arb.set_output_route("DC", channel=1)
        self.arb.voltage_amplitude = 1.0
        self.arb.set_marker_level_low(0.0, channel=1, marker_type="sync")
        self.arb.set_marker_level_high(1.5, channel=1, marker_type="sync")
        self.arb.continuous_mode = False
        self.arb.gate_mode = False
        self.setup_arb(self.pulse_voltage.value)

        # ===================
        #   Setup the NIDAQ
        # ===================

        self.analog_input = Task()
        self.read = int32()
        self.buf_points = 2*self.samps_per_trig*self.attempts
        self.analog_input.CreateAIVoltageChan("Dev1/ai1", "", DAQmx_Val_Diff,
            self.min_daq_voltage, self.max_daq_voltage, DAQmx_Val_Volts, None)
        self.analog_input.CfgSampClkTiming("", 1e6, DAQmx_Val_Rising, DAQmx_Val_FiniteSamps , self.samps_per_trig)
        self.analog_input.CfgInputBuffer(self.buf_points)
        self.analog_input.CfgDigEdgeStartTrig("/Dev1/PFI0", DAQmx_Val_Rising)
        self.analog_input.SetStartTrigRetriggerable(1)
        self.analog_input.StartTask()

        # Assign methods
        self.field.assign_method(self.mag.set_field)
        self.pulse_voltage.assign_method(self.setup_arb)

    def setup_arb(self,volt):
        def arb_pulse(amplitude, duration, sample_rate=12e9):
            arb_voltage = arb_voltage_lookup()
            pulse_points = int(duration*sample_rate)
            if pulse_points < 320:
                wf = np.zeros(320)
            else:
                wf = np.zeros(int(64*np.ceil(pulse_points/64.0)))
            wf[:pulse_points] = np.sign(amplitude)*arb_voltage(abs(amplitude))
            return wf

        self.arb.abort()
        self.arb.delete_all_waveforms()
        self.arb.reset_sequence_table()

        # Reset waveform
        reset_wf    = arb_pulse(-self.polarity*self.reset_amplitude, self.reset_duration)
        wf_data     = KeysightM8190A.create_binary_wf_data(reset_wf)
        rst_segment_id  = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, rst_segment_id)

        # Switching waveform
        switch_wf    = arb_pulse(self.polarity*volt, self.pulse_duration.value)
        wf_data     = KeysightM8190A.create_binary_wf_data(switch_wf)
        sw_segment_id  = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, sw_segment_id)

        # NIDAQ trigger waveform
        nidaq_trig_wf = KeysightM8190A.create_binary_wf_data(np.zeros(3200), sync_mkr=1)
        nidaq_trig_segment_id = self.arb.define_waveform(len(nidaq_trig_wf))
        self.arb.upload_waveform(nidaq_trig_wf, nidaq_trig_segment_id)

        settle_pts = int(640*np.ceil(self.settle_delay * 12e9 / 640))

        scenario = Scenario()
        seq = Sequence(sequence_loop_ct=int(self.attempts))
        #First try with reset flipping pulse
        seq.add_waveform(rst_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0) # bonus non-contiguous memory delay
        seq.add_waveform(sw_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0) # bonus non-contiguous memory delay
        scenario.sequences.append(seq)
        self.arb.upload_scenario(scenario, start_idx=0)
        self.arb.sequence_mode = "SCENARIO"
        self.arb.scenario_advance_mode = "REPEAT"
        self.arb.scenario_start_index = 0
        self.arb.run()

    async def run(self):
        # Keep track of the previous values
        logger.debug("Waiting for filters.")
        await asyncio.sleep(1.0)
        self.arb.advance()
        self.arb.trigger()
        buf = np.empty(self.buf_points)
        self.analog_input.ReadAnalogF64(self.buf_points, -1, DAQmx_Val_GroupByChannel,
                                        buf, self.buf_points, byref(self.read), None)
        await self.voltage.push(buf)
        # Seemingly we need to give the filters some time to catch up here...
        await asyncio.sleep(0.002)
        logger.debug("Stream has filled {} of {} points".format(self.voltage.points_taken, self.voltage.num_points() ))

    def shutdown_instruments(self):
        self.keith.current = 0.0e-5
        # self.mag.zero()
        self.arb.stop()
        try:
            self.analog_input.StopTask()
        except Exception as e:
            print("Warning: failed to stop task (this normally happens with no consequences when taking multiple samples per trigger).")
            pass
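arb_pulse above pads every waveform to the sample granularity implied by the code: at least 320 samples, in steps of 64 (np.ceil returns a float, hence the int() cast needed before np.zeros). A standalone check of that padding arithmetic at the 12 GS/s sample rate used here:

import numpy as np

def padded_length(duration, sample_rate=12e9):
    # Mirror the padding in arb_pulse: >= 320 samples, in 64-sample steps
    pulse_points = int(duration * sample_rate)
    if pulse_points < 320:
        return 320
    return int(64 * np.ceil(pulse_points / 64.0))

for dur in (1.0e-9, 5.0e-9, 100.0e-9):
    print(dur, padded_length(dur))
# 1 ns   -> 12 raw points   -> padded to 320
# 5 ns   -> 60 raw points   -> padded to 320
# 100 ns -> 1200 raw points -> rounded up to 1216 (next multiple of 64)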
Example #17
    def __init__(self):
        super(Experiment, self).__init__()
        # Experiment name
        self.name = None

        # Sweep control
        self.sweeper = Sweeper()

        # This holds the experiment graph
        self.graph = None

        # Should we show the dashboard?
        self.dashboard = False

        # Create and use plots?
        self.do_plotting = False

        # Unique ID for this experiment
        self.uuid = str(uuid.uuid4())

        # Disconnect at the end of experiment?
        self.keep_instruments_connected = False

        # Also keep references to all of the plot filters
        self.plotters = []  # Standard pipeline plotters using streams
        self.extra_plotters = [
        ]  # Plotters using streams, but not the pipeline
        self.manual_plotters = [
        ]  # Plotters using neither streams nor the pipeline
        self.manual_plotter_callbacks = [
        ]  # These are called at the end of run
        self._extra_plots_to_streams = {}

        # Keep track of additional DataStreams created for manual plotters, etc.
        self.extra_streams = []

        # Furthermore, keep references to all of the file writers and buffers.
        self.writers = []
        self.buffers = []

        # ExpProgressBar object to display progress bars
        self.progressbar = None

        # indicates whether the instruments are already connected
        self.instrs_connected = False

        # indicates whether this is the first (or only) experiment in a series (e.g. for pulse calibrations)
        self.first_exp = True

        # add date to data files?
        self.add_date = False

        # save channel library
        self.save_chanddb = False

        # Things we can't metaclass
        self.output_connectors = {}
        for oc in self._output_connectors.keys():
            a = OutputConnector(
                name=oc,
                data_name=oc,
                unit=self._output_connectors[oc].data_unit,
                dtype=self._output_connectors[oc].descriptor.dtype,
                parent=self)
            a.parent = self

            self.output_connectors[oc] = a
            setattr(self, oc, a)

        # Some instruments don't clean up well after themselves, reconstruct them on a
        # per instance basis. These instruments contain a wide variety of complex behaviors
        # and rely on other classes and data structures, so we avoid copying them and
        # run through the constructor instead.
        self._instruments_instance = {}
        for n in self._instruments.keys():
            new_cls = type(self._instruments[n])
            new_inst = new_cls(
                resource_name=self._instruments[n].resource_name,
                name=self._instruments[n].name)
            setattr(self, n, new_inst)
            self._instruments_instance[n] = new_inst
        self._instruments = self._instruments_instance

        # We don't want to add parameters to the base class, so do the same here.
        # These aren't very complicated objects, so we'll throw caution to the wind and
        # try copying them directly.
        self._parameters_instance = {}
        for n, v in self._parameters.items():
            new_inst = copy.deepcopy(v)
            setattr(self, n, new_inst)
            self._parameters_instance[n] = new_inst
        self._parameters = self._parameters_instance

        # Based on the logging level, infer whether we want asyncio debug
        do_debug = logger.getEffectiveLevel() <= logging.DEBUG

        # Run the stream init
        self.init_streams()
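The "Things we can't metaclass" comment above refers to bookkeeping done at class-creation time: a metaclass gathers the class-level Parameter, OutputConnector, and instrument declarations into _parameters, _output_connectors, and _instruments, which __init__ then copies per instance. A minimal, hypothetical sketch of that collection step (the real auspex metaclass does considerably more):

class MetaExperiment(type):
    # Hypothetical, simplified sketch; not the actual auspex implementation.
    # Assumes Parameter, OutputConnector, and Instrument base classes are in scope.
    def __init__(cls, name, bases, dct):
        super(MetaExperiment, cls).__init__(name, bases, dct)
        # Collect class-level declarations so __init__ can make per-instance copies.
        cls._parameters        = {k: v for k, v in dct.items() if isinstance(v, Parameter)}
        cls._output_connectors = {k: v for k, v in dct.items() if isinstance(v, OutputConnector)}
        cls._instruments       = {k: v for k, v in dct.items() if isinstance(v, Instrument)}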
Exemple #18
class nTronBERExperiment(Experiment):

    # Sample information
    sample = "CSHE"
    comment = "Bit Error Rate with nTron pulses"
    # Parameters
    field = FloatParameter(default=0.0, unit="T")
    nTron_voltage = FloatParameter(default=0.2, unit="V")
    nTron_duration = FloatParameter(default=1e-9, unit="s")
    attempts = IntParameter(default=1 << 10)

    # Constants (set with attribute access if you want to change these!)
    settle_delay = 200e-6
    measure_current = 3.0e-6
    samps_per_trig = 5

    polarity = 1

    min_daq_voltage = 0.0
    max_daq_voltage = 0.4

    reset_amplitude = 0.2
    reset_duration = 5.0e-9

    # Things coming back
    daq_buffer = OutputConnector()

    # Instrument resources
    mag = AMI430("192.168.5.109")
    # lock  = SR865("USB0::0xB506::0x2000::002638::INSTR")
    # pspl  = Picosecond10070A("GPIB0::24::INSTR")
    # atten = Attenuator("calibration/RFSA2113SB_HPD_20160706.csv", lock.set_ao2, lock.set_ao3)
    arb = KeysightM8190A("192.168.5.108")
    keith = Keithley2400("GPIB0::25::INSTR")

    def init_instruments(self):

        # ===================
        #    Setup the Keithley
        # ===================

        self.keith.triad()
        self.keith.conf_meas_res(res_range=1e5)
        self.keith.conf_src_curr(comp_voltage=0.5, curr_range=1.0e-5)
        self.keith.current = self.measure_current
        self.mag.ramp()

        # ===================
        #    Setup the AWG
        # ===================

        self.arb.set_output(True, channel=1)
        self.arb.set_output(False, channel=2)
        self.arb.sample_freq = 12.0e9
        self.arb.waveform_output_mode = "WSPEED"
        self.arb.set_output_route("DC", channel=1)
        self.arb.voltage_amplitude = 1.0
        self.arb.set_marker_level_low(0.0, channel=1, marker_type="sync")
        self.arb.set_marker_level_high(1.5, channel=1, marker_type="sync")
        self.arb.continuous_mode = False
        self.arb.gate_mode = False

        self.nTron_control_voltage = nTron_voltage_lookup()
        self.setup_arb(self.nTron_voltage.value)

        # Assign methods
        self.field.assign_method(self.mag.set_field)
        self.nTron_voltage.assign_method(self.setup_arb)

    def setup_arb(self, vpeak):
        self.arb.abort()
        self.arb.delete_all_waveforms()
        self.arb.reset_sequence_table()

        reset_wf = arb_pulse(-self.polarity * self.reset_amplitude,
                             self.reset_duration)
        wf_data = KeysightM8190A.create_binary_wf_data(reset_wf)
        rst_segment_id = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, rst_segment_id)

        no_reset_wf = arb_pulse(0.0, 3.0 / 12e9)
        wf_data = KeysightM8190A.create_binary_wf_data(no_reset_wf)
        no_rst_segment_id = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, no_rst_segment_id)

        # nTron waveforms
        volt = self.polarity * self.nTron_control_voltage(vpeak)
        logger.debug("Set nTron pulse: {}V -> AWG {}V, {}s".format(
            vpeak, volt, self.nTron_duration.value))
        ntron_wf = ntron_pulse(amplitude=volt,
                               fall_time=self.nTron_duration.value)
        wf_data = KeysightM8190A.create_binary_wf_data(ntron_wf)
        ntron_segment_id = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, ntron_segment_id)

        # NIDAQ trigger waveform
        nidaq_trig_wf = KeysightM8190A.create_binary_wf_data(np.zeros(3200),
                                                             sync_mkr=1)
        nidaq_trig_segment_id = self.arb.define_waveform(len(nidaq_trig_wf))
        self.arb.upload_waveform(nidaq_trig_wf, nidaq_trig_segment_id)

        settle_pts = int(640 * np.ceil(self.settle_delay * 12e9 / 640))

        scenario = Scenario()
        seq = Sequence(sequence_loop_ct=int(self.attempts.value))
        seq.add_waveform(rst_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0)  # bonus non-contiguous memory delay
        # seq.add_waveform(pspl_trig_segment_id)
        seq.add_waveform(ntron_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0)  # bonus non-contiguous memory delay
        scenario.sequences.append(seq)
        self.arb.upload_scenario(scenario, start_idx=0)

        self.arb.sequence_mode = "SCENARIO"
        self.arb.scenario_advance_mode = "REPEAT"
        self.arb.scenario_start_index = 0
        self.arb.run()

        # ===================
        #   Setup the NIDAQ
        # ===================

        self.analog_input = Task()
        self.read = int32()
        self.buf_points = 2 * self.samps_per_trig * self.attempts.value
        self.analog_input.CreateAIVoltageChan("Dev1/ai1", "", DAQmx_Val_Diff,
                                              self.min_daq_voltage,
                                              self.max_daq_voltage,
                                              DAQmx_Val_Volts, None)
        self.analog_input.CfgSampClkTiming("", 1e6, DAQmx_Val_Rising,
                                           DAQmx_Val_FiniteSamps,
                                           self.samps_per_trig)
        self.analog_input.CfgInputBuffer(self.buf_points)
        self.analog_input.CfgDigEdgeStartTrig("/Dev1/PFI0", DAQmx_Val_Rising)
        self.analog_input.SetStartTrigRetriggerable(1)
        self.analog_input.StartTask()

    def init_streams(self):
        # Baked in data axes
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("samples", range(self.samps_per_trig)))
        descrip.add_axis(DataAxis("state", range(2)))
        descrip.add_axis(DataAxis("attempts", range(self.attempts.value)))
        self.daq_buffer.set_descriptor(descrip)

    async def run(self):
        """This is run for each step in a sweep."""
        self.arb.advance()
        self.arb.trigger()
        buf = np.empty(self.buf_points)
        self.analog_input.ReadAnalogF64(self.buf_points, -1,
                                        DAQmx_Val_GroupByChannel, buf,
                                        self.buf_points, byref(self.read),
                                        None)
        await self.daq_buffer.push(buf)
        # Seemingly we need to give the filters some time to catch up here...
        await asyncio.sleep(0.02)
        logger.debug("Stream has filled {} of {} points".format(
            self.daq_buffer.points_taken, self.daq_buffer.num_points()))

    def shutdown_instruments(self):
        self.keith.current = 0.0e-5
        # self.mag.zero()
        self.arb.stop()
        try:
            self.analog_input.StopTask()
        except Exception:
            print(
                "Warning: failed to stop task (this normally happens with no consequences when taking multiple samples per trigger)."
            )
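Exemple #18 relies on arb_pulse and ntron_pulse helpers defined elsewhere in the original module. Below is a sketch of arb_pulse consistent with the inline version in Exemple #24 further down (the M8190A wants segments of at least 320 samples, in multiples of 64); ntron_pulse is analogous but shapes the fall time, and nTron_voltage_lookup presumably interpolates a calibration table, like the arb_voltage_lookup sketch after Exemple #24.

import numpy as np

def arb_pulse(amplitude, duration, sample_rate=12e9):
    """Rectangular pulse padded to the M8190A's segment granularity
    (minimum 320 points, multiples of 64); matches the inline helper
    in Exemple #24."""
    pulse_points = int(duration * sample_rate)
    wf = np.zeros(max(320, 64 * int(np.ceil(pulse_points / 64.0))))
    wf[:pulse_points] = amplitude
    return wf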
Exemple #19
class SingleShotMeasurement(Filter):

    save_kernel = BoolParameter(default=False)
    optimal_integration_time = BoolParameter(default=False)
    set_threshold = BoolParameter(default=False)
    zero_mean = BoolParameter(default=False)
    logistic_regression = BoolParameter(default=False)

    sink = InputConnector()
    source = OutputConnector() # Single shot fidelity

    TOLERANCE = 1e-3

    def __init__(self, save_kernel=False, optimal_integration_time=False,
                    zero_mean=False, set_threshold=False,
                    logistic_regression=False, **kwargs):
        super(SingleShotMeasurement, self).__init__(**kwargs)
        if len(kwargs) > 0:
            self.save_kernel.value = save_kernel
            self.optimal_integration_time.value = optimal_integration_time
            self.zero_mean.value = zero_mean
            self.set_threshold.value = set_threshold
            self.logistic_regression.value = logistic_regression

        self.quince_parameters = [self.save_kernel, self.optimal_integration_time,
            self.zero_mean, self.set_threshold, self.logistic_regression]

        self.pdf_data_queue = Queue() #Output queue
        self.fidelity       = self.source

    def update_descriptors(self):

        logger.debug("Updating Plotter %s descriptors based on input descriptor %s", self.filter_name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor
        try:
            self.time_pts = self.descriptor.axes[self.descriptor.axis_num("time")].points
            self.record_length = len(self.time_pts)
        except ValueError:
            raise ValueError("Single shot filter sink does not appear to have a time axis!")
        self.num_averages = len(self.sink.descriptor.axes[self.descriptor.axis_num("averages")].points)
        self.num_segments = len(self.sink.descriptor.axes[self.descriptor.axis_num("segment")].points)
        self.ground_data = np.zeros((self.record_length, self.num_averages), dtype=np.complex128)
        self.excited_data = np.zeros((self.record_length, self.num_averages), dtype=np.complex128)
        self.total_points = self.num_segments*self.record_length*self.num_averages # Total points BEFORE sweep axes

        output_descriptor = DataStreamDescriptor()
        output_descriptor.axes = [_ for _ in self.descriptor.axes if type(_) is SweepAxis]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128

        if len(output_descriptor.axes) == 0:
            output_descriptor.add_axis(DataAxis("Fidelity", [1]))

        for os in self.fidelity.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()


    def final_init(self):
        self.fid_buffer = np.empty(self.record_length*self.num_averages*self.num_segments, dtype=np.complex128)
        self.idx = 0

    def process_data(self, data):
        """Fill the ground and excited data bins"""

        self.fid_buffer[self.idx:self.idx+len(data)] = data
        self.idx += len(data)

        if self.idx == self.record_length*self.num_averages*self.num_segments:
            self.idx = 0
            reshaped = self.fid_buffer.reshape(self.record_length, -1, order='F')
            self.ground_data = reshaped[:, ::2]
            self.excited_data = reshaped[:, 1::2]
            self.compute_filter()
            if self.logistic_regression.value:
                self.logistic_fidelity()
            if self.save_kernel.value:
                self._save_kernel()
            for os in self.fidelity.output_streams:
                os.push(self.fidelity_result)
            self.pdf_data_queue.put(self.pdf_data)

    def compute_filter(self):
        """Compute the single shot kernel and obtain single-shot measurement
        fidelity.

        Expects that the data will be in self.ground_data and self.excited_data,
        which are (T, N)-shaped numpy arrays, with T the time axis and N the
        number of shots."""
        #get excited and ground state data
        try:
            ground_mean = np.mean(self.ground_data, axis=1)
            excited_mean = np.mean(self.excited_data, axis=1)
        except AttributeError:
            raise Exception("Single shot filter does not appear to have any data!")
        distance = np.abs(np.mean(ground_mean - excited_mean))
        bias = np.mean(ground_mean + excited_mean) / distance
        logger.debug("Found single-shot measurement distance: {} and bias {}.".format(distance, bias))
        #construct matched filter kernel
        old_settings = np.seterr(divide='ignore', invalid='ignore')
        kernel = np.nan_to_num(np.divide(np.conj(ground_mean - excited_mean), np.var(self.ground_data, ddof=1, axis=1)))
        np.seterr(**old_settings)
        #sets kernel to zero when difference is too small, and prevents
        #kernel from diverging when var->0 at beginning of record_length
        kernel = np.multiply(kernel, np.greater(np.abs(ground_mean - excited_mean), self.TOLERANCE * distance))
        #subtract offset to cancel low-frequency fluctuations when integrating
        #raw data (not demod)
        if self.zero_mean.value:
            kernel = kernel - np.mean(kernel)
        logger.debug("Found single shot filter norm: {}.".format(np.sum(np.abs(kernel))))
        #annoyingly numpy's isreal has the opposite behavior to MATLAB's
        if not np.any(np.imag(kernel) > np.finfo(np.complex128).eps):
            #construct analytic signal from Hilbert transform
            kernel = hilbert(np.real(kernel))
        #normalize between -1 and 1
        kernel = kernel / np.amax(np.hstack([np.abs(np.real(kernel)), np.abs(np.imag(kernel))]))
        #apply matched filter
        weighted_ground = self.ground_data * kernel[:, np.newaxis]
        weighted_excited = self.excited_data * kernel[:, np.newaxis]

        if self.optimal_integration_time.value:
            #take cumulative sum up to each time step
            ground_I = np.real(weighted_ground)
            ground_Q = np.imag(weighted_ground)
            excited_I = np.real(weighted_excited)
            excited_Q = np.imag(weighted_excited)
            int_ground_I = np.cumsum(ground_I, axis=0)
            int_ground_Q = np.cumsum(ground_Q, axis=0)
            int_excited_I = np.cumsum(excited_I, axis=0)
            int_excited_Q = np.cumsum(excited_Q, axis=0)
            I_mins = np.amin(np.minimum(int_ground_I, int_excited_I), axis=1)
            I_maxes = np.amax(np.maximum(int_ground_I, int_excited_I), axis=1)
            num_times = int_ground_I.shape[0]
            fidelities = np.zeros((num_times, ))
            #Loop through each integration point; estimate the CDF and
            #then calculate best measurement fidelity
            for pt in range(num_times):
                bins = np.linspace(I_mins[pt], I_maxes[pt], 100)
                g_PDF = np.histogram(int_ground_I[pt, :], bins)[0]
                e_PDF = np.histogram(int_excited_I[pt,:], bins)[0]
                fidelities[pt] = np.sum(np.abs(g_PDF - e_PDF)) / np.sum(g_PDF + e_PDF)
            best_idx = fidelities.argmax(axis=0)
            self.best_integration_time = best_idx
            logger.info("Found best integration time at {} out of {} decimated points.".format(best_idx, num_times))
            #redo calculation with KDEs to get a more accurate estimate
            bins = np.linspace(I_mins[best_idx], I_maxes[best_idx], 100)
            g_KDE = gaussian_kde(int_ground_I[best_idx, :])
            e_KDE = gaussian_kde(int_excited_I[best_idx, :])
            g_PDF = g_KDE(bins)
            e_PDF = e_KDE(bins)
        else:
            ground_I = np.sum(np.real(weighted_ground), axis=0)
            ground_Q = np.sum(np.imag(weighted_ground), axis=0)
            excited_I = np.sum(np.real(weighted_excited), axis=0)
            excited_Q = np.sum(np.imag(weighted_excited), axis=0)
            I_min = np.amin(np.minimum(ground_I, excited_I))
            I_max = np.amax(np.maximum(ground_I, excited_I))
            bins = np.linspace(I_min, I_max, 100)
            g_KDE = gaussian_kde(ground_I)
            e_KDE = gaussian_kde(excited_I)
            g_PDF = g_KDE(bins)
            e_PDF = e_KDE(bins)

        self.kernel = kernel
        max_F_I = 1 - 0.5 * (1 - 0.5 * (bins[2] - bins[1]) * np.sum(np.abs(g_PDF - e_PDF)))
        self.pdf_data = {"Max I Fidelity": max_F_I,
                         "I Bins": bins,
                         "Ground I PDF": g_PDF,
                         "Excited I PDF": e_PDF}

        if self.set_threshold.value:
            indmax = (np.abs(np.cumsum(g_PDF / np.sum(g_PDF))
                        - np.cumsum(e_PDF / np.sum(e_PDF)))).argmax(axis=0)
            self.pdf_data["I Threshold"] = bins[indmax]
            logger.info("Single shot kernel found I threshold at {}.".format(bins[indmax]))

        if self.optimal_integration_time.value:
            mu_g, sigma_g = norm.fit(int_ground_I[best_idx, :])
            mu_e, sigma_e = norm.fit(int_excited_I[best_idx, :])
        else:
            mu_g, sigma_g = norm.fit(ground_I)
            mu_e, sigma_e = norm.fit(excited_I)
        self.pdf_data["Ground I Gaussian PDF"] = norm.pdf(bins, mu_g, sigma_g)
        self.pdf_data["Excited I Gaussian PDF"] = norm.pdf(bins, mu_e, sigma_e)

        #calculate kernel density estimates for other quadrature
        if self.optimal_integration_time.value:
            Q_min = np.amin([int_ground_Q[best_idx,:], int_excited_Q[best_idx,:]])
            Q_max = np.amax([int_ground_Q[best_idx,:], int_excited_Q[best_idx,:]])
            qbins = np.linspace(Q_min, Q_max, 100)
            g_KDE = gaussian_kde(int_ground_Q[best_idx, :])
            e_KDE = gaussian_kde(int_excited_Q[best_idx, :])
        else:
            qbins = np.linspace(np.amin([ground_Q, excited_Q]), np.amax([ground_Q, excited_Q]), 100)
            g_KDE = gaussian_kde(ground_Q)
            e_KDE = gaussian_kde(excited_Q)
        self.pdf_data["Q Bins"] = qbins
        g_PDF_Q = g_KDE(qbins)
        e_PDF_Q = e_KDE(qbins)
        self.pdf_data["Ground Q PDF"] =  g_PDF_Q
        self.pdf_data["Excited Q PDF"] =  e_PDF_Q
        self.pdf_data["Max Q Fidelity"] = 1 - 0.5 * (1 - 0.5 * (qbins[2] - qbins[1]) * np.sum(np.abs(g_PDF_Q - e_PDF_Q)))

        if self.optimal_integration_time.value:
            mu_g, sigma_g = norm.fit(int_ground_Q[best_idx, :])
            mu_e, sigma_e = norm.fit(int_excited_Q[best_idx, :])
        else:
            mu_g, sigma_g = norm.fit(ground_Q)
            mu_e, sigma_e = norm.fit(excited_Q)
        self.pdf_data["Ground Q Gaussian PDF"] = norm.pdf(bins, mu_g, sigma_g)
        self.pdf_data["Excited Q Gaussian PDF"] = norm.pdf(bins, mu_e, sigma_e)

        self.fidelity_result = self.pdf_data["Max I Fidelity"] + 1j * self.pdf_data["Max Q Fidelity"]
        logger.info("Single shot fidelity filter found: {}".format(self.fidelity_result))

    def logistic_fidelity(self):
        #group data and assign state labels
        gnd_features = np.hstack([np.real(self.ground_data.T),
                                np.imag(self.ground_data.T)])
        ex_features = np.hstack([np.real(self.excited_data.T),
                                np.imag(self.excited_data.T)])
        #liblinear wants arrays in C order
        features = np.ascontiguousarray(np.vstack([gnd_features, ex_features]))
        state = np.ascontiguousarray(np.hstack([np.zeros(self.ground_data.shape[1]),
                                                np.ones(self.excited_data.shape[1])]))
        #Set up logistic regression with cross-validation using liblinear.
        #Cs sets the inverse of the regularization strength, which will be optimized
        #through cross-validation. Uses the default Stratified K-Folds
        #CV generator, with 3 folds.
        #This is set up to be as consistent with the MATLAB implementation
        #as I can make it. --GJR
        Cs = np.logspace(-1,2,5)
        logreg = LogisticRegressionCV(Cs=Cs, cv=3, solver='liblinear')
        logreg.fit(features, state) #fit the model
        predictions = logreg.predict(features) #in-place classification
        score = logreg.score(features,state) #mean accuracy of classification
        N = len(predictions)
        S = np.sum(predictions == state) #how many we got right
        #now calculate confidence intervals
        c = 0.95
        flo = betaincinv(S+1, N-S+1, (1-c)/2.)
        fhi = betaincinv(S+1, N-S+1, (1+c)/2.)
        logger.info(("In-place logistic regression fidelity: " +
                "{:.2f}% ({:.2f}, {:.2f})".format(100*score, 100*flo, 100*fhi)))

    def _save_kernel(self):
        import QGL.config as qconfig
        if not qconfig.KernelDir or not os.path.exists(qconfig.KernelDir):
            logger.warning("No kernel directory provided, please set auspex.config.KernelDir")
            logger.warning("Saving kernel to local directory.")
            dir = "./"
        else:
            dir = qconfig.KernelDir
        try:
            logger.info(self.filter_name)
            filename = self.filter_name + "_kernel.txt"
            header = "Single shot fidelity filter - {}:\nSource: {}".format(time.strftime("%m/%d/%y -- %H:%M"), self.filter_name)
            np.savetxt(os.path.join(dir, filename), self.kernel, header=header, comments="#")
        except (AttributeError, IOError) as ex:
            raise AttributeError("Could not save single shot fidelity kernel!") from ex
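The matched-filter math in compute_filter is easy to exercise outside the filter pipeline. Here is a self-contained sketch on synthetic (T, N) ground/excited records with invented signal and noise levels, showing how the kernel and the I-quadrature fidelity estimate arise:

import numpy as np

T, N = 256, 2000                      # time points x shots (invented numbers)
rng = np.random.default_rng(0)
envelope = np.exp(-np.arange(T) / 100.0)

def shots(amplitude):
    noise = rng.standard_normal((T, N)) + 1j * rng.standard_normal((T, N))
    return amplitude * envelope[:, None] + 0.3 * noise

ground, excited = shots(0.0), shots(1.0)

# Matched filter: conjugated mean difference, weighted by the ground-state variance.
diff = np.mean(ground, axis=1) - np.mean(excited, axis=1)
kernel = np.conj(diff) / np.var(ground, ddof=1, axis=1)
kernel /= np.amax(np.abs(kernel))

# Integrate each shot against the kernel; compare I-quadrature histograms.
g_I = np.sum(np.real(ground * kernel[:, None]), axis=0)
e_I = np.sum(np.real(excited * kernel[:, None]), axis=0)
bins = np.linspace(min(g_I.min(), e_I.min()), max(g_I.max(), e_I.max()), 100)
g_PDF, e_PDF = np.histogram(g_I, bins)[0], np.histogram(e_I, bins)[0]
print("fidelity estimate:", np.sum(np.abs(g_PDF - e_PDF)) / np.sum(g_PDF + e_PDF))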
Exemple #20
class WindowIntegrator(Filter):
    """
    Allow a kernel from the set {'chebwin', 'blackman', 'slepian',
    'boxcar'} to be set for the duration of the start and stop values.

    YAML parameters are:
    type: WindowIntegrator
    source: Demod-q1
    kernel_type: 'chebwin'
    start: 5.0e-07
    stop: 9.0e-07

    See: https://docs.scipy.org/doc/scipy/reference/signal.html for more
    details on the filters specifics.
    """

    sink = InputConnector()
    source = OutputConnector()
    bias = FloatParameter(default=0.0)
    kernel_type = Parameter(default='boxcar', allowed_values=['chebwin',\
        'blackman', 'slepian', 'boxcar'])
    start = FloatParameter(default=0.0)
    stop = FloatParameter(default=100e-9)
    frequency = FloatParameter(default=0.0)
    """Integrate with a given kernel. Kernel will be padded/truncated to match record length"""
    def __init__(self, **kwargs):
        super(WindowIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        self.quince_parameters = [
            self.kernel_type, self.frequency, self.start, self.stop
        ]

    def update_descriptors(self):
        if not self.kernel_type.value:
            raise ValueError("Integrator was passed a null kernel type")

        logger.debug(
            'Updating WindowIntegrator "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()

        time_pts = self.sink.descriptor.axes[-1].points
        time_step = time_pts[1] - time_pts[0]
        kernel = np.zeros(record_length, dtype=np.complex128)
        sample_start = int(self.start.value / time_step)
        sample_stop = int(self.stop.value / time_step) + 1
        if self.kernel_type.value == 'boxcar':
            kernel[sample_start:sample_stop] = 1.0
        elif self.kernel_type.value == 'chebwin':
            # create a Dolph-Chebyshev window with 100 dB attenuation
            kernel[sample_start:sample_stop] = \
                chebwin(sample_stop - sample_start, at=100)
        elif self.kernel_type.value == 'blackman':
            kernel[sample_start:sample_stop] = \
                blackman(sample_stop - sample_start)
        elif self.kernel_type.value == 'slepian':
            # create a Slepian window with 0.2 bandwidth
            kernel[sample_start:sample_stop] = \
                slepian(sample_stop - sample_start, width=0.2)

        # add modulation (time_pts is already in seconds)
        kernel *= np.exp(2j * np.pi * self.frequency.value * time_pts)

        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for os in self.source.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()

    async def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
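Stripped of the stream plumbing, update_descriptors builds a window over [start, stop], modulates it, and pads it to the record length; process_data then reduces each record to a single inner product. A runnable sketch with illustrative numbers (blackman shown; chebwin and slepian follow the same pattern):

import numpy as np
from scipy.signal.windows import blackman

record_length, time_step = 1024, 1e-9          # invented record parameters
time_pts = time_step * np.arange(record_length)
start, stop, frequency = 100e-9, 500e-9, 10e6

sample_start = int(start / time_step)
sample_stop = int(stop / time_step) + 1

kernel = np.zeros(record_length, dtype=np.complex128)
kernel[sample_start:sample_stop] = blackman(sample_stop - sample_start)
kernel *= np.exp(2j * np.pi * frequency * time_pts)   # demodulate at `frequency`

# One record reduces to a single inner product, as in process_data above.
record = np.cos(2 * np.pi * frequency * time_pts)
print(np.inner(record, kernel))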
Exemple #21
class MixerCalibrationExperiment(Experiment):
    """A mixer calibration experiment, using an APS1 or APS2 unit to calibrate the mixer offsets. Pulls sideband
    modulation frequency (IF) and LO frequency from the channel library.
    """
    SSB_FREQ = 10e6

    amplitude = OutputConnector(unit='dBc')

    I_offset = FloatParameter(default=0.0, unit="V")
    Q_offset = FloatParameter(default=0.0, unit="V")
    amplitude_factor = FloatParameter(default=1.0)
    phase_skew = FloatParameter(default=0.0, unit="rad")

    sideband_modulation = True

    def __init__(self,
                 channel,
                 spectrum_analyzer,
                 config_dict,
                 mixer="control"):
        """Initialize MixerCalibrationExperiment Experiment.
            Args:
                channel:                LogicalChannel to perform calibration.
                spectrum_analyzer:      Which spectrum analyzer should be used.
                config_dict:            Dictionary of values to store calibration results.
                mixer:                  One of 'control', 'measure' to select which mixer to cal.
        """
        super(MixerCalibrationExperiment, self).__init__()

        self.channel = channel
        self.config_dict = config_dict
        self.sideband_modulation = config_dict["sideband_modulation"]
        self._sa = spectrum_analyzer
        assert self._sa.LO_source is not None, "No microwave source associated with spectrum analyzer"
        self._LO = self._sa.LO_source
        if mixer.lower() == "measure":
            self._awg = channel.measure_chan.phys_chan.transmitter
            self._phys_chan = channel.measure_chan.phys_chan
            self._source = channel.measure_chan.phys_chan.generator
            self.SSB_FREQ = channel.measure_chan.autodyne_freq
        elif mixer.lower() == "control":
            self._awg = channel.phys_chan.transmitter
            self._phys_chan = channel.phys_chan
            self._source = channel.phys_chan.generator
            self.SSB_FREQ = channel.frequency
        else:
            raise ValueError(
                "Unknown mixer {}: must be either 'measure' or 'control'.".
                format(mixer))

        self.instrument_proxies = [self._sa, self._LO, self._awg, self._source]
        self.instruments = []
        for instrument in self.instrument_proxies:
            instr = instrument_map[instrument.model](
                instrument.address, instrument.label)  # Instantiate
            # For easy lookup
            instr.proxy_obj = instrument
            instrument._locked = False
            instrument.instr = instr
            instrument._locked = True
            # Add to the experiment's instrument list
            self._instruments[instrument.label] = instr
            self.instruments.append(instr)

        self.sa = self._instruments[self._sa.label]
        self.LO = self._instruments[self._LO.label]
        self.source = self._instruments[self._source.label]
        self.awg = self._instruments[self._awg.label]

        self.buff = DataBuffer()
        edges = [(self.amplitude, self.buff.sink)]
        self.set_graph(edges)

    def connect_instruments(self):
        """Connect instruments, resetting all mixer offsets to default values.
        """
        super(MixerCalibrationExperiment, self).connect_instruments()
        if isinstance(self.awg, bbn.APS2):
            self.awg.set_offset(0, 0.0)
            self.awg.set_offset(1, 0.0)
            self.awg.set_mixer_amplitude_imbalance(1.0)
            self.awg.set_mixer_phase_skew(0.0)
        else:
            self.awg.set_offset(int(self._phys_chan.label[-2]), 0.0)
            self.awg.set_offset(int(self._phys_chan.label[-1]), 0.0)
            self.awg.set_amplitude(int(self._phys_chan.label[-2]), 1)
            self.awg.set_amplitude(int(self._phys_chan.label[-1]), 1)
            self.awg.set_mixer_amplitude_imbalance(self._phys_chan.label[-2:],
                                                   1.0)
            self.awg.set_mixer_phase_skew(self._phys_chan.label[-2:], 0.0)
        self.reset_calibration()

    def init_instruments(self):
        """Initialize instruments for mixer calibration.
        """
        for k, v in self.config_dict.items():
            if k != "sideband_modulation":
                getattr(self, k).value = v

        if isinstance(self.awg, bbn.APS2):
            self.phase_skew.assign_method(self.awg.set_mixer_phase_skew)
            self.I_offset.assign_method(lambda x: self.awg.set_offset(0, x))
            self.Q_offset.assign_method(lambda x: self.awg.set_offset(1, x))
            self.amplitude_factor.assign_method(
                self.awg.set_mixer_amplitude_imbalance)
        else:
            self.amplitude_factor.assign_method(
                lambda x: self.awg.set_mixer_amplitude_imbalance(
                    self._phys_chan.label[-2:], x))
            self.I_offset.assign_method(lambda x: self.awg.set_offset(
                int(self._phys_chan.label[-2]), x))
            self.Q_offset.assign_method(lambda x: self.awg.set_offset(
                int(self._phys_chan.label[-1]), x))
            self.phase_skew.assign_method(
                lambda x: self.awg.set_mixer_phase_skew(
                    self._phys_chan.label[-2:], x, self.SSB_FREQ))
        self.I_offset.add_post_push_hook(lambda: time.sleep(0.1))
        self.Q_offset.add_post_push_hook(lambda: time.sleep(0.1))
        self.amplitude_factor.add_post_push_hook(lambda: time.sleep(0.1))
        self.phase_skew.add_post_push_hook(lambda: time.sleep(0.1))

        for name, instr in self._instruments.items():
            # Configure with the dictionary from the instrument proxy
            instr.configure_with_proxy(instr.proxy_obj)

        #make sure the microwave generators are set up properly
        self.source.output = True
        LO_freq = self.source.frequency - self.sa.IF_FREQ
        if self.sideband_modulation:
            LO_freq -= self.SSB_FREQ
        self.LO.frequency = LO_freq
        self.LO.output = True
        self._setup_awg_ssb()
        time.sleep(0.1)

    def reset_calibration(self):
        """Set calibration back to default values.
        """
        try:
            if isinstance(self.awg, bbn.APS2):
                self.awg.set_mixer_amplitude_imbalance(1.0)
                self.awg.set_mixer_phase_skew(0.0)
                self.awg.set_offset(0, 0.0)
                self.awg.set_offset(1, 0.0)
            else:
                self.awg.set_mixer_amplitude_imbalance(
                    self._phys_chan.label[-2:], 1.0)
                self.awg.set_mixer_phase_skew(self._phys_chan.label[-2:], 0.0)
                self.awg.set_offset(int(self._phys_chan.label[-2]), 0.0)
                self.awg.set_offset(int(self._phys_chan.label[-1]), 0.0)
        except Exception as ex:
            raise Exception(
                "Could not reset mixer calibration. Is the AWG connected?"
            ) from ex

    def _setup_awg_ssb(self):
        """Set up AWGS for single sideband modulation, playing back a continuous tone.
        """
        #set up single sideband modulation IQ playback on the AWG
        self.awg.stop()
        if isinstance(self.awg, bbn.APS2):
            self.awg.load_waveform(1, 0.5 * np.ones(1200, dtype=np.float64))
            self.awg.load_waveform(2, np.zeros(1200, dtype=np.float64))
            self.awg.waveform_frequency = -self.SSB_FREQ
            self.awg.run_mode = "CW_WAVEFORM"
        else:
            iwf = 0.5 * np.cos(2 * np.pi * self.SSB_FREQ * np.arange(
                1200, dtype=np.float64) * 1e-6 / self.awg.sampling_rate)
            qwf = -0.5 * np.sin(2 * np.pi * self.SSB_FREQ * np.arange(
                1200, dtype=np.float64) * 1e-6 / self.awg.sampling_rate)
            self.awg.load_waveform(int(self._phys_chan.label[-2]), iwf)
            self.awg.load_waveform(int(self._phys_chan.label[-1]), qwf)
            self.awg.run_mode = "RUN_WAVEFORM"
            self.awg.repeat_mode = "CONTINUOUS"
            self.awg.trigger_source = "internal"
        #start playback
        self.awg.run()
        logger.debug(
            "Playing SSB CW IQ modulation on {} at frequency: {} MHz".format(
                self.awg, self.SSB_FREQ / 1e6))

    def shutdown_instruments(self):
        #reset the APS2, just in case.
        self.LO.output = False
        self.source.output = False
        self.awg.stop()

    def init_streams(self):
        pass

    def run(self):
        time.sleep(0.05)
        self.amplitude.push(self.sa.peak_amplitude())
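_setup_awg_ssb programs a continuous single-sideband tone: I carries a cosine and Q a negative sine at SSB_FREQ, so upconversion leaves a single tone offset from the LO while suppressing the image. A standalone sketch of the waveform math, with an invented sample rate:

import numpy as np

SSB_FREQ = 10e6
sampling_rate = 1.2e9             # invented; the actual AWG value differs
t = np.arange(1200) / sampling_rate

iwf = 0.5 * np.cos(2 * np.pi * SSB_FREQ * t)
qwf = -0.5 * np.sin(2 * np.pi * SSB_FREQ * t)

# In the complex-baseband view the single sideband is explicit:
# iwf + 1j*qwf == 0.5 * exp(-2j*pi*SSB_FREQ*t), a single tone offset from the LO.
baseband = iwf + 1j * qwf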
Exemple #22
class ElementwiseFilter(Filter):
    """Perform elementwise operations on multiple streams:
    e.g. multiply or add all streams element-by-element"""

    sink        = InputConnector()
    source      = OutputConnector()
    filter_name = "GenericElementwise" # To identify subclasses when naming data streams

    def __init__(self, filter_name=None, **kwargs):
        super(ElementwiseFilter, self).__init__(filter_name=filter_name, **kwargs)
        self.sink.max_input_streams = 100
        self.quince_parameters = []

    def operation(self):
        """Must be overridden with the desired mathematical function"""
        pass

    def unit(self, base_unit):
        """Must be overridden according to the desired mathematical function,
        e.g. return base_unit + "^{}".format(len(self.sink.input_streams))"""
        pass

    def update_descriptors(self):
        """Must be overridden depending on the desired mathematical function"""
        logger.debug('Updating %s "%s" descriptors based on input descriptor: %s.', self.filter_name, self.name, self.sink.descriptor)

        # Sometimes not all of the input descriptors have been updated... pause here until they are:
        if None in [ss.descriptor for ss in self.sink.input_streams]:
            logger.debug('%s "%s" waiting for all input streams to be updated.', self.filter_name, self.name)
            return

        self.descriptor = self.sink.descriptor.copy()
        if self.filter_name:
            self.descriptor.data_name = self.filter_name
        if self.descriptor.unit:
            self.descriptor.unit = self.descriptor.unit + "^{}".format(len(self.sink.input_streams))
        self.source.descriptor = self.descriptor
        self.source.update_descriptors()

    def main(self):
        self.done.clear()
        streams = self.sink.input_streams

        for s in streams[1:]:
            if not np.all(s.descriptor.expected_tuples() == streams[0].descriptor.expected_tuples()):
                raise ValueError("Multiple streams connected to correlator must have matching descriptors.")

        # Buffers for stream data
        stream_data = {s: np.zeros(0, dtype=self.sink.descriptor.dtype) for s in streams}

        # Store whether streams are done
        streams_done      = {s: False for s in streams}
        points_per_stream = {s: 0 for s in streams}

        while not self.exit.is_set():

            # Try to pull all messages in the queue. queue.empty() is not reliable, so we
            # ask for forgiveness rather than permission.
            msgs_by_stream = {s: [] for s in streams}

            for stream in streams[::-1]:
                while not self.exit.is_set():
                    try:
                        msgs_by_stream[stream].append(stream.queue.get(False))
                    except queue.Empty:
                        time.sleep(0.002)
                        break

            # Process many messages for each stream
            for stream, messages in msgs_by_stream.items():
                for message in messages:
                    message_type = message['type']
                    # message_data = message['data']
                    # message_data = message_data if hasattr(message_data, 'size') else np.array([message_data])
                    if message_type == 'event':
                        if message['event_type'] == 'done':
                            streams_done[stream] = True
                        elif message['event_type'] == 'refine':
                            logger.warning("ElementwiseFilter doesn't handle refinement yet!")
                    elif message_type == 'data':
                        # Add any old data...
                        message_data = stream.pop()
                        if message_data is not None:
                            points_per_stream[stream] += len(message_data)
                            stream_data[stream] = np.concatenate((stream_data[stream], message_data))
                            # logger.info(f"{stream.name}: {message_data} now {stream_data[stream]}")
            # Now process the data with the elementwise operation
            smallest_length = min([d.size for d in stream_data.values()])
            new_data = [d[:smallest_length] for d in stream_data.values()]
            result = new_data[0]
            for nd in new_data[1:]:
                result = self.operation()(result, nd)
            if result.size > 0:
                self.source.push(result)

            # Add data to carry_data if necessary
            for stream in stream_data.keys():
                if stream_data[stream].size > smallest_length:
                    stream_data[stream] = stream_data[stream][smallest_length:]
                else:
                    stream_data[stream] = np.zeros(0, dtype=self.sink.descriptor.dtype)

            # If the amount of data processed is equal to the num points in the stream, we are done
            if np.all([streams_done[stream] for stream in streams]):
                self.push_to_all({"type": "event", "event_type": "done", "data": None})
                self.done.set()
                break
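operation() and unit() are stubs meant to be overridden by subclasses. A minimal subclass sketch in the spirit of this example, multiplying all input streams element-by-element (much as a correlator would):

import numpy as np

class MultiplyFilter(ElementwiseFilter):
    """Multiply all input streams element-by-element."""
    filter_name = "Multiply"

    def operation(self):
        # main() calls self.operation() to fetch the binary function it
        # folds across the aligned stream buffers.
        return np.multiply

    def unit(self, base_unit):
        return base_unit + "^{}".format(len(self.sink.input_streams))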
Exemple #23
    def __init__(self):
        super(Experiment, self).__init__()
        # Experiment name
        self.name = None

        # Sweep control
        self.sweeper = Sweeper()

        # This holds the experiment graph
        self.graph = None

        # This holds a reference to a matplotlib server instance
        # for plotting, if there is one.
        self.matplot_server_thread = None
        # If this is True, don't close the plot server thread so that
        # we might push additional plots after run_sweeps is complete.
        self.leave_plot_server_open = False

        # Also keep references to all of the plot filters
        self.plotters = []  # Standard pipeline plotters using streams
        self.extra_plotters = []  # Plotters using streams, but not the pipeline
        self.manual_plotters = []  # Plotters using neither streams nor the pipeline
        self.manual_plotter_callbacks = []  # These are called at the end of run
        self._extra_plots_to_streams = {}

        # Furthermore, keep references to all of the file writers.
        # If multiple writers request access to the same filename, they
        # should share the same file object and write in separate
        # hdf5 groups.
        self.writers = []
        self.buffers = []

        # ExpProgressBar object to display progress bars
        self.progressbar = None

        # indicates whether the instruments are already connected
        self.instrs_connected = False

        # Things we can't metaclass
        self.output_connectors = {}
        for oc in self._output_connectors.keys():
            a = OutputConnector(name=oc,
                                data_name=oc,
                                unit=self._output_connectors[oc].data_unit,
                                parent=self)
            a.parent = self

            self.output_connectors[oc] = a
            setattr(self, oc, a)

        # Some instruments don't clean up well after themselves, reconstruct them on a
        # per instance basis
        for n in self._instruments.keys():
            new_cls = type(self._instruments[n])
            new_inst = new_cls(
                resource_name=self._instruments[n].resource_name,
                name=self._instruments[n].name)
            setattr(self, n, new_inst)
            self._instruments[n] = new_inst

        # Create the asyncio measurement loop
        self.loop = asyncio.get_event_loop()

        # Based on the logging level, infer whether we want asyncio debug
        do_debug = logger.getEffectiveLevel() <= logging.DEBUG
        self.loop.set_debug(do_debug)

        # Run the stream init
        self.init_streams()
Exemple #24
class ResetSearchExperiment(Experiment):

    voltage = OutputConnector()
    field = FloatParameter(default=0, unit="T")
    duration = FloatParameter(default=5e-9, unit="s")

    repeats = 200
    amplitudes = np.arange(-0.01, 0.011, 0.01)  # Reset amplitudes
    samps_per_trig = 5
    settle_delay = 50e-6
    measure_current = 3e-6

    # Instruments
    arb = KeysightM8190A("192.168.5.108")
    mag = AMI430("192.168.5.109")
    keith = Keithley2400("GPIB0::25::INSTR")
    lock = SR865("USB0::0xB506::0x2000::002638::INSTR")

    polarity = -1

    def init_streams(self):
        # Baked in data axes
        descrip = DataStreamDescriptor()
        descrip.data_name = 'voltage'
        descrip.add_axis(DataAxis("sample", range(self.samps_per_trig)))
        descrip.add_axis(DataAxis("amplitude", self.amplitudes))
        descrip.add_axis(DataAxis("repeat", range(self.repeats)))
        self.voltage.set_descriptor(descrip)

    def init_instruments(self):
        # Set up Keithley
        self.keith.triad()
        self.keith.conf_meas_res(res_range=1e6)
        self.keith.conf_src_curr(comp_voltage=0.6, curr_range=1.0e-5)
        self.keith.current = self.measure_current
        self.mag.ramp()

        self.arb.set_output(True, channel=1)
        self.arb.set_output(False, channel=2)
        self.arb.sample_freq = 12.0e9
        self.arb.waveform_output_mode = "WSPEED"
        self.setup_AWG()

        self.analog_input = Task()
        self.read = int32()
        self.buf_points = len(self.amplitudes) * self.samps_per_trig * self.repeats
        self.analog_input.CreateAIVoltageChan("Dev1/ai1", "", DAQmx_Val_Diff,
                                              0.0, 0.5, DAQmx_Val_Volts, None)
        self.analog_input.CfgSampClkTiming("", 1e6, DAQmx_Val_Rising,
                                           DAQmx_Val_FiniteSamps,
                                           self.samps_per_trig)
        self.analog_input.CfgInputBuffer(self.buf_points)
        self.analog_input.CfgDigEdgeStartTrig("/Dev1/PFI0", DAQmx_Val_Rising)
        self.analog_input.SetStartTrigRetriggerable(1)
        self.analog_input.StartTask()

        # Assign methods
        self.field.assign_method(self.mag.set_field)
        self.duration.assign_method(self.setup_AWG)

    def setup_AWG(self, *args):
        self.arb.abort()
        self.arb.delete_all_waveforms()
        self.arb.reset_sequence_table()

        self.arb.set_output_route("DC", channel=1)
        self.arb.voltage_amplitude = 1.0

        self.arb.set_marker_level_low(0.0, channel=1, marker_type="sync")
        self.arb.set_marker_level_high(1.5, channel=1, marker_type="sync")

        self.arb.continuous_mode = False
        self.arb.gate_mode = False

        def arb_pulse(amplitude, sample_rate=12e9):
            pulse_points = int(self.duration.value * sample_rate)

            if pulse_points < 320:
                wf = np.zeros(320)
            else:
                wf = np.zeros(64 * int(np.ceil(pulse_points / 64.0)))
            wf[:pulse_points] = amplitude
            return wf

        segment_ids = []
        arb_voltage = arb_voltage_lookup()
        for amp in self.amplitudes:
            waveform = arb_pulse(np.sign(amp) * arb_voltage(abs(amp)))
            wf_data = KeysightM8190A.create_binary_wf_data(waveform)
            segment_id = self.arb.define_waveform(len(wf_data))
            segment_ids.append(segment_id)
            self.arb.upload_waveform(wf_data, segment_id)

        # NIDAQ trigger waveform
        nidaq_trig_wf = KeysightM8190A.create_binary_wf_data(np.zeros(3200),
                                                             sync_mkr=1)
        nidaq_trig_segment_id = self.arb.define_waveform(len(nidaq_trig_wf))
        self.arb.upload_waveform(nidaq_trig_wf, nidaq_trig_segment_id)

        settle_pts = int(640 * np.ceil(self.settle_delay * 12e9 / 640))
        start_idxs = [0]

        scenario = Scenario()
        seq = Sequence(sequence_loop_ct=int(self.repeats))
        for si in segment_ids:
            # seq = Sequence(sequence_loop_ct=int(1))
            seq.add_waveform(si)  # Apply switching pulse to the sample
            seq.add_idle(settle_pts, 0.0)  # Wait for the measurement to settle
            seq.add_waveform(
                nidaq_trig_segment_id)  # Trigger the NIDAQ measurement
            seq.add_idle(1 << 14, 0.0)  # bonus non-contiguous memory delay
        scenario.sequences.append(seq)

        self.arb.upload_scenario(scenario, start_idx=start_idxs[-1])
        start_idxs.append(start_idxs[-1] + len(scenario.scpi_strings()))
        # The last entry is erroneous
        start_idxs = start_idxs[:-1]

        self.arb.sequence_mode = "SCENARIO"
        self.arb.scenario_advance_mode = "REPEAT"
        self.arb.stop()
        self.arb.scenario_start_index = 0
        self.arb.run()

    async def run(self):
        # Establish buffers
        buffers = np.empty(self.buf_points)
        self.arb.advance()
        self.arb.trigger()
        self.analog_input.ReadAnalogF64(self.buf_points, -1,
                                        DAQmx_Val_GroupByChannel,
                                        buffers, self.buf_points,
                                        byref(self.read), None)
        logger.debug("Read a buffer of {} points".format(buffers.size))
        await self.voltage.push(buffers)
        # Seemingly we need to give the filters some time to catch up here...
        await asyncio.sleep(0.02)
        logger.debug("Stream has filled {} of {} points".format(
            self.voltage.points_taken, self.voltage.num_points()))

    def shutdown_instruments(self):
        try:
            self.analog_input.StopTask()
        except Exception:
            logger.warning("Failed to stop the NIDAQ task; this is typical and harmless here.")
        self.arb.stop()
        self.keith.current = 0.0
        self.mag.disconnect()
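arb_voltage_lookup() is defined elsewhere in the original module; presumably it inverts a measured AWG-setting-versus-output calibration. A hypothetical sketch using simple interpolation (the file name and column layout here are invented):

import numpy as np
from scipy.interpolate import interp1d

def arb_voltage_lookup(calib_file="calibration/awg_calibration.csv"):
    """Hypothetical: invert a measured (AWG setting, voltage at sample)
    calibration so callers can request a sample voltage directly."""
    awg_v, sample_v = np.loadtxt(calib_file, delimiter=",", unpack=True)
    return interp1d(sample_v, awg_v)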
Exemple #25
class TestExperiment(Experiment):
    """Here the run loop merely spews data until it fills up the stream. """

    # Create instances of instruments
    fake_instr_1 = TestInstrument("FAKE::RESOURCE::NAME")

    # Parameters
    field = FloatParameter(unit="Oe")
    freq = FloatParameter(unit="Hz")

    # DataStreams
    voltage = OutputConnector(unit="V")

    # Constants
    num_samples = 1024
    delays = 1e-9 * np.arange(100, 10001, 100)
    round_robins = 2
    sampling_period = 2e-9
    T2 = 5e-6

    def init_instruments(self):
        pass

    def init_streams(self):
        descrip = DataStreamDescriptor()
        descrip.add_axis(
            DataAxis("samples", 2e-9 * np.arange(self.num_samples)))
        descrip.add_axis(DataAxis("delay", self.delays))
        descrip.add_axis(DataAxis("round_robins",
                                  np.arange(self.round_robins)))
        self.voltage.set_descriptor(descrip)

    def __repr__(self):
        return "<TestExperiment>"

    async def run(self):
        pulse_start = 250
        pulse_width = 700

        #fake the response for a Ramsey frequency experiment with a gaussian excitation profile
        idx = 0
        for _ in range(self.round_robins):
            for delay in self.delays:
                if idx == 0:
                    records = np.zeros((5, self.num_samples), dtype=np.float32)
                await asyncio.sleep(0.01)
                records[idx,pulse_start:pulse_start+pulse_width] = np.exp(-0.5*(self.freq.value/2e6)**2) * \
                                                              np.exp(-delay/self.T2) * \
                                                              np.sin(2*np.pi * 10e6 * self.sampling_period*np.arange(pulse_width) \
                                                              + np.cos(2*np.pi * self.freq.value * delay))

                #add noise
                records[idx] += 0.1 * np.random.randn(self.num_samples)

                if idx == 4:
                    await self.voltage.push(records.flatten())
                    idx = 0
                else:
                    idx += 1

        logger.debug("Stream has filled {} of {} points".format(
            self.voltage.points_taken, self.voltage.num_points()))
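The fake response above encodes the expected Ramsey physics: a Gaussian excitation profile in detuning and an exponential decay in delay. Stripped of the record bookkeeping, the envelope is:

import numpy as np

def ramsey_envelope(freq, delay, T2=5e-6, sigma_f=2e6):
    """Envelope of the fake response in run(): Gaussian excitation
    profile in detuning times exponential T2 decay in delay."""
    return np.exp(-0.5 * (freq / sigma_f) ** 2) * np.exp(-delay / T2)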
Exemple #26
class nTronSwitchingExperiment(Experiment):

    # Parameters
    channel_bias = FloatParameter(default=0.05, unit="V")  # On the 33220A
    gate_bias = FloatParameter(default=0.0, unit="V")  # On the M8190A

    # Constants (set with attribute access if you want to change these!)
    attempts = 1 << 8
    samples = 384  #1024 + 16*20
    measure_amplitude = 0.1
    measure_duration = 250.0e-9
    measure_frequency = 100e6

    # arb
    sample_rate = 12e9
    repeat_time = 4 * 2.4e-6  # Picked very carefully for 100ns alignment

    # Things coming back
    voltage = OutputConnector()

    # Instrument resources
    arb = KeysightM8190A("192.168.5.108")
    awg = Agilent33220A("192.168.5.198")
    alz = AlazarATS9870("1")

    def __init__(self, gate_amps, gate_durs):
        self.gate_amps = gate_amps
        self.gate_durs = gate_durs
        super(nTronSwitchingExperiment, self).__init__()

    def init_instruments(self):

        self.awg.function = 'Pulse'
        self.awg.frequency = 0.5e6
        self.awg.pulse_width = 1e-6
        self.awg.low_voltage = 0.0
        self.awg.high_voltage = self.channel_bias.value
        self.awg.burst_state = True
        self.awg.burst_cycles = 1
        self.awg.trigger_source = "External"
        self.awg.output = True

        self.ch = AlazarChannel({'channel': 1})
        self.alz.add_channel(self.ch)
        alz_cfg = {
            'acquire_mode': 'digitizer',
            'bandwidth': 'Full',
            'clock_type': 'ref',
            'delay': 850e-9,
            'enabled': True,
            'label': "Alazar",
            'record_length': self.samples,
            'nbr_segments': len(self.gate_amps) * len(self.gate_durs),
            'nbr_waveforms': 1,
            'nbr_round_robins': self.attempts,
            'sampling_rate': 1e9,
            'trigger_coupling': 'DC',
            'trigger_level': 125,
            'trigger_slope': 'rising',
            'trigger_source': 'Ext',
            'vertical_coupling': 'AC',
            'vertical_offset': 0.0,
            'vertical_scale': 0.1,
        }
        self.alz.set_all(alz_cfg)
        self.loop.add_reader(self.alz.get_socket(self.ch),
                             self.alz.receive_data, self.ch, self.voltage)

        self.arb.set_output(True, channel=2)
        self.arb.set_output(True, channel=1)
        self.arb.sample_freq = self.sample_rate
        self.arb.set_waveform_output_mode("WSPEED", channel=1)
        self.arb.set_waveform_output_mode("WSPEED", channel=2)
        self.arb.set_output_route("DC", channel=1)
        self.arb.set_output_route("DC", channel=2)
        self.arb.set_output_complement(False, channel=1)
        self.arb.set_output_complement(False, channel=2)
        self.arb.set_voltage_amplitude(1.0, channel=1)
        self.arb.set_voltage_amplitude(1.0, channel=2)
        self.arb.continuous_mode = False
        self.arb.gate_mode = False
        self.arb.set_marker_level_low(0.0, channel=1, marker_type="sync")
        self.arb.set_marker_level_high(1.5, channel=1, marker_type="sync")
        self.arb.set_marker_level_low(0.0, channel=2, marker_type="sync")
        self.arb.set_marker_level_high(1.5, channel=2, marker_type="sync")

        self.setup_arb()  # Sequencing goes here; formerly took self.gate_bias.value, self.gate_pulse_amplitude.value, self.gate_pulse_duration.value

    def setup_arb(self):  # formerly took gate_bias, gate_pulse_amplitude, gate_pulse_duration
        self.arb.abort()
        self.arb.delete_all_waveforms()
        self.arb.reset_sequence_table()

        seg_ids_ch1 = []
        seg_ids_ch2 = []

        # For the measurements pulses along the channel
        wf = measure_pulse(amplitude=self.measure_amplitude,
                           duration=self.measure_duration,
                           frequency=self.measure_frequency)
        wf_data = KeysightM8190A.create_binary_wf_data(wf, sync_mkr=1)
        seg_id = self.arb.define_waveform(len(wf_data), channel=1)
        self.arb.upload_waveform(wf_data, seg_id, channel=1)
        seg_ids_ch1.append(seg_id)

        # Build in a delay between sequences
        settle_pts = 640 * int(
            np.ceil(self.repeat_time * self.sample_rate / 640))
        # settle_pts2 = 640*np.ceil(8*2.4e-9 * self.sample_rate / 640)

        scenario = Scenario()
        seq = Sequence(sequence_loop_ct=self.attempts * len(self.gate_amps) *
                       len(self.gate_durs))
        for si in seg_ids_ch1:
            seq.add_waveform(si)
            seq.add_idle(settle_pts, 0.0)
        scenario.sequences.append(seq)
        self.arb.upload_scenario(scenario, start_idx=0, channel=1)

        for amp in self.gate_amps:
            for dur in self.gate_durs:
                # For the switching pulses along the gate
                wf = switching_pulse(
                    amplitude=amp,
                    duration=dur)  #self.gate_pulse_duration.value)
                wf_data = KeysightM8190A.create_binary_wf_data(wf)
                seg_id = self.arb.define_waveform(len(wf_data), channel=2)
                self.arb.upload_waveform(wf_data, seg_id, channel=2)
                seg_ids_ch2.append(seg_id)

        scenario = Scenario()
        seq = Sequence(sequence_loop_ct=self.attempts)
        for si in seg_ids_ch2:
            seq.add_waveform(si)
            seq.add_idle(settle_pts, 0.0)
        scenario.sequences.append(seq)
        self.arb.upload_scenario(scenario, start_idx=0, channel=2)

        self.arb.set_sequence_mode("SCENARIO", channel=1)
        self.arb.set_scenario_advance_mode("SINGLE", channel=1)
        self.arb.set_scenario_start_index(0, channel=1)
        self.arb.set_sequence_mode("SCENARIO", channel=2)
        self.arb.set_scenario_advance_mode("SINGLE", channel=2)
        self.arb.set_scenario_start_index(0, channel=2)
        self.arb.initiate(channel=1)
        self.arb.initiate(channel=2)
        self.arb.advance()

    def init_streams(self):
        # Baked in data axes
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("time", 1e-9 * np.arange(self.samples)))
        if len(self.gate_durs) > 1:
            descrip.add_axis(DataAxis("gate_pulse_duration", self.gate_durs))
        descrip.add_axis(DataAxis("gate_pulse_amplitude", self.gate_amps))
        descrip.add_axis(DataAxis("attempt", range(self.attempts)))

        self.voltage.set_descriptor(descrip)

    async def run(self):
        # self.arb.stop()
        self.arb.set_scenario_start_index(0, channel=1)
        self.arb.set_scenario_start_index(0, channel=2)
        self.arb.advance()
        await asyncio.sleep(0.3)
        self.alz.acquire()
        await asyncio.sleep(0.3)
        self.arb.trigger()
        await self.alz.wait_for_acquisition(10.0)
        await asyncio.sleep(0.8)
        self.alz.stop()
        # Seemingly we need to give the filters some time to catch up here...
        await asyncio.sleep(0.02)
        logger.info("Stream has filled {} of {} points".format(
            self.voltage.points_taken, self.voltage.num_points()))

    def shutdown_instruments(self):
        self.awg.output = False

        self.arb.stop()
        self.loop.remove_reader(self.alz.get_socket(self.ch))

        for name, instr in self._instruments.items():
            instr.disconnect()
Example #27
class Channelizer(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel. If
    an axis name is supplied to `follow_axis` then the filter will demodulate at the freqency
    `axis_frequency_value - follow_freq_offset` otherwise it will demodulate at `frequency`. Note that
    the filter coefficients are still calculated with respect to the `frequency` paramter, so it should
    be chosen accordingly when `follow_axis` is defined."""

    sink = InputConnector()
    source = OutputConnector()
    follow_axis = Parameter(default="")  # Name of the axis to follow
    follow_freq_offset = FloatParameter(default=0.0)  # Offset
    decimation_factor = IntParameter(value_range=(1, 100), default=4, snap=1)
    frequency = FloatParameter(value_range=(-10e9, 10e9),
                               increment=1.0e6,
                               default=10e6)
    bandwidth = FloatParameter(value_range=(0.00, 100e6),
                               increment=0.1e6,
                               default=5e6)

    def __init__(self,
                 frequency=None,
                 bandwidth=None,
                 decimation_factor=None,
                 follow_axis=None,
                 follow_freq_offset=None,
                 **kwargs):
        super(Channelizer, self).__init__(**kwargs)
        if frequency is not None:
            self.frequency.value = frequency
        if bandwidth is not None:
            self.bandwidth.value = bandwidth
        if decimation_factor is not None:
            self.decimation_factor.value = decimation_factor
        if follow_axis is not None:
            self.follow_axis.value = follow_axis
        if follow_freq_offset is not None:
            self.follow_freq_offset.value = follow_freq_offset
        self.quince_parameters = [
            self.decimation_factor, self.frequency, self.bandwidth
        ]
        self._phase = 0.0

    def final_init(self):
        self.init_filters(self.frequency.value, self.bandwidth.value)

        if self.follow_axis.value != "":
            desc = self.sink.descriptor
            axis_num = desc.axis_num(self.follow_axis.value)
            self.pts_before_freq_update = desc.num_points_through_axis(
                axis_num + 1)
            self.pts_before_freq_reset = desc.num_points_through_axis(axis_num)
            self.demod_freqs = desc.axes[
                axis_num].points - self.follow_freq_offset.value
            self.current_freq = 0
            self.update_references(self.current_freq)
        self.idx = 0

        # For storing carryover if getting uneven buffers
        self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

    def update_references(self, frequency):
        # store decimated reference for mix down
        # phase_drift = 2j*np.pi*0.5e-6 * (abs(frequency) - 100e6)
        ref = np.exp(2j * np.pi * -frequency * self.time_pts[::self.d1] +
                     1j * self._phase,
                     dtype=np.complex64)

        self.reference = ref
        self.reference_r = np.real(ref)
        self.reference_i = np.imag(ref)

    def init_filters(self, frequency, bandwidth):
        # normalize bandwidth and frequency to the Nyquist interval
        n_bandwidth = bandwidth * self.time_step * 2
        n_frequency = abs(frequency) * self.time_step * 2

        # arbitrarily decide on a three-stage filter pipeline
        # 1. first stage decimating filter on real data
        # 2. second stage decimating filter on the mixed-down product to boost n_bandwidth
        # 3. final channel-selecting filter at n_bandwidth/2

        # anecdotally don't decimate more than a factor of eight for stability
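        # Illustrative numbers: with a 1 ns time step (1 GS/s), frequency = 10 MHz,
        # bandwidth = 5 MHz, and decimation_factor = 16, the loops below select
        # d1 = 8 and d2 = 2, leaving 16 // (8 * 2) = 1 for the final stage.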

        self.decim_factors = [1] * 3
        self.filters = [None] * 3

        # first stage decimating filter
        # maximize first stage decimation:
        #     * minimizes the time taken by subsequent stages
        #     * filter and decimate while the signal is still real
        #     * first stage decimation cannot be too large or the 2*omega product from mixing will alias
        self.d1 = 1
        while (self.d1 < 8) and (2 * n_frequency <= 0.8 / self.d1) and (
                self.d1 < self.decimation_factor.value):
            self.d1 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d1 > 1:
            # create an anti-aliasing filter
            # pass-band edge at 0.8 / decimation factor (relative to Nyquist); anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d1)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[0] = self.d1
            self.filters[0] = (b, a)

        # store decimated reference for mix down
        self.update_references(frequency)

        # second stage decimating filter on the mixed-down product
        # decimation cannot be too large or it will impinge on the channel bandwidth (keep n_bandwidth/2 <= 0.8)
        self.d2 = 1
        while (self.d2 < 8) and (
            (self.d1 * self.d2) <
                self.decimation_factor.value) and (n_bandwidth / 2 <= 0.8):
            self.d2 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d2 > 1:
            # create an anti-aliasing filter
            # pass-band edge at 0.8 / decimation factor (relative to Nyquist); anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d2)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[1] = self.d2
            self.filters[1] = (b, a)

        # final channel selection filter
        if n_bandwidth < 0.1:
            raise ValueError(
                "Insufficient decimation to achieve stable filter: {}.".format(
                    n_bandwidth))

        b, a = scipy.signal.cheby1(4, 3, n_bandwidth / 2)
        b = np.float32(b)
        a = np.float32(a)
        self.decim_factors[2] = self.decimation_factor.value // (self.d1 *
                                                                 self.d2)
        self.filters[2] = (b, a)

    def update_descriptors(self):
        logger.debug(
            'Updating Channelizer "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        # extract record time sampling
        self.time_pts = self.sink.descriptor.axes[-1].points
        self.record_length = len(self.time_pts)
        self.time_step = self.time_pts[1] - self.time_pts[0]
        logger.debug("Channelizer time_step = {}".format(self.time_step))

        # We will be decimating along a time axis, which is always
        # going to be the last axis given the way we usually take data.
        # TODO: perform this function along a named axis rather than a numbered axis
        # in case something about this changes.

        # update output descriptors
        decimated_descriptor = DataStreamDescriptor()
        decimated_descriptor.axes = self.sink.descriptor.axes[:]
        decimated_descriptor.axes[-1] = deepcopy(self.sink.descriptor.axes[-1])
        decimated_descriptor.axes[-1].points = self.sink.descriptor.axes[
            -1].points[self.decimation_factor.value -
                       1::self.decimation_factor.value]
        decimated_descriptor.axes[
            -1].original_points = decimated_descriptor.axes[-1].points
        decimated_descriptor._exp_src = self.sink.descriptor._exp_src
        decimated_descriptor.dtype = np.complex64
        self.output_descriptor = decimated_descriptor
        for os in self.source.output_streams:
            os.set_descriptor(decimated_descriptor)
            if os.end_connector is not None:
                os.end_connector.update_descriptors()

    async def process_data(self, data):

        # Append any data carried from the last run
        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))

        # This is the largest number of records we can handle
        num_records = data.size // self.record_length

        # This is the carryover that we'll store until next round.
        # If nothing is left then reset the carryover.
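        # (e.g. with record_length = 1000 and a 2500-point buffer, two full
        # records are processed now and the trailing 500 points carry over)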
        remaining_points = data.size % self.record_length
        if remaining_points > 0:
            if num_records > 0:
                self.carry = data[-remaining_points:]
                data = data[:-remaining_points]
            else:
                self.carry = data
        else:
            self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

        if num_records > 0:
            # The records are processed in parallel after being reshaped here
            reshaped_data = np.reshape(data, (num_records, self.record_length),
                                       order="C")

            # Update demodulation frequency if necessary
            if self.follow_axis.value != "":
                freq = self.demod_freqs[(self.idx % self.pts_before_freq_reset)
                                        // self.pts_before_freq_update]
                if freq != self.current_freq:
                    self.update_references(freq)
                    self.current_freq = freq

            self.idx += data.size

            # first stage decimating filter
            if self.filters[0] is None:
                filtered = reshaped_data
            else:
                stacked_coeffs = np.concatenate(self.filters[0])
                # filter
                if np.iscomplexobj(reshaped_data):
                    # TODO: compile complex versions of the IPP functions
                    filtered_r = np.empty_like(reshaped_data, dtype=np.float32)
                    filtered_i = np.empty_like(reshaped_data, dtype=np.float32)
                    libipp.filter_records_iir(
                        stacked_coeffs, self.filters[0][0].size - 1,
                        np.ascontiguousarray(
                            reshaped_data.real.astype(np.float32)),
                        self.record_length, num_records, filtered_r)
                    libipp.filter_records_iir(
                        stacked_coeffs, self.filters[0][0].size - 1,
                        np.ascontiguousarray(
                            reshaped_data.imag.astype(np.float32)),
                        self.record_length, num_records, filtered_i)
                    filtered = filtered_r + 1j * filtered_i
                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]
                else:
                    filtered = np.empty_like(reshaped_data)
                    libipp.filter_records_iir(stacked_coeffs,
                                              self.filters[0][0].size - 1,
                                              reshaped_data,
                                              self.record_length, num_records,
                                              filtered)

                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]

            # mix with reference
            # keep real and imaginary separate for filtering below
            if np.iscomplexobj(reshaped_data):
                filtered *= self.reference
                filtered_r = filtered.real
                filtered_i = filtered.imag
            else:
                filtered_r = self.reference_r * filtered
                filtered_i = self.reference_i * filtered

            # channel selection filters
            for ct in [1, 2]:
                if self.filters[ct] is None:
                    continue

                stacked_coeffs = np.concatenate(self.filters[ct])
                out_r = np.empty_like(filtered_r).astype(np.float32)
                out_i = np.empty_like(filtered_i).astype(np.float32)
                libipp.filter_records_iir(
                    stacked_coeffs, self.filters[ct][0].size - 1,
                    np.ascontiguousarray(filtered_r.astype(np.float32)),
                    filtered_r.shape[-1], num_records, out_r)
                libipp.filter_records_iir(
                    stacked_coeffs, self.filters[ct][0].size - 1,
                    np.ascontiguousarray(filtered_i.astype(np.float32)),
                    filtered_i.shape[-1], num_records, out_i)

                # decimate
                if self.decim_factors[ct] > 1:
                    filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]],
                                         order="C")
                    filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]],
                                         order="C")
                else:
                    filtered_r = out_r
                    filtered_i = out_i

            filtered = filtered_r + 1j * filtered_i

            # recover gain from selecting single sideband
            filtered *= 2

            # push to output connectors
            for os in self.source.output_streams:
                await os.push(filtered)
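
# A minimal construction sketch (added; not part of the original example). The
# keyword arguments mirror Channelizer.__init__ above; how the filter is wired
# into an experiment graph depends on the surrounding framework, so the wiring
# is only hinted at in the trailing comment.
demod = Channelizer(frequency=10e6,        # demodulation frequency in Hz
                    bandwidth=5e6,         # channel bandwidth in Hz
                    decimation_factor=16)  # total decimation across all stages
# demod.sink would be fed from a digitizer stream and demod.source connected
# to a downstream filter such as a KernelIntegrator.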
Example #28
class KernelIntegrator(Filter):
    """Integrate with a given kernel. The kernel will be padded or truncated to match the record length."""

    sink = InputConnector()
    source = OutputConnector()
    kernel = Parameter()
    bias = FloatParameter(default=0.0)
    simple_kernel = BoolParameter(default=True)
    box_car_start = FloatParameter(default=0.0)
    box_car_stop = FloatParameter(default=100e-9)
    demod_frequency = FloatParameter(default=0.0)

    def __init__(self, **kwargs):
        super(KernelIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        # self.quince_parameters = [self.simple_kernel, self.demod_frequency, self.box_car_start, self.box_car_stop]

    def update_descriptors(self):
        if not self.simple_kernel.value and self.kernel.value is None:
            raise ValueError(
                "KernelIntegrator requires a kernel when simple_kernel is False")

        logger.debug(
            'Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
            self.filter_name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()

        if self.kernel.value:
            if os.path.exists(
                    os.path.join(config.KernelDir,
                                 self.kernel.value + '.txt')):
                kernel = np.loadtxt(
                    os.path.join(config.KernelDir, self.kernel.value + '.txt'),
                    dtype=complex,
                    converters={
                        0: lambda s: complex(s.decode().replace('+-', '-'))
                    })
            else:
                try:
                    kernel = eval(self.kernel.value.encode('unicode_escape'))
                except Exception:
                    raise ValueError(
                        'Kernel invalid. Provide a file name or an expression to evaluate'
                    )
            if self.simple_kernel.value:
                logger.warning(
                    "Using specified kernel. To use a box car filter instead, clear kernel.value"
                )

        elif self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.demod_frequency.value *
                             time_pts)
        else:
            raise ValueError(
                'Kernel invalid. Either provide a file name or an expression to evaluate or set simple_kernel.value to true'
            )
        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for ost in self.source.output_streams:
            ost.set_descriptor(output_descriptor)
            ost.end_connector.update_descriptors()

    def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            os.push(filtered)
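
# A minimal construction sketch (added; not part of the original example): a
# box-car integrator over the first 100 ns of each record, demodulating at
# 10 MHz. The keyword-to-Parameter routing relies on the loop in __init__
# above, and assumes Filter.__init__ tolerates the extra keyword arguments.
integrator = KernelIntegrator(simple_kernel=True,
                              box_car_start=0.0,
                              box_car_stop=100e-9,
                              demod_frequency=10e6)
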
class SwitchingExperiment(Experiment):

    # Parameters and outputs
    field = FloatParameter(default=0.0, unit="T")
    pulse_duration = FloatParameter(default=5.0e-9, unit="s")
    pulse_voltage = FloatParameter(default=0.1, unit="V")
    voltage = OutputConnector()

    # Constants (set with attribute access if you want to change these!)
    attempts = 1 << 9
    settle_delay = 100e-6
    measure_current = 3.0e-6
    samps_per_trig = 15
    polarity = 1
    pspl_atten = 4
    min_daq_voltage = -1
    max_daq_voltage = 1
    reset_amplitude = 0.2
    reset_duration = 5.0e-9
    circuit_attenuation = 20.0
    tc = 300e-6

    # Instrument Resources
    mag = AMI430("192.168.5.109")
    lock = SR865("USB0::0xB506::0x2000::002638::INSTR")
    pspl = Picosecond10070A("GPIB0::24::INSTR")
    atten = Attenuator("calibration/RFSA2113SB_HPD_20160901.csv", lock.set_ao2,
                       lock.set_ao3)
    arb = KeysightM8190A("192.168.5.108")

    # keith = Keithley2400("GPIB0::25::INSTR")

    def init_streams(self):
        descrip = DataStreamDescriptor()
        descrip.data_name = 'voltage'
        descrip.add_axis(DataAxis("sample", range(self.samps_per_trig)))
        descrip.add_axis(DataAxis("state", range(2)))
        descrip.add_axis(DataAxis("attempt", range(self.attempts)))
        self.voltage.set_descriptor(descrip)

    def init_instruments(self):
        self.lock.tc = self.tc

        # Setup the Keithley
        # self.keith.triad()
        # self.keith.conf_meas_res(res_range=1e5)
        # self.keith.conf_src_curr(comp_voltage=0.5, curr_range=1.0e-5)
        # self.keith.current = self.measure_current
        self.mag.ramp()

        # Setup the AWG
        self.arb.set_output(True, channel=1)
        self.arb.set_output(False, channel=2)
        self.arb.sample_freq = 12.0e9
        self.arb.waveform_output_mode = "WSPEED"
        self.arb.abort()
        self.arb.delete_all_waveforms()
        self.arb.reset_sequence_table()
        self.arb.set_output_route("DC", channel=1)
        self.arb.voltage_amplitude = 1.0
        self.arb.set_marker_level_low(0.0, channel=1, marker_type="sync")
        self.arb.set_marker_level_high(1.5, channel=1, marker_type="sync")
        self.arb.continuous_mode = False
        self.arb.gate_mode = False

        def arb_pulse(amplitude, duration, sample_rate=12e9):
            arb_voltage = arb_voltage_lookup()
            pulse_points = int(duration * sample_rate)
            if pulse_points < 320:
                wf = np.zeros(320)
            else:
                wf = np.zeros(int(64 * np.ceil(pulse_points / 64.0)))
            wf[:pulse_points] = np.sign(amplitude) * arb_voltage(
                abs(amplitude))
            return wf

        reset_wf = arb_pulse(
            -self.polarity * self.reset_amplitude *
            np.power(10.0, self.circuit_attenuation / 20.0),
            self.reset_duration)
        wf_data = KeysightM8190A.create_binary_wf_data(reset_wf)
        rst_segment_id = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, rst_segment_id)

        # no_reset_wf = arb_pulse(0.0, 3.0/12e9)
        # wf_data     = KeysightM8190A.create_binary_wf_data(no_reset_wf)
        # no_rst_segment_id  = self.arb.define_waveform(len(wf_data))
        # self.arb.upload_waveform(wf_data, no_rst_segment_id)

        # Picosecond trigger waveform
        pspl_trig_wf = KeysightM8190A.create_binary_wf_data(np.zeros(3200),
                                                            samp_mkr=1)
        pspl_trig_segment_id = self.arb.define_waveform(len(pspl_trig_wf))
        self.arb.upload_waveform(pspl_trig_wf, pspl_trig_segment_id)

        # NIDAQ trigger waveform
        nidaq_trig_wf = KeysightM8190A.create_binary_wf_data(np.zeros(3200),
                                                             sync_mkr=1)
        nidaq_trig_segment_id = self.arb.define_waveform(len(nidaq_trig_wf))
        self.arb.upload_waveform(nidaq_trig_wf, nidaq_trig_segment_id)

        settle_pts = int(640 * np.ceil(self.settle_delay * 12e9 / 640))

        scenario = Scenario()
        seq = Sequence(sequence_loop_ct=int(self.attempts))
        #First try with reset flipping pulse
        seq.add_waveform(rst_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0)  # bonus non-contiguous memory delay
        seq.add_waveform(pspl_trig_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0)  # bonus non-contiguous memory delay
        scenario.sequences.append(seq)
        self.arb.upload_scenario(scenario, start_idx=0)
        self.arb.sequence_mode = "SCENARIO"
        self.arb.scenario_advance_mode = "REPEAT"
        self.arb.scenario_start_index = 0
        self.arb.run()

        # Setup the NIDAQ
        self.analog_input = Task()
        self.read = int32()
        self.buf_points = 2 * self.samps_per_trig * self.attempts
        self.analog_input.CreateAIVoltageChan("Dev1/ai0", "", DAQmx_Val_Diff,
                                              self.min_daq_voltage,
                                              self.max_daq_voltage,
                                              DAQmx_Val_Volts, None)
        self.analog_input.CfgSampClkTiming("", 1e6, DAQmx_Val_Rising,
                                           DAQmx_Val_FiniteSamps,
                                           self.samps_per_trig)
        self.analog_input.CfgInputBuffer(self.buf_points)
        self.analog_input.CfgDigEdgeStartTrig("/Dev1/PFI0", DAQmx_Val_Rising)
        self.analog_input.SetStartTrigRetriggerable(1)
        self.analog_input.StartTask()

        # Setup the PSPL
        self.pspl.amplitude = self.polarity * 7.5 * np.power(
            10, (-self.pspl_atten) / 20.0)
        self.pspl.trigger_source = "EXT"
        self.pspl.trigger_level = 0.1
        self.pspl.output = True

        def set_voltage(voltage):
            # Calculate the voltage controller attenuator setting
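            # e.g. voltage = 0.1 V: |20 * log10(0.1 / 7.5)| = 37.5 dB, so with
            # pspl_atten = 4 and circuit_attenuation = 20 the setting is 13.5 dB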
            vc_atten = abs(20.0 * np.log10(abs(voltage) / 7.5)
                           ) - self.pspl_atten - self.circuit_attenuation
            if vc_atten <= 6.0:
                raise ValueError(
                    "Voltage controlled attenuation under range (6dB).")
            self.atten.set_attenuation(vc_atten)
            time.sleep(0.02)

        # Assign methods
        self.field.assign_method(self.mag.set_field)
        self.pulse_duration.assign_method(self.pspl.set_duration)
        self.pulse_voltage.assign_method(set_voltage)

        # Create hooks for relevant delays
        self.pulse_duration.add_post_push_hook(lambda: time.sleep(0.1))

    async def run(self):
        """This is run for each step in a sweep."""
        self.arb.advance()
        self.arb.trigger()
        buf = np.empty(self.buf_points)
        self.analog_input.ReadAnalogF64(self.buf_points, -1,
                                        DAQmx_Val_GroupByChannel, buf,
                                        self.buf_points, byref(self.read),
                                        None)
        await self.voltage.push(buf)
        # Seemingly we need to give the filters some time to catch up here...
        await asyncio.sleep(0.02)
        logger.debug("Stream has filled {} of {} points".format(
            self.voltage.points_taken, self.voltage.num_points()))

    def shutdown_instruments(self):
        # self.keith.current = 0.0e-5
        # self.mag.zero()
        self.arb.stop()
        self.pspl.output = False
        try:
            self.analog_input.StopTask()
        except Exception:
            print(
                "Warning: failed to stop task (this normally happens with no consequences when taking multiple samples per trigger)."
            )
Example #30
class ElementwiseFilter(Filter):
    """Asynchronously perform elementwise operations on multiple streams:
    e.g. multiply or add all streams element-by-element"""

    sink = InputConnector()
    source = OutputConnector()
    filter_name = "GenericElementwise"  # To identify subclasses when naming data streams

    def __init__(self, **kwargs):
        super(ElementwiseFilter, self).__init__(**kwargs)
        self.sink.max_input_streams = 100
        self.quince_parameters = []

    def operation(self):
        """Must be overridden with the desired mathematical function"""
        pass

    def unit(self, base_unit):
        """Must be overridden accoriding the desired mathematical function
        e.g. return base_unit + "^{}".format(len(self.sink.input_streams))"""
        pass

    def update_descriptors(self):
        """Must be overridden depending on the desired mathematical function"""
        logger.debug(
            'Updating %s "%s" descriptors based on input descriptor: %s.',
            self.filter_name, self.name, self.sink.descriptor)

        # Sometimes not all of the input descriptors have been updated... pause here until they are:
        if None in [ss.descriptor for ss in self.sink.input_streams]:
            logger.debug(
                '%s "%s" waiting for all input streams to be updated.',
                self.filter_name, self.name)
            return

        self.descriptor = self.sink.descriptor.copy()
        self.descriptor.data_name = self.filter_name
        if self.descriptor.unit:
            self.descriptor.unit = self.descriptor.unit + "^{}".format(
                len(self.sink.input_streams))
        self.source.descriptor = self.descriptor
        self.source.update_descriptors()

    async def run(self):
        streams = self.sink.input_streams

        for s in streams[1:]:
            if not np.all(s.descriptor.expected_tuples() ==
                          streams[0].descriptor.expected_tuples()):
                raise ValueError(
                    "Multiple streams connected to correlator must have matching descriptors."
                )

        # Buffers for stream data
        stream_data = {
            s: np.zeros(0, dtype=self.sink.descriptor.dtype)
            for s in streams
        }

        # Store whether streams are done
        stream_done = {s: False for s in streams}

        while True:
            # Wait for all of the acquisitions to complete.
            # Contrary to at least some people's rational expectations, asyncio.wait doesn't return Futures
            # in the order of the iterable it was passed, but rather in order of completion. So
            # we construct a dictionary so that each result can be mapped back to the stream it came from:

            futures = {
                asyncio.ensure_future(stream.queue.get()): stream
                for stream in streams
            }

            # Deal with non-equal number of messages using timeout
            responses, pending = await asyncio.wait(
                futures, return_when=asyncio.FIRST_COMPLETED, timeout=2.0)

            # Construct the inverse lookup, results in {stream: result}
            stream_results = {
                futures[res]: res.result()
                for res in list(responses)
            }

            # Cancel the futures
            for pend in list(pending):
                pend.cancel()

            # Add any new data to the stream buffers
            for stream, message in stream_results.items():
                message_type = message['type']
                message_data = message['data']
                message_comp = message['compression']
                message_data = pickle.loads(zlib.decompress(
                    message_data)) if message_comp == 'zlib' else message_data
                message_data = message_data if hasattr(
                    message_data, 'size') else np.array([message_data])
                if message_type == 'event':
                    if message['event_type'] == 'done':
                        stream_done[stream] = True
                    elif message['event_type'] == 'refine':
                        logger.warning(
                            "Correlator doesn't handle refinement yet!")

                elif message_type == 'data':
                    stream_data[stream] = np.concatenate(
                        (stream_data[stream], message_data.flatten()))

            if all(stream_done.values()):
                for oc in self.output_connectors.values():
                    for os in oc.output_streams:
                        await os.push_event("done")
                logger.debug('%s "%s" is done', self.__class__.__name__,
                             self.name)
                break

            # Now process the data with the elementwise operation
            smallest_length = min([d.size for d in stream_data.values()])
            new_data = [d[:smallest_length] for d in stream_data.values()]
            result = new_data[0]
            for nd in new_data[1:]:
                result = self.operation()(result, nd)
            if result.size > 0:
                await self.source.push(result)

            # Trim consumed data; retain any leftover points for the next pass
            for stream in stream_data.keys():
                if stream_data[stream].size > smallest_length:
                    stream_data[stream] = stream_data[stream][smallest_length:]
                else:
                    stream_data[stream] = np.zeros(
                        0, dtype=self.sink.descriptor.dtype)
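
# A minimal subclass sketch (added; not part of the original example), showing
# the overrides ElementwiseFilter expects: `operation` returns the ufunc applied
# pairwise across streams, and `unit` derives the output unit from the inputs.
class MultiplyFilter(ElementwiseFilter):
    filter_name = "Multiply"

    def operation(self):
        return np.multiply

    def unit(self, base_unit):
        return base_unit + "^{}".format(len(self.sink.input_streams))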