Example #1
class X6StreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink   = InputConnector()
    source = OutputConnector()
    phys_channel  = IntParameter(value_range=(1,3), snap=1)
    dsp_channel   = IntParameter(value_range=(0,4), snap=1)
    stream_type   = Parameter(allowed_values=["Raw", "Demodulated", "Integrated"], default='Demodulated')

    def __init__(self, name=""):
        super(X6StreamSelector, self).__init__(name=name)
        self.stream_type.value = "Raw" # One of Raw, Demodulated, Integrated
        self.quince_parameters = [self.phys_channel, self.dsp_channel, self.stream_type]

    def get_descriptor(self, source_instr_settings, channel_settings):
        # Create a channel
        channel = X6Channel(channel_settings)

        descrip = DataStreamDescriptor()
        # If it's an integrated stream, then the time axis has already been eliminated.
        # Otherwise, add the time axis.
        if channel_settings['stream_type'] == 'Raw':
            samp_time = 4.0e-9
            descrip.add_axis(DataAxis("time", samp_time*np.arange(source_instr_settings['record_length']//4)))
            descrip.dtype = np.float64
        elif channel_settings['stream_type'] == 'Demodulated':
            samp_time = 32.0e-9
            descrip.add_axis(DataAxis("time", samp_time*np.arange(source_instr_settings['record_length']//32)))
            descrip.dtype = np.complex128
        else: # Integrated
            descrip.dtype = np.complex128

        return channel, descrip
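
Note: the time axes built above depend only on the stream type and the instrument's record_length: the Raw stream gets a 4 ns sample time and record_length // 4 points, the Demodulated stream 32 ns and record_length // 32 points, and the Integrated stream no time axis at all. A minimal standalone sketch of that arithmetic (plain numpy with an illustrative record length, not the Auspex classes):

import numpy as np

record_length = 1024  # illustrative raw record length, in samples
for stream_type in ("Raw", "Demodulated", "Integrated"):
    if stream_type == "Raw":
        time_axis = 4.0e-9 * np.arange(record_length // 4)     # 256 points, 4 ns apart
    elif stream_type == "Demodulated":
        time_axis = 32.0e-9 * np.arange(record_length // 32)   # 32 points, 32 ns apart
    else:
        time_axis = np.array([])                                # integration removes the time axis
    print(stream_type, time_axis.size)
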
Example #2
class AlazarStreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()
    channel = IntParameter(value_range=(1, 2), snap=1)

    # def __init__(self, name=""):
    #     super(AlazarStreamSelector, self).__init__(name=name)
    # self.channel.value = 1 # Either 1 or 2
    # self.quince_parameters = [self.channel]

    def get_channel(self, channel_proxy):
        """Create and return a channel object corresponding to this stream selector"""
        return AlazarChannel(channel_proxy)

    def get_descriptor(self, stream_selector, receiver_channel):
        """Get the axis descriptor corresponding to this stream selector. For the Alazar cards this
        is always just a time axis."""
        samp_time = 1.0 / receiver_channel.receiver.sampling_rate
        descrip = DataStreamDescriptor()
        descrip.add_axis(
            DataAxis(
                "time",
                samp_time *
                np.arange(receiver_channel.receiver.record_length)))
        return descrip
Example #3
class X6StreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()

    channel = IntParameter(value_range=(1, 3), snap=1)
    dsp_channel = IntParameter(value_range=(0, 4), snap=1)
    stream_type = Parameter(
        allowed_values=["raw", "demodulated", "integrated"],
        default='demodulated')

    # def __init__(self, name=""):
    #     super(X6StreamSelector, self).__init__(name=name)
    # self.stream_type.value = "Raw" # One of Raw, Demodulated, Integrated
    # self.quince_parameters = [self.channel, self.dsp_channel, self.stream_type]

    def get_channel(self, channel_proxy):
        """Create and return a channel object corresponding to this stream selector"""
        return X6Channel(channel_proxy)

    def get_descriptor(self, stream_selector, receiver_channel):
        """Get the axis descriptor corresponding to this stream selector. If it's an integrated stream,
        then the time axis has already been eliminated. Otherwise, add the time axis."""
        descrip = DataStreamDescriptor()
        if stream_selector.stream_type == 'raw':
            samp_time = 4.0e-9
            descrip.add_axis(
                DataAxis(
                    "time",
                    samp_time *
                    np.arange(receiver_channel.receiver.record_length // 4)))
            descrip.dtype = np.float64
        elif stream_selector.stream_type == 'demodulated':
            samp_time = 32.0e-9
            descrip.add_axis(
                DataAxis(
                    "time",
                    samp_time *
                    np.arange(receiver_channel.receiver.record_length // 32)))
            descrip.dtype = np.complex128
        else:  # Integrated
            descrip.dtype = np.complex128
        return descrip
Example #4
class AlazarStreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink    = InputConnector()
    source  = OutputConnector()
    channel = IntParameter(value_range=(1,2), snap=1)

    def __init__(self, name=""):
        super(AlazarStreamSelector, self).__init__(name=name)
        self.channel.value = 1 # Either 1 or 2
        self.quince_parameters = [self.channel]

    def get_descriptor(self, source_instr_settings, channel_settings):
        channel = AlazarChannel(channel_settings)

        # Add the time axis
        samp_time = 1.0/source_instr_settings['sampling_rate']
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("time", samp_time*np.arange(source_instr_settings['record_length'])))
        return channel, descrip
Example #5
class DummydigStreamSelector(Filter):

    sink = InputConnector()
    source = OutputConnector()
    channel = IntParameter(value_range=(1, 2), snap=1)

    def __init__(self, name=""):
        super(DummydigStreamSelector, self).__init__(name=name)
        self.channel.value = 1  # Either 1 or 2
        self.quince_parameters = [self.channel]

    def get_descriptor(self, source_instr_settings, channel_settings):
        channel = DummydigChannel(channel_settings)

        # Add the time axis
        samp_time = 1.0 / source_instr_settings['sampling_rate']
        descrip = DataStreamDescriptor()
        descrip.add_axis(
            DataAxis(
                "time",
                samp_time * np.arange(source_instr_settings['record_length'])))
        return channel, descrip
Example #6
class SWERExperiment(Experiment):
    """ Experiment class for switching probability measurement.
    Determine switching probability for V << V0
    with varying V (and durations?)
    """

    field          = FloatParameter(default=0.0, unit="T")
    pulse_duration = FloatParameter(default=1.0e-9, unit="s")
    pulse_voltage  = FloatParameter(default=0.1, unit="V")
    repeats        = IntParameter(default=1)  # Dummy parameter for repeating
    voltage     = OutputConnector()

    attempts        = 1 << 12
    settle_delay    = 100e-6
    measure_current = 3.0e-6
    samps_per_trig  = 5

    polarity        = 1

    min_daq_voltage = 0.0
    max_daq_voltage = 0.4

    reset_amplitude = 0.1
    reset_duration  = 5.0e-9

    mag   = AMI430("192.168.5.109")
    lock  = SR865("USB0::0xB506::0x2000::002638::INSTR")
    # pspl  = Picosecond10070A("GPIB0::24::INSTR")
    arb   = KeysightM8190A("192.168.5.108")
    keith = Keithley2400("GPIB0::25::INSTR")

    def init_streams(self):
        # Baked in data axes
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("sample", range(self.samps_per_trig)))
        descrip.add_axis(DataAxis("state", range(2)))
        descrip.add_axis(DataAxis("attempt", range(self.attempts)))
        self.voltage.set_descriptor(descrip)

    def init_instruments(self):

        # ===================
        #    Setup the Keithley
        # ===================

        self.keith.triad()
        self.keith.conf_meas_res(res_range=1e5)
        self.keith.conf_src_curr(comp_voltage=0.5, curr_range=1.0e-5)
        self.keith.current = self.measure_current
        self.mag.ramp()

        # ===================
        #    Setup the AWG
        # ===================

        self.arb.set_output(True, channel=1)
        self.arb.set_output(False, channel=2)
        self.arb.sample_freq = 12.0e9
        self.arb.waveform_output_mode = "WSPEED"
        self.arb.set_output_route("DC", channel=1)
        self.arb.voltage_amplitude = 1.0
        self.arb.set_marker_level_low(0.0, channel=1, marker_type="sync")
        self.arb.set_marker_level_high(1.5, channel=1, marker_type="sync")
        self.arb.continuous_mode = False
        self.arb.gate_mode = False
        self.setup_arb(self.pulse_voltage.value)

        # ===================
        #   Setup the NIDAQ
        # ===================

        self.analog_input = Task()
        self.read = int32()
        self.buf_points = 2*self.samps_per_trig*self.attempts
        self.analog_input.CreateAIVoltageChan("Dev1/ai1", "", DAQmx_Val_Diff,
            self.min_daq_voltage, self.max_daq_voltage, DAQmx_Val_Volts, None)
        self.analog_input.CfgSampClkTiming("", 1e6, DAQmx_Val_Rising, DAQmx_Val_FiniteSamps, self.samps_per_trig)
        self.analog_input.CfgInputBuffer(self.buf_points)
        self.analog_input.CfgDigEdgeStartTrig("/Dev1/PFI0", DAQmx_Val_Rising)
        self.analog_input.SetStartTrigRetriggerable(1)
        self.analog_input.StartTask()

        # Assign methods
        self.field.assign_method(self.mag.set_field)
        self.pulse_voltage.assign_method(self.setup_arb)

    def setup_arb(self, volt):
        def arb_pulse(amplitude, duration, sample_rate=12e9):
            arb_voltage = arb_voltage_lookup()
            pulse_points = int(duration*sample_rate)
            if pulse_points < 320:
                wf = np.zeros(320)
            else:
                wf = np.zeros(int(64*np.ceil(pulse_points/64.0)))
            wf[:pulse_points] = np.sign(amplitude)*arb_voltage(abs(amplitude))
            return wf

        self.arb.abort()
        self.arb.delete_all_waveforms()
        self.arb.reset_sequence_table()

        # Reset waveform
        reset_wf    = arb_pulse(-self.polarity*self.reset_amplitude, self.reset_duration)
        wf_data     = KeysightM8190A.create_binary_wf_data(reset_wf)
        rst_segment_id  = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, rst_segment_id)

        # Switching waveform
        switch_wf    = arb_pulse(self.polarity*volt, self.pulse_duration.value)
        wf_data     = KeysightM8190A.create_binary_wf_data(switch_wf)
        sw_segment_id  = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, sw_segment_id)

        # NIDAQ trigger waveform
        nidaq_trig_wf = KeysightM8190A.create_binary_wf_data(np.zeros(3200), sync_mkr=1)
        nidaq_trig_segment_id = self.arb.define_waveform(len(nidaq_trig_wf))
        self.arb.upload_waveform(nidaq_trig_wf, nidaq_trig_segment_id)

        settle_pts = int(640*np.ceil(self.settle_delay * 12e9 / 640))

        scenario = Scenario()
        seq = Sequence(sequence_loop_ct=int(self.attempts))
        #First try with reset flipping pulse
        seq.add_waveform(rst_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0) # bonus non-contiguous memory delay
        seq.add_waveform(sw_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0) # bonus non-contiguous memory delay
        scenario.sequences.append(seq)
        self.arb.upload_scenario(scenario, start_idx=0)
        self.arb.sequence_mode = "SCENARIO"
        self.arb.scenario_advance_mode = "REPEAT"
        self.arb.scenario_start_index = 0
        self.arb.run()

    async def run(self):
        # Keep track of the previous values
        logger.debug("Waiting for filters.")
        await asyncio.sleep(1.0)
        self.arb.advance()
        self.arb.trigger()
        buf = np.empty(self.buf_points)
        self.analog_input.ReadAnalogF64(self.buf_points, -1, DAQmx_Val_GroupByChannel,
                                        buf, self.buf_points, byref(self.read), None)
        await self.voltage.push(buf)
        # Seemingly we need to give the filters some time to catch up here...
        await asyncio.sleep(0.002)
        logger.debug("Stream has filled {} of {} points".format(self.voltage.points_taken, self.voltage.num_points() ))

    def shutdown_instruments(self):
        self.keith.current = 0.0e-5
        # self.mag.zero()
        self.arb.stop()
        try:
            self.analog_input.StopTask()
        except Exception as e:
            print("Warning: failed to stop task (this normally happens with no consequences when taking multiple samples per trigger).")
            pass
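
Note: the NIDAQ buffer sizing in init_instruments follows from the sequence built in setup_arb: each attempt fires the DAQ trigger twice (once after the reset pulse and once after the switching pulse), and each trigger acquires samps_per_trig samples, which is also why init_streams adds a two-point "state" axis. A quick sanity check of that arithmetic, using the values hard-coded above:

attempts = 1 << 12      # 4096 attempts per run
samps_per_trig = 5      # samples acquired per DAQ trigger
states = 2              # two triggers per attempt: after reset, after switching
buf_points = states * samps_per_trig * attempts
print(buf_points)       # 40960 == 2 * samps_per_trig * attempts, as in init_instruments
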
Example #7
class Channelizer(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel. If
    an axis name is supplied to `follow_axis` then the filter will demodulate at the freqency
    `axis_frequency_value - follow_freq_offset` otherwise it will demodulate at `frequency`. Note that
    the filter coefficients are still calculated with respect to the `frequency` paramter, so it should
    be chosen accordingly when `follow_axis` is defined."""

    sink = InputConnector()
    source = OutputConnector()
    follow_axis = Parameter(default="")  # Name of the axis to follow
    follow_freq_offset = FloatParameter(default=0.0)  # Offset
    decimation_factor = IntParameter(value_range=(1, 100), default=4, snap=1)
    frequency = FloatParameter(value_range=(-10e9, 10e9),
                               increment=1.0e6,
                               default=10e6)
    bandwidth = FloatParameter(value_range=(0.00, 100e6),
                               increment=0.1e6,
                               default=5e6)

    def __init__(self,
                 frequency=None,
                 bandwidth=None,
                 decimation_factor=None,
                 follow_axis=None,
                 follow_freq_offset=None,
                 **kwargs):
        super(Channelizer, self).__init__(**kwargs)
        if frequency:
            self.frequency.value = frequency
        if bandwidth:
            self.bandwidth.value = bandwidth
        if decimation_factor:
            self.decimation_factor.value = decimation_factor
        if follow_axis:
            self.follow_axis.value = follow_axis
        if follow_freq_offset:
            self.follow_freq_offset.value = follow_freq_offset
        self.quince_parameters = [
            self.decimation_factor, self.frequency, self.bandwidth
        ]
        self._phase = 0.0

    def final_init(self):
        self.init_filters(self.frequency.value, self.bandwidth.value)

        if self.follow_axis.value is not "":
            desc = self.sink.descriptor
            axis_num = desc.axis_num(self.follow_axis.value)
            self.pts_before_freq_update = desc.num_points_through_axis(
                axis_num + 1)
            self.pts_before_freq_reset = desc.num_points_through_axis(axis_num)
            self.demod_freqs = desc.axes[
                axis_num].points - self.follow_freq_offset.value
            self.current_freq = 0
            self.update_references(self.current_freq)
        self.idx = 0

        # For storing carryover if getting uneven buffers
        self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

    def update_references(self, frequency):
        # store decimated reference for mix down
        # phase_drift = 2j*np.pi*0.5e-6 * (abs(frequency) - 100e6)
        ref = np.exp(2j * np.pi * -frequency * self.time_pts[::self.d1] +
                     1j * self._phase,
                     dtype=np.complex64)

        self.reference = ref
        self.reference_r = np.real(ref)
        self.reference_i = np.imag(ref)

    def init_filters(self, frequency, bandwidth):
        # convert bandwidth normalized to Nyquist interval
        n_bandwidth = bandwidth * self.time_step * 2
        n_frequency = abs(frequency) * self.time_step * 2

        # arbitrarily decide on three stage filter pipeline
        # 1. first stage decimating filter on real data
        # 2. second stage decimating filter on mixed product to boost n_bandwidth
        # 3. final channel selecting filter at n_bandwidth/2

        # anecdotally don't decimate more than a factor of eight for stability

        self.decim_factors = [1] * 3
        self.filters = [None] * 3

        # first stage decimating filter
        # maximize first stage decimation:
        #     * minimize subsequent stages time taken
        #     * filter and decimate while signal is still real
        #     * first stage decimation cannot be too large or then 2omega signal from mixing will alias
        self.d1 = 1
        while (self.d1 < 8) and (2 * n_frequency <= 0.8 / self.d1) and (
                self.d1 < self.decimation_factor.value):
            self.d1 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d1 > 1:
            # create an anti-aliasing filter
            # pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d1)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[0] = self.d1
            self.filters[0] = (b, a)

        # store decimated reference for mix down
        self.update_references(frequency)

        # second stage filter to bring n_bandwidth/2 up
        # decimation cannot be too large or will impinge on channel bandwidth (keep n_bandwidth/2 <= 0.8)
        self.d2 = 1
        while (self.d2 < 8) and (
            (self.d1 * self.d2) <
                self.decimation_factor.value) and (n_bandwidth / 2 <= 0.8):
            self.d2 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d2 > 1:
            # create an anti-aliasing filter
            # pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d2)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[1] = self.d2
            self.filters[1] = (b, a)

        # final channel selection filter
        if n_bandwidth < 0.1:
            raise ValueError(
                "Insufficient decimation to achieve stable filter: {}.".format(
                    n_bandwidth))

        b, a = scipy.signal.cheby1(4, 3, n_bandwidth / 2)
        b = np.float32(b)
        a = np.float32(a)
        self.decim_factors[2] = self.decimation_factor.value // (self.d1 *
                                                                 self.d2)
        self.filters[2] = (b, a)

    def update_descriptors(self):
        logger.debug(
            'Updating Channelizer "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        # extract record time sampling
        self.time_pts = self.sink.descriptor.axes[-1].points
        self.record_length = len(self.time_pts)
        self.time_step = self.time_pts[1] - self.time_pts[0]
        logger.debug("Channelizer time_step = {}".format(self.time_step))

        # We will be decimating along a time axis, which is always
        # going to be the last axis given the way we usually take data.
        # TODO: perform this function along a named axis rather than a numbered axis
        # in case something about this changes.

        # update output descriptors
        decimated_descriptor = DataStreamDescriptor()
        decimated_descriptor.axes = self.sink.descriptor.axes[:]
        decimated_descriptor.axes[-1] = deepcopy(self.sink.descriptor.axes[-1])
        decim = self.decimation_factor.value
        decimated_descriptor.axes[-1].points = self.sink.descriptor.axes[-1].points[decim - 1::decim]
        decimated_descriptor.axes[-1].original_points = decimated_descriptor.axes[-1].points
        decimated_descriptor._exp_src = self.sink.descriptor._exp_src
        decimated_descriptor.dtype = np.complex64
        self.output_descriptor = decimated_descriptor
        for os in self.source.output_streams:
            os.set_descriptor(decimated_descriptor)
            if os.end_connector is not None:
                os.end_connector.update_descriptors()

    async def process_data(self, data):

        # Append any data carried from the last run
        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))

        # This is the largest number of records we can handle
        num_records = data.size // self.record_length

        # This is the carryover that we'll store until next round.
        # If nothing is left then reset the carryover.
        remaining_points = data.size % self.record_length
        if remaining_points > 0:
            if num_records > 0:
                self.carry = data[-remaining_points:]
                data = data[:-remaining_points]
            else:
                self.carry = data
        else:
            self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

        if num_records > 0:
            # The records are processed in parallel after being reshaped here
            reshaped_data = np.reshape(data, (num_records, self.record_length),
                                       order="C")

            # Update demodulation frequency if necessary
            if self.follow_axis.value is not "":
                freq = self.demod_freqs[(self.idx % self.pts_before_freq_reset)
                                        // self.pts_before_freq_update]
                if freq != self.current_freq:
                    self.update_references(freq)
                    self.current_freq = freq

            self.idx += data.size

            # first stage decimating filter
            if self.filters[0] is None:
                filtered = reshaped_data
            else:
                stacked_coeffs = np.concatenate(self.filters[0])
                # filter
                if np.iscomplexobj(reshaped_data):
                    # TODO: compile complex versions of the IPP functions
                    filtered_r = np.empty_like(reshaped_data, dtype=np.float32)
                    filtered_i = np.empty_like(reshaped_data, dtype=np.float32)
                    libipp.filter_records_iir(
                        stacked_coeffs, self.filters[0][0].size - 1,
                        np.ascontiguousarray(
                            reshaped_data.real.astype(np.float32)),
                        self.record_length, num_records, filtered_r)
                    libipp.filter_records_iir(
                        stacked_coeffs, self.filters[0][0].size - 1,
                        np.ascontiguousarray(
                            reshaped_data.imag.astype(np.float32)),
                        self.record_length, num_records, filtered_i)
                    filtered = filtered_r + 1j * filtered_i
                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]
                else:
                    filtered = np.empty_like(reshaped_data)
                    libipp.filter_records_iir(stacked_coeffs,
                                              self.filters[0][0].size - 1,
                                              reshaped_data,
                                              self.record_length, num_records,
                                              filtered)

                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]

            # mix with reference
            # keep real and imaginary separate for filtering below
            if np.iscomplexobj(reshaped_data):
                filtered *= self.reference
                filtered_r = filtered.real
                filtered_i = filtered.imag
            else:
                filtered_r = self.reference_r * filtered
                filtered_i = self.reference_i * filtered

            # channel selection filters
            for ct in [1, 2]:
                if self.filters[ct] is None:
                    continue

                coeffs = self.filters[ct]
                stacked_coeffs = np.concatenate(self.filters[ct])
                out_r = np.empty_like(filtered_r).astype(np.float32)
                out_i = np.empty_like(filtered_i).astype(np.float32)
                libipp.filter_records_iir(
                    stacked_coeffs, self.filters[ct][0].size - 1,
                    np.ascontiguousarray(filtered_r.astype(np.float32)),
                    filtered_r.shape[-1], num_records, out_r)
                libipp.filter_records_iir(
                    stacked_coeffs, self.filters[ct][0].size - 1,
                    np.ascontiguousarray(filtered_i.astype(np.float32)),
                    filtered_i.shape[-1], num_records, out_i)

                # decimate
                if self.decim_factors[ct] > 1:
                    filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]],
                                         order="C")
                    filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]],
                                         order="C")
                else:
                    filtered_r = out_r
                    filtered_i = out_i

            filtered = filtered_r + 1j * filtered_i

            # recover gain from selecting single sideband
            filtered *= 2

            # push to output connectors
            for os in self.source.output_streams:
                await os.push(filtered)
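
Note: underneath the staging and the IPP calls, the Channelizer is a standard digital down-converter: mix each record against a complex reference at the demodulation frequency, low-pass filter away the image, decimate, and multiply by two to recover the gain lost to keeping a single sideband. A self-contained sketch of that pipeline on a synthetic record, using scipy.signal.lfilter in place of libipp.filter_records_iir (illustrative sample rate and frequencies, single filter stage):

import numpy as np
import scipy.signal

samp_rate = 250e6                          # illustrative digitizer rate
t = np.arange(1024) / samp_rate            # one record's time axis
f_demod = 10e6                             # frequency of the channel to select
record = np.cos(2 * np.pi * f_demod * t)   # synthetic single-tone record

# mix down: the tone of interest lands at DC, its image at -2 * f_demod
mixed = record * np.exp(-2j * np.pi * f_demod * t)

# low-pass away the image with an order-4 Chebyshev I filter (3 dB ripple),
# the same filter family used by init_filters above
n_cutoff = 2 * 5e6 / samp_rate             # 5 MHz bandwidth, normalized to Nyquist
b, a = scipy.signal.cheby1(4, 3, n_cutoff)
filtered = scipy.signal.lfilter(b, a, mixed)

# decimate and recover the single-sideband gain
decimation = 4
channel = 2 * filtered[::decimation]
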
Example #8
class Plotter(Filter):
    sink = InputConnector()
    plot_dims = IntParameter(value_range=(0, 1, 2), snap=1,
                             default=0)  # 0 means auto
    plot_mode = Parameter(
        allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"],
        default="quad")

    def __init__(self,
                 *args,
                 name="",
                 plot_dims=None,
                 plot_mode=None,
                 **plot_args):
        super(Plotter, self).__init__(*args, name=name)
        if plot_dims:
            self.plot_dims.value = plot_dims
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.full_update_interval = 0.5
        self.update_interval = 2.0  # slower for partial updates
        self.last_update = time.time()
        self.last_full_update = time.time()

        self.quince_parameters = [self.plot_dims, self.plot_mode]

        # This will hold the matplot server
        self.plot_server = None

    def desc(self):
        d = {
            'plot_type': 'standard',
            'plot_mode': self.plot_mode.value,
            'plot_dims': int(self.plot_dims.value),
            'x_min': float(min(self.x_values)),
            'x_max': float(max(self.x_values)),
            'x_len': int(self.descriptor.axes[-1].num_points()),
            'x_label': self.axis_label(-1),
            'y_label': "{} ({})".format(self.descriptor.data_name,
                                        self.descriptor.data_unit)
        }
        if self.plot_dims.value == 2:
            d['y_label'] = self.axis_label(-2)
            d['data_label'] = "{} ({})".format(self.descriptor.data_name,
                                               self.descriptor.data_unit)
            d['y_min'] = float(min(self.y_values))
            d['y_max'] = float(max(self.y_values))
            d['y_len'] = int(self.descriptor.axes[-2].num_points())
        return d

    def update_descriptors(self):
        logger.debug(
            "Updating Plotter %s descriptors based on input descriptor %s",
            self.name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor

    def final_init(self):

        # Determine the plot dimensions
        if not self.plot_dims.value:
            if len(self.descriptor.axes) > 1:
                self.plot_dims.value = 2
            else:
                self.plot_dims.value = 1

        # Check the descriptor axes
        num_axes = len(self.descriptor.axes)
        if self.plot_dims.value > num_axes:
            logger.info(
                "Cannot plot in more dimensions than there are data axes.")
            self.plot_dims.value = num_axes

        if self.plot_dims.value == 1:
            self.points_before_clear = self.descriptor.axes[-1].num_points()
        else:
            self.points_before_clear = self.descriptor.axes[-1].num_points(
            ) * self.descriptor.axes[-2].num_points()
        logger.debug("Plot will clear after every %d points.",
                     self.points_before_clear)

        self.x_values = self.descriptor.axes[-1].points

        if self.plot_dims.value == 2:
            self.y_values = self.descriptor.axes[-2].points

        self.plot_buffer = (np.nan * np.ones(self.points_before_clear)).astype(
            self.descriptor.dtype)
        self.idx = 0

    def update(self):
        if self.plot_dims.value == 1:
            self.plot_server.send(self.name, self.x_values,
                                  self.plot_buffer.copy())
        elif self.plot_dims.value == 2:
            self.plot_server.send(self.name, self.x_values, self.y_values,
                                  self.plot_buffer.copy())

    async def process_data(self, data):
        # If we get more than enough data, pause to update the plot if necessary
        if (self.idx + data.size) > self.points_before_clear:
            spill_over = (self.idx + data.size) % self.points_before_clear
            if spill_over == 0:
                spill_over = self.points_before_clear
            if (time.time() - self.last_full_update >=
                    self.full_update_interval):
                # If we are getting data quickly, then we can afford to wait
                # for a full frame before pushing to plot.
                self.plot_buffer[self.idx:] = data[:(self.points_before_clear -
                                                     self.idx)]
                self.update()
                self.last_full_update = time.time()
            self.plot_buffer[:] = np.nan
            self.plot_buffer[:spill_over] = data[-spill_over:]
            self.idx = spill_over
        else:  # just keep trucking
            self.plot_buffer[self.idx:self.idx + data.size] = data.flatten()
            self.idx += data.size
            if (time.time() - max(self.last_full_update, self.last_update) >=
                    self.update_interval):
                self.update()
                self.last_update = time.time()

    async def on_done(self):
        if self.plot_dims.value == 1:
            self.plot_server.send(self.name, self.x_values, self.plot_buffer)
        elif self.plot_dims.value == 2:
            self.plot_server.send(self.name, self.x_values, self.y_values,
                                  self.plot_buffer)

    def axis_label(self, index):
        unit_str = " ({})".format(self.descriptor.axes[index].unit
                                  ) if self.descriptor.axes[index].unit else ''
        return self.descriptor.axes[index].name + unit_str
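
Note: process_data treats plot_buffer as a fixed-size frame of points_before_clear points; when an incoming chunk runs past the end of the current frame, the tail of the chunk (the "spill over") seeds the next frame. A toy sketch of just that bookkeeping, without the timing checks or the plot server:

import numpy as np

points_before_clear = 10
plot_buffer = np.full(points_before_clear, np.nan)
idx = 0

for chunk in (np.arange(4.0), np.arange(4.0, 12.0), np.arange(12.0, 15.0)):
    if idx + chunk.size > points_before_clear:
        spill_over = (idx + chunk.size) % points_before_clear
        if spill_over == 0:
            spill_over = points_before_clear
        # finish the current frame, then start the next one from the leftover tail
        plot_buffer[idx:] = chunk[:points_before_clear - idx]
        plot_buffer[:] = np.nan
        plot_buffer[:spill_over] = chunk[-spill_over:]
        idx = spill_over
    else:
        plot_buffer[idx:idx + chunk.size] = chunk
        idx += chunk.size
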
Example #9
File: plot.py  Project: ocakgun/Auspex
class Plotter(Filter):
    sink = InputConnector()
    plot_dims = IntParameter(value_range=(0, 1, 2), snap=1,
                             default=0)  # 0 means auto
    plot_mode = Parameter(
        allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"],
        default="quad")

    def __init__(self,
                 *args,
                 name="",
                 plot_dims=None,
                 plot_mode=None,
                 **plot_args):
        super(Plotter, self).__init__(*args, name=name)
        if plot_dims:
            self.plot_dims.value = plot_dims
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.full_update_interval = 0.5
        self.update_interval = 2.0  # slower for partial updates
        self.last_update = time.time()
        self.last_full_update = time.time()

        self._final_buffer = Queue()
        self.final_buffer = None

        self.quince_parameters = [self.plot_dims, self.plot_mode]

        # Unique id for plot server
        self.uuid = None

        # Should we actually produce plots?
        self.do_plotting = True

    def send(self, message):
        if self.do_plotting:
            data = message['data']
            msg = message['msg']
            name = message['name']

            msg_contents = [self.uuid.encode(), msg.encode(), name.encode()]

            # We might be sending multiple axes, series, etc.
            # Just add them successively to a multipart message.
            for dat in data:
                md = dict(
                    dtype=str(dat.dtype),
                    shape=dat.shape,
                )
                msg_contents.extend(
                    [json.dumps(md).encode(),
                     np.ascontiguousarray(dat)])
            self.socket.send_multipart(msg_contents)

    def get_final_plot(self, quad_funcs=[np.abs, np.angle]):
        if not self.done.is_set():
            raise Exception(
                "Cannot get final plot since plotter is not done or was not run."
            )

        from bqplot import LinearScale, ColorScale, ColorAxis, Axis, Lines, Figure, Tooltip, HeatMap
        from bqplot.toolbar import Toolbar
        from ipywidgets import VBox, HBox

        if self.final_buffer is None:
            self.final_buffer = self._final_buffer.get()
        if self.plot_dims.value == 2:
            raise NotImplementedError(
                "2 dimensional get_final_plot not yet implemented.")
        elif self.plot_dims.value == 1:
            figs = []
            for quad_func in quad_funcs:
                sx = LinearScale()
                sy = LinearScale()
                ax = Axis(label=self.axis_label(-1), scale=sx)
                ay = Axis(label=f"{self.descriptor.data_name} ({self.descriptor.data_unit})",
                          scale=sy,
                          orientation='vertical')
                line = Lines(x=self.x_values,
                             y=quad_func(self.final_buffer),
                             scales={
                                 'x': sx,
                                 'y': sy
                             })
                fig = Figure(marks=[line],
                             axes=[ax, ay],
                             title=self.filter_name)
                figs.append(fig)
        if len(figs) <= 2:
            return HBox(figs)
        elif len(figs) == 4:
            return VBox([HBox([figs[0], figs[1]]), HBox([figs[2], figs[3]])])
        elif len(figs) == 3 or len(figs) > 4:
            raise Exception("Please use 1, 2, or 4 quadrature functions.")

    def desc(self):
        d = {
            'plot_type': 'standard',
            'plot_mode': self.plot_mode.value,
            'plot_dims': int(self.plot_dims.value),
            'x_min': float(min(self.x_values)),
            'x_max': float(max(self.x_values)),
            'x_len': int(self.descriptor.axes[-1].num_points()),
            'x_label': self.axis_label(-1),
            'y_label': "{} ({})".format(self.descriptor.data_name,
                                        self.descriptor.data_unit)
        }
        if self.plot_dims.value == 2:
            d['y_label'] = self.axis_label(-2)
            d['data_label'] = "{} ({})".format(self.descriptor.data_name,
                                               self.descriptor.data_unit)
            d['y_min'] = float(min(self.y_values))
            d['y_max'] = float(max(self.y_values))
            d['y_len'] = int(self.descriptor.axes[-2].num_points())
        return d

    def set_done(self):
        self.send({
            'name': self.filter_name,
            'data': [np.array([])],
            "msg": "done"
        })

    def set_quit(self):
        self.send({
            'name': self.filter_name,
            'data': [np.array([])],
            "msg": "quit"
        })

    def update_descriptors(self):
        logger.debug(
            "Updating Plotter %s descriptors based on input descriptor %s",
            self.filter_name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor

    def final_init(self):
        # Determine the plot dimensions
        if not self.plot_dims.value:
            if len(self.descriptor.axes) > 1:
                self.plot_dims.value = 2
            else:
                self.plot_dims.value = 1

        # Check the descriptor axes
        num_axes = len(self.descriptor.axes)
        if self.plot_dims.value > num_axes:
            logger.info(
                "Cannot plot in more dimensions than there are data axes.")
            self.plot_dims.value = num_axes

        if self.plot_dims.value == 1:
            self.points_before_clear = self.descriptor.axes[-1].num_points()
        else:
            self.points_before_clear = self.descriptor.axes[-1].num_points(
            ) * self.descriptor.axes[-2].num_points()
        logger.debug("Plot will clear after every %d points.",
                     self.points_before_clear)

        self.x_values = self.descriptor.axes[-1].points

        if self.plot_dims.value == 2:
            self.y_values = self.descriptor.axes[-2].points

        #I'm so sorry everyone. Send Julia
        if 'complex' in np.dtype(self.descriptor.dtype).name:
            self.plot_buffer = (
                np.nan * np.ones(self.points_before_clear) +
                1.0j * np.nan * np.ones(self.points_before_clear)).astype(
                    self.descriptor.dtype)
        else:
            self.plot_buffer = np.nan * np.ones(self.points_before_clear)
        self.idx = 0

    def execute_on_run(self):
        # Connect to the plot server
        if self.do_plotting:
            try:
                self.context = zmq.Context()
                self.socket = self.context.socket(zmq.DEALER)
                self.socket.identity = f"Auspex_Experiment_{self.filter_name}_{hex(id(self))}".encode(
                )
                self.socket.connect("tcp://localhost:7762")
            except Exception:
                logger.warning(
                    "Exception occurred while contacting the plot server. Is it running?")

    def update(self):
        if self.plot_dims.value == 1:
            self.send({
                'name': self.filter_name,
                'msg': 'data',
                'data': [self.x_values, self.plot_buffer.copy()]
            })
        elif self.plot_dims.value == 2:
            self.send({
                'name': self.filter_name,
                'msg': 'data',
                'data': [self.x_values, self.y_values, self.plot_buffer.copy()]
            })

    def process_data(self, data):
        # If we get more than enough data, pause to update the plot if necessary
        if (self.idx + data.size) > self.points_before_clear:
            spill_over = (self.idx + data.size) % self.points_before_clear
            if spill_over == 0:
                spill_over = self.points_before_clear
            if (time.time() - self.last_full_update >=
                    self.full_update_interval):
                # If we are getting data quickly, then we can afford to wait
                # for a full frame before pushing to plot.
                self.plot_buffer[self.idx:] = data[:(self.points_before_clear -
                                                     self.idx)]
                self.update()
                self.last_full_update = time.time()
            self.plot_buffer[:] = np.nan
            self.plot_buffer[:spill_over] = data[-spill_over:]
            self.idx = spill_over
        else:  # just keep trucking
            self.plot_buffer[self.idx:self.idx + data.size] = data.flatten()
            self.idx += data.size
            if (time.time() - max(self.last_full_update, self.last_update) >=
                    self.update_interval):
                self.update()
                self.last_update = time.time()

    def on_done(self):
        if self.plot_dims.value == 1:
            self.send({
                'name': self.filter_name,
                "msg": "data",
                'data': [self.x_values, self.plot_buffer.copy()],
            })
        elif self.plot_dims.value == 2:
            self.send({
                'name': self.filter_name,
                "msg": "data",
                'data': [self.x_values, self.y_values, self.plot_buffer.copy()]
            })
        self._final_buffer.put(self.plot_buffer)
        if self.do_plotting:
            self.set_done()
            self.socket.close()
            self.context.term()

    def axis_label(self, index):
        unit_str = " ({})".format(self.descriptor.axes[index].unit
                                  ) if self.descriptor.axes[index].unit else ''
        return self.descriptor.axes[index].name + unit_str
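
Note: send() above frames each plot update as a ZeroMQ multipart message: the plotter's uuid, the message type, the filter name, and then one JSON metadata frame (dtype and shape) followed by the raw array bytes for each array in data. Assuming that framing, a receiver could rebuild the arrays along these lines (a sketch only; the actual plot server is not shown in this file):

import json
import numpy as np

def unpack_plot_message(frames):
    """Rebuild (uuid, msg, name, arrays) from the multipart frames produced by send()."""
    uuid, msg, name = (f.decode() for f in frames[:3])
    arrays = []
    # the remaining frames alternate: JSON metadata, then the raw array buffer
    for md_frame, buf in zip(frames[3::2], frames[4::2]):
        md = json.loads(md_frame.decode())
        arrays.append(np.frombuffer(buf, dtype=md['dtype']).reshape(md['shape']))
    return uuid, msg, name, arrays
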
Example #10
class Channelizer(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()
    decimation_factor = IntParameter(value_range=(1, 100), default=2, snap=1)
    frequency = FloatParameter(value_range=(-5e9, 5e9),
                               increment=1.0e6,
                               default=-9e6)
    bandwidth = FloatParameter(value_range=(0.00, 100e6),
                               increment=0.1e6,
                               default=5e6)

    def __init__(self,
                 frequency=None,
                 bandwidth=None,
                 decimation_factor=None,
                 **kwargs):
        super(Channelizer, self).__init__(**kwargs)
        if frequency:
            self.frequency.value = frequency
        if bandwidth:
            self.bandwidth.value = bandwidth
        if decimation_factor:
            self.decimation_factor.value = decimation_factor
        self.quince_parameters = [
            self.decimation_factor, self.frequency, self.bandwidth
        ]

    def update_descriptors(self):
        logger.debug(
            'Updating Channelizer "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        # extract record time sampling
        time_pts = self.sink.descriptor.axes[-1].points
        self.record_length = len(time_pts)
        self.time_step = time_pts[1] - time_pts[0]
        logger.debug("Channelizer time_step = {}".format(self.time_step))

        # convert bandwidth normalized to Nyquist interval
        n_bandwidth = self.bandwidth.value * self.time_step * 2
        n_frequency = self.frequency.value * self.time_step * 2

        # arbitrarily decide on three stage filter pipeline
        # 1. first stage decimating filter on real data
        # 2. second stage decimating filter on mixed product to boost n_bandwidth
        # 3. final channel selecting filter at n_bandwidth/2

        # anecdotally don't decimate more than a factor of eight for stability

        self.decim_factors = [1] * 3
        self.filters = [None] * 3

        # first stage decimating filter
        # maximize first stage decimation:
        #     * minimize subsequent stages time taken
        #     * filter and decimate while signal is still real
        #     * first stage decimation cannot be too large or then 2omega signal from mixing will alias
        d1 = 1
        while (d1 < 8) and (2 * n_frequency <=
                            0.8 / d1) and (d1 < self.decimation_factor.value):
            d1 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if d1 > 1:
            # create an anti-aliasing filter
            # pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / d1)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[0] = d1
            self.filters[0] = (b, a)

        # store decimated reference for mix down
        ref = np.exp(2j * np.pi * self.frequency.value * time_pts[::d1],
                     dtype=np.complex64)
        self.reference_r = np.real(ref)
        self.reference_i = np.imag(ref)

        # second stage filter to bring n_bandwidth/2 up
        # decimation cannot be too large or will impinge on channel bandwidth (keep n_bandwidth/2 <= 0.8)
        d2 = 1
        while (d2 < 8) and ((d1 * d2) < self.decimation_factor.value) and (
                n_bandwidth / 2 <= 0.8):
            d2 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if d2 > 1:
            # create an anti-aliasing filter
            # pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / d2)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[1] = d2
            self.filters[1] = (b, a)

        # final channel selection filter
        if n_bandwidth < 0.1:
            raise ValueError(
                "Insufficient decimation to achieve stable filter")

        b, a = scipy.signal.cheby1(4, 3, n_bandwidth / 2)
        b = np.float32(b)
        a = np.float32(a)
        self.decim_factors[2] = self.decimation_factor.value // (d1 * d2)
        self.filters[2] = (b, a)

        # update output descriptors
        decimated_descriptor = DataStreamDescriptor()
        decimated_descriptor.axes = self.sink.descriptor.axes[:]
        decimated_descriptor.axes[-1] = deepcopy(self.sink.descriptor.axes[-1])
        decim = self.decimation_factor.value
        decimated_descriptor.axes[-1].points = self.sink.descriptor.axes[-1].points[decim - 1::decim]
        decimated_descriptor.axes[-1].original_points = decimated_descriptor.axes[-1].points
        decimated_descriptor.exp_src = self.sink.descriptor.exp_src
        decimated_descriptor.dtype = np.complex64
        for os in self.source.output_streams:
            os.set_descriptor(decimated_descriptor)
            if os.end_connector is not None:
                os.end_connector.update_descriptors()

    async def process_data(self, data):
        # Assume for now we get an integer number of records at a time
        # TODO: handle partial records
        num_records = data.size // self.record_length
        reshaped_data = np.reshape(data, (num_records, self.record_length),
                                   order="C")

        # first stage decimating filter
        if self.filters[0] is None:
            # no first-stage filter configured: pass the records through unchanged
            filtered = reshaped_data
        else:
            stacked_coeffs = np.concatenate(self.filters[0])
            # filter
            filtered = np.empty_like(reshaped_data)
            libipp.filter_records_iir(stacked_coeffs,
                                      self.filters[0][0].size - 1,
                                      reshaped_data, self.record_length,
                                      num_records, filtered)

            # decimate
            if self.decim_factors[0] > 1:
                filtered = filtered[:, ::self.decim_factors[0]]

        # mix with reference
        # keep real and imaginary separate for filtering below
        filtered_r = self.reference_r * filtered
        filtered_i = self.reference_i * filtered

        # channel selection filters
        for ct in [1, 2]:
            if self.filters[ct] is None:
                continue

            coeffs = self.filters[ct]
            stacked_coeffs = np.concatenate(self.filters[ct])
            out_r = np.empty_like(filtered_r)
            out_i = np.empty_like(filtered_i)
            libipp.filter_records_iir(stacked_coeffs,
                                      self.filters[ct][0].size - 1, filtered_r,
                                      filtered_r.shape[-1], num_records, out_r)
            libipp.filter_records_iir(stacked_coeffs,
                                      self.filters[ct][0].size - 1, filtered_i,
                                      filtered_i.shape[-1], num_records, out_i)

            # decimate
            if self.decim_factors[ct] > 1:
                filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]],
                                     order="C")
                filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]],
                                     order="C")
            else:
                filtered_r = out_r
                filtered_i = out_i

        filtered = filtered_r + 1j * filtered_i

        # recover gain from selecting single sideband
        filtered *= 2

        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
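
Note: the two while loops in update_descriptors split the requested decimation_factor across three stages: decimate as much as possible while the data is still real (bounded by the 2*omega mixing product), then decimate the mixed-down signal (bounded by the channel bandwidth), and leave the remainder to the final channel-selection filter. The same choice, extracted into a small standalone helper with illustrative inputs (a sketch of the logic, not the library's API):

def choose_stage_factors(frequency, bandwidth, time_step, total_decimation):
    """Split total_decimation into three stage factors (d1, d2, d3), as in update_descriptors above."""
    n_bandwidth = bandwidth * time_step * 2       # bandwidth as a fraction of Nyquist
    n_frequency = abs(frequency) * time_step * 2

    # stage 1: decimate while the signal is still real, keeping the 2*omega
    # mixing product below 0.8 of the reduced Nyquist rate
    d1 = 1
    while d1 < 8 and 2 * n_frequency <= 0.8 / d1 and d1 < total_decimation:
        d1 *= 2
        n_bandwidth *= 2
        n_frequency *= 2

    # stage 2: decimate the mixed-down signal until the channel bandwidth
    # approaches the reduced Nyquist rate (keep n_bandwidth / 2 <= 0.8)
    d2 = 1
    while d2 < 8 and d1 * d2 < total_decimation and n_bandwidth / 2 <= 0.8:
        d2 *= 2
        n_bandwidth *= 2

    # stage 3: whatever decimation remains for the channel-selection filter
    return d1, d2, total_decimation // (d1 * d2)

# e.g. a 10 MHz channel of 5 MHz bandwidth sampled every 4 ns, total decimation 16
print(choose_stage_factors(10e6, 5e6, 4e-9, 16))   # -> (4, 4, 1)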