Example #1
class FieldSwitchingExperiment(Experiment):
    """ Field Switching Experimen: measure resistance on Keithley while sweeping AMI430 field
    """
    field = FloatParameter(default=0.0, unit="T")
    measure_current = FloatParameter(default=3e-6, unit="A")
    resistance = OutputConnector(unit="Ohm")

    mag = AMI430("192.168.5.109")
    keith = Keithley2400("GPIB0::25::INSTR")

    def init_instruments(self):
        self.keith.triad()
        self.keith.conf_meas_res(NPLC=10, res_range=1e5)
        self.keith.conf_src_curr(comp_voltage=0.5, curr_range=1.0e-5)
        self.keith.current = self.measure_current.value

        self.mag.ramp()

        self.measure_current.assign_method(self.keith.set_current)
        self.field.assign_method(self.mag.set_field)
        self.field.add_post_push_hook(
            lambda: time.sleep(0.1))  # Field set delay

    async def run(self):
        """This is run for each step in a sweep."""
        await self.resistance.push(self.keith.resistance)
        logger.debug("Stream has filled {} of {} points".format(
            self.resistance.points_taken, self.resistance.num_points()))
        await asyncio.sleep(0.02)  # Give the filters some time to catch up?

    def shutdown_instruments(self):
        self.keith.current = 0.0
        self.mag.zero()
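
A minimal driver sketch for running this experiment as a field sweep. It assumes the auspex-style Experiment API these examples suggest (add_sweep, set_graph, run_sweeps) together with an HDF5 writer filter; the import path, sweep range, and file name are illustrative assumptions, not taken from the example.

import numpy as np
from auspex.filters.io import WriteToHDF5   # assumed writer filter

exp = FieldSwitchingExperiment()
writer = WriteToHDF5("field_switching.h5")  # hypothetical output file

# Route the resistance stream into the writer, sweep the field, and run.
exp.set_graph([(exp.resistance, writer.sink)])
exp.add_sweep(exp.field, np.linspace(-0.1, 0.1, 51))
exp.run_sweeps()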
Example #2
class SweptTestExperimentMetadata(Experiment):
    """Here the run loop merely spews data until it fills up the stream. """

    # Parameters
    field = FloatParameter(unit="Oe")
    freq  = FloatParameter(unit="Hz")
    dur   = FloatParameter(default=5, unit="ns")

    # DataStreams
    voltage = OutputConnector()
    current = OutputConnector()

    # Constants
    samples = 5
    time_val = 0

    def init_instruments(self):
        self.field.assign_method(lambda x: logger.debug("Field got value " + str(x)))
        self.freq.assign_method(lambda x: logger.debug("Freq got value " + str(x)))
        self.dur.assign_method(lambda x: logger.debug("Duration got value " + str(x)))

    def init_streams(self):
        # Add a "base" data axis: say we are averaging 5 samples per trigger
        self.voltage.add_axis(DataAxis("samples", np.array([0,1,2,np.nan,np.nan]), metadata=["data", "data", "data", "0", "1"]))

    def __repr__(self):
        return "<SweptTestExperimentMetadata>"

    async def run(self):
        time_step = 0.1
        await asyncio.sleep(0.002)
        data_row = np.sin(2*np.pi*self.time_val)*np.ones(self.samples) + 0.1*np.random.random(self.samples)
        self.time_val += time_step
        await self.voltage.push(data_row)
        await self.current.push(np.sin(2*np.pi*self.time_val) + 0.1*np.random.random(1))
Example #3
class SweptTestExperiment(Experiment):
    """Here the run loop merely spews data until it fills up the stream. """

    # Parameters
    field = FloatParameter(unit="Oe")
    freq = FloatParameter(unit="Hz")
    dur = FloatParameter(default=5, unit="ns")

    # DataStreams
    voltage = OutputConnector()
    current = OutputConnector()

    # Constants
    samples = 5
    time_val = 0

    # Complex?
    is_complex = False

    def init_instruments(self):
        self.field.assign_method(
            lambda x: logger.debug("Field got value " + str(x)))
        self.freq.assign_method(
            lambda x: logger.debug("Freq got value " + str(x)))
        self.dur.assign_method(
            lambda x: logger.debug("Duration got value " + str(x)))

    def init_streams(self):
        # Add a "base" data axis: say we are averaging 5 samples per trigger
        descrip = DataStreamDescriptor()
        descrip.data_name = 'voltage'
        if self.is_complex:
            descrip.dtype = np.complex128
        descrip.add_axis(DataAxis("samples", list(range(self.samples))))
        self.voltage.set_descriptor(descrip)

    def __repr__(self):
        return "<SweptTestExperiment>"

    async def run(self):
        # logger.debug("Data taker running (inner loop)")
        time_step = 0.1
        await asyncio.sleep(0.001)
        data_row = np.sin(2 * np.pi * self.time_val) * np.ones(
            self.samples) + 0.1 * np.random.random(self.samples)
        self.time_val += time_step
        if self.is_complex:
            await self.voltage.push(
                np.array(data_row + 0.5j * data_row, dtype=np.complex128))
            await self.current.push(
                np.array(np.sin(2 * np.pi * self.time_val) +
                         0.1 * np.random.random(1) +
                         0.5j * np.random.random(1),
                         dtype=np.complex128))
        else:
            await self.voltage.push(data_row)
            await self.current.push(
                np.sin(2 * np.pi * self.time_val) + 0.1 * np.random.random(1))
Example #4
class FieldSwitchingLockinExperiment(Experiment):
    """ Field Switching Experimen: measure resistance on Keithley while sweeping AMI430 field
    """
    field = FloatParameter(default=0.0, unit="T")
    # measure_current = FloatParameter(default=3e-6, unit="A")
    resistance = OutputConnector(unit="Ohm")

    res_reference = 1e3
    vsource = 10e-3
    mag = AMI430("192.168.5.109")
    # keith = Keithley2400("GPIB0::25::INSTR")
    lock = SR865("USB0::0xB506::0x2000::002638::INSTR")

    def init_instruments(self):
        # Initialize lockin
        self.lock.amp = self.vsource
        self.lock.tc = 3
        self.mag.ramp()
        self.delay = self.lock.measure_delay()
        self.field.assign_method(self.mag.set_field)
        self.field.add_post_push_hook(
            lambda: time.sleep(0.1))  # Field set delay
        time.sleep(self.delay)

    async def run(self):
        """This is run for each step in a sweep."""

        await asyncio.sleep(self.delay)
        await self.resistance.push(self.res_reference /
                                   ((self.lock.amp / self.lock.mag) - 1.0))

    def shutdown_instruments(self):
        self.mag.zero()
        self.lock.amp = 0
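
The pushed quantity follows from a voltage divider: with the lock-in output `amp` driving the reference resistor in series with the sample, and `mag` the measured sample voltage, `mag = amp * R / (res_reference + R)`, which inverts to `R = res_reference / (amp/mag - 1)`. A standalone sanity check of that algebra (all values illustrative):

# Round-trip check of the divider formula used in run(); values are made up.
res_reference = 1e3      # series reference resistor, Ohm
r_sample = 250.0         # assumed "true" sample resistance, Ohm
amp = 10e-3              # lock-in drive amplitude, V

mag = amp * r_sample / (res_reference + r_sample)  # voltage across the sample
recovered = res_reference / ((amp / mag) - 1.0)    # expression from run()
assert abs(recovered - r_sample) < 1e-9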
Example #5
class FieldSwitchingLockinExperiment(Experiment):
    """ Field Switching Experimen: measure resistance on Keithley while sweeping AMI430 field
    """
    field           = FloatParameter(default=0.0, unit="T")
    resistance      = OutputConnector(unit="Ohm")

    # Default values for lockin measurement. These will need to be changed in a notebook to match the MR and switching current of the sample being measured
    res_reference = 1e3
    measure_current = 10e-6
    fdB = 18
    tc = 100e-3
    mag   = AMI430("192.168.5.109")
    lock  = SR865("USB0::0xB506::0x2000::002638::INSTR")

    def init_instruments(self):
        # Initialize lockin
        self.lock.amp = self.res_reference*self.measure_current
        self.lock.tc = self.tc
        self.lock.filter_slope = self.fdB
        self.mag.ramp()
        self.delay = self.lock.measure_delay()
        self.field.assign_method(self.mag.set_field)
        self.field.add_post_push_hook(lambda: time.sleep(0.1)) # Field set delay
        time.sleep(self.delay)

    async def run(self):
        """This is run for each step in a sweep."""

        await asyncio.sleep(self.delay)
        await self.resistance.push(self.res_reference/((self.lock.amp/self.lock.mag)-1.0))

    def shutdown_instruments(self):
        self.mag.zero()
        self.lock.amp = 0
Example #6
class SweptTestExperiment(Experiment):
    """Here the run loop merely spews data until it fills up the stream. """

    # Parameters
    field = FloatParameter(unit="Oe")
    freq  = FloatParameter(unit="Hz")
    dur   = FloatParameter(default=5, unit="ns")

    # DataStreams
    voltage = OutputConnector()
    current = OutputConnector()

    # Constants
    samples = 5
    time_val = 0

    # Complex Values?
    complex_data = False

    def init_instruments(self):
        self.field.assign_method(lambda x: logger.debug("Field got value " + str(x)))
        self.freq.assign_method(lambda x: logger.debug("Freq got value " + str(x)))
        self.dur.assign_method(lambda x: logger.debug("Duration got value " + str(x)))

    def init_streams(self):
        # Add a "base" data axis: say we are averaging 5 samples per trigger
        self.voltage.add_axis(DataAxis("samples", list(range(self.samples))))
        self.current.add_axis(DataAxis("samples", list(range(self.samples))))

    def __repr__(self):
        return "<SweptTestExperiment>"

    def run(self):
        logger.debug("Data taker running (inner loop)")
        time_step = 0.1
        time.sleep(0.002)
        if self.complex_data:
            data_row = np.sin(2*np.pi*self.time_val)*np.ones(5) + 2.0j*np.sin(2*np.pi*self.time_val)*np.ones(5)
        else:
            data_row = np.sin(2*np.pi*self.time_val)*np.ones(5) + 0.1*np.random.random(5)
        self.time_val += time_step
        self.voltage.push(data_row)
        self.current.push(data_row*2.0)
        logger.debug("Stream pushed points {}.".format(data_row))
        logger.debug("Stream has filled {} of {} points".format(self.voltage.points_taken, self.voltage.num_points() ))
Example #7
class TestExperiment(Experiment):

    # Create instances of instruments
    fake_instr_1 = TestInstrument1("FAKE::RESOURE::NAME")
    fake_instr_2 = TestInstrument2("FAKE::RESOURE::NAME")
    fake_instr_3 = TestInstrument3("FAKE::RESOURE::NAME")

    # Parameters
    freq_1 = FloatParameter(unit="Hz")
    freq_2 = FloatParameter(unit="Hz")

    # DataStreams
    chan1 = OutputConnector()
    chan2 = OutputConnector()

    # Constants
    samples = 3
    num_trials = 5
    time_val = 0.0

    def init_instruments(self):
        self.freq_1.assign_method(lambda x: logger.debug("Set: {}".format(x)))
        self.freq_2.assign_method(lambda x: logger.debug("Set: {}".format(x)))

    def init_streams(self):
        # Add "base" data axes
        self.chan1.add_axis(DataAxis("samples", list(range(self.samples))))
        self.chan2.add_axis(DataAxis("trials", list(range(self.num_trials))))

    async def run(self):
        logger.debug("Data taker running (inner loop)")
        time_step = 0.1
        await asyncio.sleep(0.002)
        data_row = np.sin(2 * np.pi * self.time_val) * np.ones(
            self.samples) + 0.1 * np.random.random(self.samples)
        self.time_val += time_step
        await self.chan1.push(data_row)
        data_row = np.sin(2 * np.pi * self.time_val) * np.ones(
            self.num_trials) + 0.1 * np.random.random(self.num_trials)
        await self.chan2.push(data_row)
        logger.debug("Stream pushed points {}.".format(data_row))
        logger.debug("Stream has filled {} of {} points".format(
            self.chan1.points_taken, self.chan1.num_points()))
Example #8
class IcLockinExperiment(Experiment):
	""" Nano-wire Ic measurement using Lockin with DC offset
	"""
	source       = FloatParameter(default=0.0, unit="V")
	voltage      = OutputConnector(unit="V")
	current      = OutputConnector(unit="A")
	resistance   = OutputConnector(unit="Ohm")

	R_ref = 1e3
	sense = 5e-6

	lock  = SR865("USB0::0xB506::0x2000::002638::INSTR")

	def init_instruments(self):
		# self.keith.triad()
		# self.keith.conf_meas_res(NPLC=10, res_range=1e5)
		# self.keith.conf_src_curr(comp_voltage=0.5, curr_range=1.0e-5)
		# self.keith.current = self.measure_current.value

		# Initialize lockin
		self.lock.amp = self.sense*self.R_ref
		#self.lock.tc  = self.integration_time
		self.delay = self.lock.measure_delay()

		# Define source method
		self.source.assign_method(self.set_source)
		#self.source.add_post_push_hook(lambda: time.sleep(2*self.integration_time))

	def set_source(self,source):

		self.lock.dc = source
		time.sleep(self.lock.measure_delay())


	async def run(self):
		"""This is run for each step in a sweep."""

		await asyncio.sleep(self.delay)
		R_load = self.lock.mag/(self.sense - self.lock.mag)*self.R_ref
		await self.resistance.push(R_load)
		await self.current.push(self.lock.dc/(self.R_ref+R_load))
		await self.voltage.push(self.lock.dc*R_load/(self.R_ref+R_load))

		logger.debug("Stream has filled {} of {} points".format(self.resistance.points_taken,
																self.resistance.num_points() ))

		#await asyncio.sleep(2*self.integration_time) # Give the filters some time to catch up?

	def shutdown_instruments(self):
		self.lock.dc = 0
		self.lock.amp = 0
Example #9
class TestExperiment(Experiment):

    # Parameters
    freq_1 = FloatParameter(unit="Hz")
    freq_2 = FloatParameter(unit="Hz")

    # DataStreams
    chan1 = OutputConnector()
    chan2 = OutputConnector()

    # Constants
    samples = 3
    num_trials = 5
    time_val = 0.0

    def init_instruments(self):
        self.freq_1.assign_method(lambda x: logger.debug("Set: {}".format(x)))
        self.freq_2.assign_method(lambda x: logger.debug("Set: {}".format(x)))

    def init_streams(self):
        self.chan1.add_axis(DataAxis("samples", list(range(self.samples))))
        self.chan1.add_axis(DataAxis("trials", list(range(self.num_trials))))
        self.chan2.add_axis(DataAxis("samples", list(range(self.samples))))
        self.chan2.add_axis(DataAxis("trials", list(range(self.num_trials))))

    def run(self):
        logger.debug("Data taker running (inner loop)")
        time_step = 0.1
        time.sleep(0.002)
        data_row = np.ones(
            self.samples * self.num_trials) + 0.1 * np.random.random(
                self.samples * self.num_trials)
        self.time_val += time_step
        # logger.info(f"IN RUN METHOD PUSHING: {data_row} {data_row.size} {data_row.shape}")
        self.chan1.push(data_row)
        logger.debug("Stream pushed points {}.".format(data_row))
        logger.debug("Stream has filled {} of {} points".format(
            self.chan1.points_taken, self.chan1.num_points()))
Example #10
def set(self, instrs_to_set=None):
    # Avoid the mutable-default-argument pitfall; fall back to a fresh list.
    if instrs_to_set is None:
        instrs_to_set = []
    meta_file = compile_to_hardware(self.sequence(),
                                    fileName=self.filename,
                                    axis_descriptor=self.axis_descriptor)
    self.exp = QubitExpFactory.create(meta_file=meta_file,
                                      calibration=True,
                                      cw_mode=self.cw_mode)
    if self.plot:
        # Add the manual plotter and the update method to the experiment
        self.exp.add_manual_plotter(self.plot)
    self.exp.connect_instruments()
    # Set instruments for calibration
    for instr_to_set in instrs_to_set:
        par = FloatParameter()
        par.assign_method(
            getattr(self.exp._instruments[instr_to_set['instr']],
                    instr_to_set['method']))
        # Either sweep or set a single value
        if 'sweep_values' in instr_to_set:
            par.value = instr_to_set['sweep_values'][0]
            self.exp.add_sweep(par, instr_to_set['sweep_values'])
        else:
            par.value = instr_to_set['value']
            par.push()
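
Each `instrs_to_set` entry is a plain dict naming an instrument in `self.exp._instruments`, one of its settable methods, and either a sweep or a single value. A hypothetical payload (instrument key and method names are illustrative):

# Illustrative payload only; 'instr' and 'method' must exist in the experiment.
instrs_to_set = [
    {'instr': 'q1_source', 'method': 'set_frequency',
     'sweep_values': [5.00e9, 5.05e9, 5.10e9]},          # swept parameter
    {'instr': 'q1_source', 'method': 'set_power',
     'value': -20.0},                                    # single set-and-push
]
cal.set(instrs_to_set)   # 'cal' stands in for an instance of the class above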
Example #11
class SweptTestExperiment(Experiment):
    """Here the run loop merely spews data until it fills up the stream. """

    # Parameters
    temperature = FloatParameter(unit="K")

    # DataStreams
    resistance = OutputConnector()

    # Constants
    samples = 5
    time_val = 0

    def __repr__(self):
        return "<SweptTestExperiment>"

    async def run(self):
        logger.debug("Data taker running (inner loop)")
        await asyncio.sleep(0.002)

        def ideal_tc(t, tc=9.0, k=20.0):
            return t*1.0/(1.0 + np.exp(-k*(t-tc)))

        await self.resistance.push(ideal_tc(self.temperature.value))
Example #12
class SWERExperiment(Experiment):
    """ Experiment class for switching probability measurement
    Determine switching probability for V << V0
    with varying V (and durations?)
    """

    field          = FloatParameter(default=0.0, unit="T")
    pulse_duration = FloatParameter(default=1.0e-9, unit="s")
    pulse_voltage  = FloatParameter(default=0.1, unit="V")
    repeats        = IntParameter(default = 1) # Dummy parameter for repeating
    voltage     = OutputConnector()

    attempts        = 1 << 12
    settle_delay    = 100e-6
    measure_current = 3.0e-6
    samps_per_trig  = 5

    polarity        = 1

    min_daq_voltage = 0.0
    max_daq_voltage = 0.4

    reset_amplitude = 0.1
    reset_duration  = 5.0e-9

    mag   = AMI430("192.168.5.109")
    lock  = SR865("USB0::0xB506::0x2000::002638::INSTR")
    # pspl  = Picosecond10070A("GPIB0::24::INSTR")
    arb   = KeysightM8190A("192.168.5.108")
    keith = Keithley2400("GPIB0::25::INSTR")

    def init_streams(self):
        # Baked in data axes
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("sample", range(self.samps_per_trig)))
        descrip.add_axis(DataAxis("state", range(2)))
        descrip.add_axis(DataAxis("attempt", range(self.attempts)))
        self.voltage.set_descriptor(descrip)

    def init_instruments(self):

        # ===================
        #    Setup the Keithley
        # ===================

        self.keith.triad()
        self.keith.conf_meas_res(res_range=1e5)
        self.keith.conf_src_curr(comp_voltage=0.5, curr_range=1.0e-5)
        self.keith.current = self.measure_current
        self.mag.ramp()

        # ===================
        #    Setup the AWG
        # ===================

        self.arb.set_output(True, channel=1)
        self.arb.set_output(False, channel=2)
        self.arb.sample_freq = 12.0e9
        self.arb.waveform_output_mode = "WSPEED"
        self.arb.set_output_route("DC", channel=1)
        self.arb.voltage_amplitude = 1.0
        self.arb.set_marker_level_low(0.0, channel=1, marker_type="sync")
        self.arb.set_marker_level_high(1.5, channel=1, marker_type="sync")
        self.arb.continuous_mode = False
        self.arb.gate_mode = False
        self.setup_arb(self.pulse_voltage.value)

        # ===================
        #   Setup the NIDAQ
        # ===================

        self.analog_input = Task()
        self.read = int32()
        self.buf_points = 2*self.samps_per_trig*self.attempts
        self.analog_input.CreateAIVoltageChan("Dev1/ai1", "", DAQmx_Val_Diff,
            self.min_daq_voltage, self.max_daq_voltage, DAQmx_Val_Volts, None)
        self.analog_input.CfgSampClkTiming("", 1e6, DAQmx_Val_Rising, DAQmx_Val_FiniteSamps , self.samps_per_trig)
        self.analog_input.CfgInputBuffer(self.buf_points)
        self.analog_input.CfgDigEdgeStartTrig("/Dev1/PFI0", DAQmx_Val_Rising)
        self.analog_input.SetStartTrigRetriggerable(1)
        self.analog_input.StartTask()

        # Assign methods
        self.field.assign_method(self.mag.set_field)
        self.pulse_voltage.assign_method(self.setup_arb)

    def setup_arb(self,volt):
        def arb_pulse(amplitude, duration, sample_rate=12e9):
            arb_voltage = arb_voltage_lookup()
            pulse_points = int(duration*sample_rate)
            if pulse_points < 320:
                wf = np.zeros(320)
            else:
                wf = np.zeros(int(64*np.ceil(pulse_points/64.0)))  # length must be an int for np.zeros
            wf[:pulse_points] = np.sign(amplitude)*arb_voltage(abs(amplitude))
            return wf

        self.arb.abort()
        self.arb.delete_all_waveforms()
        self.arb.reset_sequence_table()

        # Reset waveform
        reset_wf    = arb_pulse(-self.polarity*self.reset_amplitude, self.reset_duration)
        wf_data     = KeysightM8190A.create_binary_wf_data(reset_wf)
        rst_segment_id  = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, rst_segment_id)

        # Switching waveform
        switch_wf    = arb_pulse(self.polarity*volt, self.pulse_duration.value)
        wf_data     = KeysightM8190A.create_binary_wf_data(switch_wf)
        sw_segment_id  = self.arb.define_waveform(len(wf_data))
        self.arb.upload_waveform(wf_data, sw_segment_id)

        # NIDAQ trigger waveform
        nidaq_trig_wf = KeysightM8190A.create_binary_wf_data(np.zeros(3200), sync_mkr=1)
        nidaq_trig_segment_id = self.arb.define_waveform(len(nidaq_trig_wf))
        self.arb.upload_waveform(nidaq_trig_wf, nidaq_trig_segment_id)

        settle_pts = int(640*np.ceil(self.settle_delay * 12e9 / 640))

        scenario = Scenario()
        seq = Sequence(sequence_loop_ct=int(self.attempts))
        #First try with reset flipping pulse
        seq.add_waveform(rst_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0) # bonus non-contiguous memory delay
        seq.add_waveform(sw_segment_id)
        seq.add_idle(settle_pts, 0.0)
        seq.add_waveform(nidaq_trig_segment_id)
        seq.add_idle(1 << 16, 0.0) # bonus non-contiguous memory delay
        scenario.sequences.append(seq)
        self.arb.upload_scenario(scenario, start_idx=0)
        self.arb.sequence_mode = "SCENARIO"
        self.arb.scenario_advance_mode = "REPEAT"
        self.arb.scenario_start_index = 0
        self.arb.run()

    async def run(self):
        # Keep track of the previous values
        logger.debug("Waiting for filters.")
        await asyncio.sleep(1.0)
        self.arb.advance()
        self.arb.trigger()
        buf = np.empty(self.buf_points)
        self.analog_input.ReadAnalogF64(self.buf_points, -1, DAQmx_Val_GroupByChannel,
                                        buf, self.buf_points, byref(self.read), None)
        await self.voltage.push(buf)
        # Seemingly we need to give the filters some time to catch up here...
        await asyncio.sleep(0.002)
        logger.debug("Stream has filled {} of {} points".format(self.voltage.points_taken, self.voltage.num_points() ))

    def shutdown_instruments(self):
        self.keith.current = 0.0
        # self.mag.zero()
        self.arb.stop()
        try:
            self.analog_input.StopTask()
        except Exception:
            print("Warning: failed to stop task (this normally happens with no consequences when taking multiple samples per trigger).")
Example #13
class KernelIntegrator(Filter):

    sink = InputConnector()
    source = OutputConnector()
    kernel = Parameter()
    bias = FloatParameter(default=0.0)
    simple_kernel = BoolParameter(default=True)
    box_car_start = FloatParameter(default=0.0)
    box_car_stop = FloatParameter(default=100e-9)
    frequency = FloatParameter(default=0.0)
    """Integrate with a given kernel. Kernel will be padded/truncated to match record length"""
    def __init__(self, **kwargs):
        super(KernelIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        self.quince_parameters = [
            self.simple_kernel, self.frequency, self.box_car_start,
            self.box_car_stop
        ]

    def update_descriptors(self):
        if not self.simple_kernel and self.kernel.value is None:
            raise ValueError("Integrator was passed kernel None")

        logger.debug(
            'Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()
        if self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.frequency.value * time_step *
                             time_pts)
        else:
            kernel = eval(self.kernel.value.encode('unicode_escape'))
        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for os in self.source.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()

    async def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
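
Stripped of the stream plumbing, the integrator is a kernel construction plus one np.inner per batch of records. A self-contained numpy sketch of the box-car path (record length, timing, and frequency are arbitrary assumptions):

import numpy as np

# Build a modulated box-car kernel, as in update_descriptors (made-up values).
record_length = 256
time_pts = np.arange(record_length) * 1e-9        # 1 ns sampling
time_step = time_pts[1] - time_pts[0]
kernel = np.zeros(record_length, dtype=np.complex128)
kernel[int(20e-9/time_step):int(200e-9/time_step) + 1] = 1.0
kernel *= np.exp(2j * np.pi * 10e6 * time_pts)    # demodulation at 10 MHz

# Integrate: reshape a flat buffer into records and reduce each one,
# exactly the np.inner step in process_data.
data = np.random.randn(4 * record_length)         # four fake records
integrated = np.inner(data.reshape(-1, record_length), kernel)
print(integrated.shape)                           # -> (4,)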
Example #14
class KernelIntegrator(Filter):

    sink = InputConnector()
    source = OutputConnector()
    kernel = Parameter()
    bias = FloatParameter(default=0.0)
    simple_kernel = BoolParameter(default=True)
    box_car_start = FloatParameter(default=0.0)
    box_car_stop = FloatParameter(default=100e-9)
    demod_frequency = FloatParameter(default=0.0)
    """Integrate with a given kernel. Kernel will be padded/truncated to match record length"""
    def __init__(self, **kwargs):
        super(KernelIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        # self.quince_parameters = [self.simple_kernel, self.demod_frequency, self.box_car_start, self.box_car_stop]

    def update_descriptors(self):
        if not self.simple_kernel and self.kernel.value is None:
            raise ValueError("Integrator was passed kernel None")

        logger.debug(
            'Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
            self.filter_name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()

        if self.kernel.value:
            if os.path.exists(
                    os.path.join(config.KernelDir,
                                 self.kernel.value + '.txt')):
                kernel = np.loadtxt(
                    os.path.join(config.KernelDir, self.kernel.value + '.txt'),
                    dtype=complex,
                    converters={
                        0: lambda s: complex(s.decode().replace('+-', '-'))
                    })
            else:
                try:
                    kernel = eval(self.kernel.value.encode('unicode_escape'))
                except Exception:
                    raise ValueError(
                        'Kernel invalid. Provide a file name or an expression to evaluate'
                    )
            if self.simple_kernel.value:
                logger.warning(
                    "Using specified kernel. To use a box car filter instead, clear kernel.value"
                )

        elif self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.demod_frequency.value *
                             time_pts)
        else:
            raise ValueError(
                'Kernel invalid. Either provide a file name or an expression to evaluate or set simple_kernel.value to true'
            )
        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for ost in self.source.output_streams:
            ost.set_descriptor(output_descriptor)
            ost.end_connector.update_descriptors()

    def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            os.push(filtered)
Example #15
class Channelizer(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()
    decimation_factor = IntParameter(value_range=(1, 100), default=2, snap=1)
    frequency = FloatParameter(value_range=(-5e9, 5e9),
                               increment=1.0e6,
                               default=-9e6)
    bandwidth = FloatParameter(value_range=(0.00, 100e6),
                               increment=0.1e6,
                               default=5e6)

    def __init__(self,
                 frequency=None,
                 bandwidth=None,
                 decimation_factor=None,
                 **kwargs):
        super(Channelizer, self).__init__(**kwargs)
        if frequency:
            self.frequency.value = frequency
        if bandwidth:
            self.bandwidth.value = bandwidth
        if decimation_factor:
            self.decimation_factor.value = decimation_factor
        self.quince_parameters = [
            self.decimation_factor, self.frequency, self.bandwidth
        ]

    def update_descriptors(self):
        logger.debug(
            'Updating Channelizer "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        # extract record time sampling
        time_pts = self.sink.descriptor.axes[-1].points
        self.record_length = len(time_pts)
        self.time_step = time_pts[1] - time_pts[0]
        logger.debug("Channelizer time_step = {}".format(self.time_step))

        # convert bandwidth normalized to Nyquist interval
        n_bandwidth = self.bandwidth.value * self.time_step * 2
        n_frequency = self.frequency.value * self.time_step * 2

        # arbitrarily decide on three stage filter pipeline
        # 1. first stage decimating filter on real data
        # 2. second stage decimating filter on mixed product to boost n_bandwidth
        # 3. final channel selecting filter at n_bandwidth/2

        # anecdotally don't decimate more than a factor of eight for stability

        self.decim_factors = [1] * 3
        self.filters = [None] * 3

        # first stage decimating filter
        # maximize first stage decimation:
        #     * minimize subsequent stages time taken
        #     * filter and decimate while signal is still real
        #     * first stage decimation cannot be too large or then 2omega signal from mixing will alias
        d1 = 1
        while (d1 < 8) and (2 * n_frequency <=
                            0.8 / d1) and (d1 < self.decimation_factor.value):
            d1 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if d1 > 1:
            # create an anti-aliasing filter
            # pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / d1)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[0] = d1
            self.filters[0] = (b, a)

        # store decimated reference for mix down
        ref = np.exp(2j * np.pi * self.frequency.value * time_pts[::d1],
                     dtype=np.complex64)
        self.reference_r = np.real(ref)
        self.reference_i = np.imag(ref)

        # second stage filter to bring n_bandwidth/2 up
        # decimation cannot be too large or will impinge on channel bandwidth (keep n_bandwidth/2 <= 0.8)
        d2 = 1
        while (d2 < 8) and ((d1 * d2) < self.decimation_factor.value) and (
                n_bandwidth / 2 <= 0.8):
            d2 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if d2 > 1:
            # create an anti-aliasing filter
            # pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / d2)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[1] = d2
            self.filters[1] = (b, a)

        # final channel selection filter
        if n_bandwidth < 0.1:
            raise ValueError(
                "Insufficient decimation to achieve stable filter")

        b, a = scipy.signal.cheby1(4, 3, n_bandwidth / 2)
        b = np.float32(b)
        a = np.float32(a)
        self.decim_factors[2] = self.decimation_factor.value // (d1 * d2)
        self.filters[2] = (b, a)

        # update output descriptors
        decimated_descriptor = DataStreamDescriptor()
        decimated_descriptor.axes = self.sink.descriptor.axes[:]
        decimated_descriptor.axes[-1] = deepcopy(self.sink.descriptor.axes[-1])
        decimated_descriptor.axes[-1].points = self.sink.descriptor.axes[
            -1].points[self.decimation_factor.value -
                       1::self.decimation_factor.value]
        decimated_descriptor.axes[
            -1].original_points = decimated_descriptor.axes[-1].points
        decimated_descriptor.exp_src = self.sink.descriptor.exp_src
        decimated_descriptor.dtype = np.complex64
        for os in self.source.output_streams:
            os.set_descriptor(decimated_descriptor)
            if os.end_connector is not None:
                os.end_connector.update_descriptors()

    async def process_data(self, data):
        # Assume for now we get an integer number of records at a time
        # TODO: handle partial records
        num_records = data.size // self.record_length
        reshaped_data = np.reshape(data, (num_records, self.record_length),
                                   order="C")

        # first stage decimating filter
        if self.filters[0] is not None:
            stacked_coeffs = np.concatenate(self.filters[0])
            # filter
            filtered = np.empty_like(reshaped_data)
            libipp.filter_records_iir(stacked_coeffs,
                                      self.filters[0][0].size - 1,
                                      reshaped_data, self.record_length,
                                      num_records, filtered)

            # decimate
            if self.decim_factors[0] > 1:
                filtered = filtered[:, ::self.decim_factors[0]]
        else:
            # No first-stage filter; pass the reshaped records through unchanged
            filtered = reshaped_data

        # mix with reference
        # keep real and imaginary separate for filtering below
        filtered_r = self.reference_r * filtered
        filtered_i = self.reference_i * filtered

        # channel selection filters
        for ct in [1, 2]:
            if self.filters[ct] is None:
                continue

            stacked_coeffs = np.concatenate(self.filters[ct])
            out_r = np.empty_like(filtered_r)
            out_i = np.empty_like(filtered_i)
            libipp.filter_records_iir(stacked_coeffs,
                                      self.filters[ct][0].size - 1, filtered_r,
                                      filtered_r.shape[-1], num_records, out_r)
            libipp.filter_records_iir(stacked_coeffs,
                                      self.filters[ct][0].size - 1, filtered_i,
                                      filtered_i.shape[-1], num_records, out_i)

            # decimate
            if self.decim_factors[ct] > 1:
                filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]],
                                     order="C")
                filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]],
                                     order="C")
            else:
                filtered_r = out_r
                filtered_i = out_i

        filtered = filtered_r + 1j * filtered_i

        # recover gain from selecting single sideband
        filtered *= 2

        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
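
The same mix-filter-decimate idea in a minimal standalone form (a single filter stage rather than the three-stage pipeline above; sample rate, tones, and cutoff are illustrative):

import numpy as np
import scipy.signal

fs = 1e9                                   # assumed sample rate
t = np.arange(2048) / fs
sig = np.cos(2*np.pi*50e6*t) + 0.5*np.cos(2*np.pi*200e6*t)   # two channels

# Mix the 50 MHz channel to DC, low-pass it, decimate, recover the SSB gain.
mixed = sig * np.exp(-2j*np.pi*50e6*t)
b, a = scipy.signal.cheby1(4, 3, 5e6 / (fs / 2))   # ~5 MHz channel filter
baseband = 2 * scipy.signal.lfilter(b, a, mixed)[::8]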
Example #16
class Averager(Filter):
    """Takes data and collapses along the specified axis."""

    sink = InputConnector()
    partial_average = OutputConnector()
    source = OutputConnector()
    final_variance = OutputConnector()
    final_counts = OutputConnector()
    axis = Parameter()
    threshold = FloatParameter()

    def __init__(self, axis=None, threshold=0.5, **kwargs):
        super(Averager, self).__init__(**kwargs)
        self.axis.value = axis
        self.threshold.value = threshold
        self.points_before_final_average = None
        self.points_before_partial_average = None
        self.sum_so_far = None
        self.num_averages = None
        self.passthrough = False

        # Rate limiting for partial averages
        self.last_update = time.time()
        self.update_interval = 0.5

    def update_descriptors(self):
        logger.debug(
            'Updating averager "%s" descriptors based on input descriptor: %s.',
            self.filter_name, self.sink.descriptor)
        descriptor_in = self.sink.descriptor
        names = [a.name for a in descriptor_in.axes]

        self.axis.allowed_values = names

        if self.axis.value is None:
            self.axis.value = descriptor_in.axes[0].name

        # Convert named axes to an index
        if self.axis.value not in names:
            raise ValueError(
                "Could not find axis {} within the DataStreamDescriptor {}".
                format(self.axis.value, descriptor_in))
        self.axis_num = descriptor_in.axis_num(self.axis.value)
        logger.debug("Averaging over axis #%d: %s", self.axis_num,
                     self.axis.value)

        self.data_dims = descriptor_in.data_dims()
        # If we only have a single point along this axis, then just pass the data straight through
        if self.data_dims[self.axis_num] == 1:
            logger.debug("Averaging over a singleton axis")
            self.passthrough = True

        if self.axis_num == len(descriptor_in.axes) - 1:
            logger.debug("Performing scalar average!")
            self.points_before_partial_average = 1
            self.avg_dims = [1]
        else:
            self.points_before_partial_average = descriptor_in.num_points_through_axis(
                self.axis_num + 1)
            self.avg_dims = self.data_dims[self.axis_num + 1:]

        # If we get multiple final average simultaneously
        self.reshape_dims = self.data_dims[self.axis_num:]
        if self.axis_num > 0:
            self.reshape_dims = [-1] + self.reshape_dims
        self.mean_axis = self.axis_num - len(self.data_dims)

        self.points_before_final_average = descriptor_in.num_points_through_axis(
            self.axis_num)
        logger.debug("Points before partial average: %s.",
                     self.points_before_partial_average)
        logger.debug("Points before final average: %s.",
                     self.points_before_final_average)
        logger.debug("Data dimensions are %s", self.data_dims)
        logger.debug("Averaging dimensions are %s", self.avg_dims)

        # Define final axis descriptor
        descriptor = descriptor_in.copy()
        self.num_averages = descriptor.pop_axis(self.axis.value).num_points()
        logger.debug("Number of partial averages is %d", self.num_averages)

        if len(descriptor.axes) == 0:
            # We will be left with only a single point here!
            descriptor.add_axis(DataAxis("result", [0]))

        self.sum_so_far = np.zeros(self.avg_dims, dtype=descriptor.dtype)
        self.current_avg_frame = np.zeros(self.points_before_final_average,
                                          dtype=descriptor.dtype)
        self.partial_average.descriptor = descriptor
        self.source.descriptor = descriptor
        self.excited_counts = np.zeros(self.data_dims, dtype=np.int64)

        # We can update the visited_tuples upfront if none
        # of the sweeps are adaptive...
        desc_out_dtype = descriptor_in.axis_data_type(
            with_metadata=True, excluding_axis=self.axis.value)
        if not descriptor_in.is_adaptive():
            vals = [
                a.points_with_metadata() for a in descriptor_in.axes
                if a.name != self.axis.value
            ]
            nested_list = list(itertools.product(*vals))
            flattened_list = [
                tuple((val for sublist in line for val in sublist))
                for line in nested_list
            ]
            descriptor.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.partial_average.output_streams:
            stream.set_descriptor(descriptor)
            stream.descriptor.buffer_mult_factor = 20
            stream.end_connector.update_descriptors()

        for stream in self.source.output_streams:
            stream.set_descriptor(descriptor)
            stream.end_connector.update_descriptors()

        # Define variance axis descriptor
        descriptor_var = descriptor_in.copy()
        descriptor_var.data_name = "Variance"
        descriptor_var.pop_axis(self.axis.value)
        if descriptor_var.unit:
            descriptor_var.unit = descriptor_var.unit + "^2"
        descriptor_var.metadata["num_averages"] = self.num_averages
        self.final_variance.descriptor = descriptor_var

        # Define counts axis descriptor
        descriptor_count = descriptor_in.copy()
        descriptor_count.data_name = "Counts"
        descriptor_count.dtype = np.float64
        descriptor_count.pop_axis(self.axis.value)
        descriptor_count.add_axis(DataAxis("state", [0, 1]), position=0)
        if descriptor_count.unit:
            descriptor_count.unit = "counts"
        descriptor_count.metadata["num_counts"] = self.num_averages
        self.final_counts.descriptor = descriptor_count

        if not descriptor_in.is_adaptive():
            descriptor_var.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor_var.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.final_variance.output_streams:
            stream.set_descriptor(descriptor_var)
            stream.end_connector.update_descriptors()

        for stream in self.final_counts.output_streams:
            stream.set_descriptor(descriptor_count)
            stream.end_connector.update_descriptors()

    def final_init(self):
        if self.points_before_final_average is None:
            raise Exception(
                "Average has not been initialized. Run 'update_descriptors'")

        self.completed_averages = 0
        self.idx_frame = 0
        self.idx_global = 0
        # We only need to accumulate up to the averaging axis
        # BUT we may get something longer at any given time!
        self.carry = np.zeros(0, dtype=self.source.descriptor.dtype)

    def process_data(self, data):

        if self.passthrough:
            for os in self.source.output_streams:
                os.push(data)
            for os in self.final_variance.output_streams:
                os.push(data * 0.0)
            for os in self.partial_average.output_streams:
                os.push(data)
            return

        # TODO: handle unflattened data separately
        if len(data.shape) > 1:
            data = data.flatten()
        #handle single points
        elif not isinstance(data, np.ndarray) and (data.size == 1):
            data = np.array([data])

        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))
            self.carry = np.zeros(0, dtype=self.source.descriptor.dtype)

        idx = 0
        while idx < data.size:
            #check whether we have enough data to fill an averaging frame
            if data.size - idx >= self.points_before_final_average:
                #logger.debug("Have {} points, enough for final avg.".format(data.size))
                # How many chunks can we process at once?
                num_chunks = int(
                    (data.size - idx) / self.points_before_final_average)
                new_points = num_chunks * self.points_before_final_average
                reshaped = data[idx:idx + new_points].reshape(
                    self.reshape_dims)
                averaged = reshaped.mean(axis=self.mean_axis)
                idx += new_points

                # do state assignment
                excited_states = (np.real(reshaped) >
                                  self.threshold.value).sum(
                                      axis=self.mean_axis)
                ground_states = self.num_averages - excited_states

                if self.sink.descriptor.is_adaptive():
                    new_tuples = self.sink.descriptor.tuples(
                    )[self.idx_global:self.idx_global + new_points]
                    new_tuples_stripped = remove_fields(
                        new_tuples, self.axis.value)
                    take_axis = -1 if self.axis_num > 0 else 0
                    reduced_tuples = new_tuples_stripped.reshape(
                        self.reshape_dims).take((0, ), axis=take_axis)
                    self.idx_global += new_points

                # Add to Visited tuples
                if self.sink.descriptor.is_adaptive():
                    for os in self.source.output_streams + self.final_variance.output_streams + self.partial_average.output_streams:
                        os.descriptor.visited_tuples = np.append(
                            os.descriptor.visited_tuples, reduced_tuples)

                for os in self.source.output_streams:
                    os.push(averaged)

                for os in self.final_variance.output_streams:
                    os.push(reshaped.var(axis=self.mean_axis,
                                         ddof=1))  # N-1 in the denominator

                for os in self.partial_average.output_streams:
                    os.push(averaged)

                for os in self.final_counts.output_streams:
                    os.push(ground_states)
                    os.push(excited_states)

            # Maybe we can fill a partial frame
            elif data.size - idx >= self.points_before_partial_average:
                # logger.info("Have {} points, enough for partial avg.".format(data.size))
                # How many chunks can we process at once?
                num_chunks = int(
                    (data.size - idx) / self.points_before_partial_average)
                new_points = num_chunks * self.points_before_partial_average

                # Find the appropriate dimensions for the partial
                partial_reshape_dims = self.reshape_dims[:]
                partial_reshape_dims[self.mean_axis] = -1
                partial_reshape_dims = partial_reshape_dims[self.mean_axis:]

                reshaped = data[idx:idx +
                                new_points].reshape(partial_reshape_dims)
                summed = reshaped.sum(axis=self.mean_axis)
                self.sum_so_far += summed

                self.current_avg_frame[self.idx_frame:self.idx_frame +
                                       new_points] = data[idx:idx + new_points]
                idx += new_points
                self.idx_frame += new_points

                self.completed_averages += num_chunks

                # If we now have enough for the final average, push to both partial and final...
                if self.completed_averages == self.num_averages:
                    reshaped = self.current_avg_frame.reshape(
                        partial_reshape_dims)
                    for os in self.source.output_streams + self.partial_average.output_streams:
                        os.push(reshaped.mean(axis=self.mean_axis))
                    for os in self.final_variance.output_streams:
                        os.push(
                            np.real(reshaped).var(axis=self.mean_axis,
                                                  ddof=1) + 1j *
                            np.imag(reshaped).var(axis=self.mean_axis, ddof=1)
                        )  # N-1 in the denominator

                    # do state assignment
                    excited_states = (np.real(reshaped) <
                                      self.threshold.value).sum(
                                          axis=self.mean_axis)
                    ground_states = self.num_averages - excited_states
                    for os in self.final_counts.output_streams:
                        os.push(ground_states)
                        os.push(excited_states)

                    self.sum_so_far[:] = 0.0
                    self.current_avg_frame[:] = 0.0
                    self.completed_averages = 0
                    self.idx_frame = 0
                else:
                    # Emit a partial average since we've accumulated enough data
                    if (time.time() - self.last_update >=
                            self.update_interval):
                        for os in self.partial_average.output_streams:
                            os.push(self.sum_so_far / self.completed_averages)
                        self.last_update = time.time()

            # otherwise just add it to the carry
            else:
                self.carry = data[idx:]
                break
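
The final-average branch boils down to numpy's reshape-and-mean over a frame. A toy version with made-up dimensions:

import numpy as np

num_averages = 4      # points along the averaged axis
inner_points = 10     # points below that axis in each frame
data = np.random.randn(2 * num_averages * inner_points)   # two whole frames

# One frame is num_averages x inner_points; averaging collapses the frame's
# leading axis, as process_data does via reshape_dims and mean_axis.
averaged = data.reshape(-1, num_averages, inner_points).mean(axis=1)
print(averaged.shape)   # -> (2, 10)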
Example #17
class WindowIntegrator(Filter):
    """
    Allow a kernel from the set {'chebwin', 'blackman', 'slepian',
    'boxcar'} to be set for the duration of the start and stop values.

    YAML parameters are:
    type: WindowIntegrator
    source: Demod-q1
    kernel_type: 'chebwin'
    start: 5.0e-07
    stop: 9.0e-07

    See: https://docs.scipy.org/doc/scipy/reference/signal.html for more
    details on the filters specifics.
    """

    sink = InputConnector()
    source = OutputConnector()
    bias = FloatParameter(default=0.0)
    kernel_type = Parameter(default='boxcar', allowed_values=['chebwin',\
        'blackman', 'slepian', 'boxcar'])
    start = FloatParameter(default=0.0)
    stop = FloatParameter(default=100e-9)
    frequency = FloatParameter(default=0.0)
    """Integrate with a given kernel. Kernel will be padded/truncated to match record length"""
    def __init__(self, **kwargs):
        super(WindowIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        self.quince_parameters = [
            self.kernel_type, self.frequency, self.start, self.stop
        ]

    def update_descriptors(self):
        if not self.kernel_type:
            raise ValueError("Integrator was passed kernel None")

        logger.debug(
            'Updating WindowIntegrator "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()

        time_pts = self.sink.descriptor.axes[-1].points
        time_step = time_pts[1] - time_pts[0]
        kernel = np.zeros(record_length, dtype=np.complex128)
        sample_start = int(self.start.value / time_step)
        sample_stop = int(self.stop.value / time_step) + 1
        if self.kernel_type.value == 'boxcar':
            kernel[sample_start:sample_stop] = 1.0
        elif self.kernel_type.value == 'chebwin':
            # create a Dolph-Chebyshev window with 100 dB attenuation
            kernel[sample_start:sample_stop] = \
                chebwin(sample_stop-sample_start, at=100)
        elif self.kernel_type.value == 'blackman':
            kernel[sample_start:sample_stop] = \
                blackman(sample_stop-sample_start)
        elif self.kernel_type.value == 'slepian':
            # create a Slepian window with 0.2 bandwidth
            kernel[sample_start:sample_stop] = \
                slepian(sample_stop-sample_start, width=0.2)

        # add modulation
        kernel *= np.exp(2j * np.pi * self.frequency.value * time_step *
                         time_pts)

        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for os in self.source.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()

    async def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
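
The window shapes come directly from scipy.signal; a quick standalone look at the non-boxcar options (window length is arbitrary; note that `slepian` has been removed from recent SciPy releases, with `dpss` as the replacement):

import numpy as np
from scipy.signal import windows

n = 128                                  # assumed window length
w_cheb = windows.chebwin(n, at=100)      # Dolph-Chebyshev, 100 dB sidelobes
w_black = windows.blackman(n)
w_slep = windows.dpss(n, 0.2 * n / 2)    # approximate stand-in for slepian(n, width=0.2)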
Example #18
class Channelizer(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel. If
    an axis name is supplied to `follow_axis`, then the filter will demodulate at the frequency
    `axis_frequency_value - follow_freq_offset`; otherwise it will demodulate at `frequency`. Note that
    the filter coefficients are still calculated with respect to the `frequency` parameter, so it should
    be chosen accordingly when `follow_axis` is defined."""

    sink = InputConnector()
    source = OutputConnector()
    follow_axis = Parameter(default="")  # Name of the axis to follow
    follow_freq_offset = FloatParameter(default=0.0)  # Offset
    decimation_factor = IntParameter(value_range=(1, 100), default=4, snap=1)
    frequency = FloatParameter(value_range=(-10e9, 10e9),
                               increment=1.0e6,
                               default=10e6)
    bandwidth = FloatParameter(value_range=(0.00, 100e6),
                               increment=0.1e6,
                               default=5e6)

    def __init__(self,
                 frequency=None,
                 bandwidth=None,
                 decimation_factor=None,
                 follow_axis=None,
                 follow_freq_offset=None,
                 **kwargs):
        super(Channelizer, self).__init__(**kwargs)
        if frequency is not None:
            self.frequency.value = frequency
        if bandwidth is not None:
            self.bandwidth.value = bandwidth
        if decimation_factor is not None:
            self.decimation_factor.value = decimation_factor
        if follow_axis is not None:
            self.follow_axis.value = follow_axis
        if follow_freq_offset is not None:
            self.follow_freq_offset.value = follow_freq_offset
        self.quince_parameters = [
            self.decimation_factor, self.frequency, self.bandwidth
        ]
        self._phase = 0.0

    def final_init(self):
        self.init_filters(self.frequency.value, self.bandwidth.value)

        if self.follow_axis.value != "":
            desc = self.sink.descriptor
            axis_num = desc.axis_num(self.follow_axis.value)
            self.pts_before_freq_update = desc.num_points_through_axis(
                axis_num + 1)
            self.pts_before_freq_reset = desc.num_points_through_axis(axis_num)
            self.demod_freqs = desc.axes[
                axis_num].points - self.follow_freq_offset.value
            self.current_freq = 0
            self.update_references(self.current_freq)
        self.idx = 0

        # For storing carryover if getting uneven buffers
        self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

    def update_references(self, frequency):
        # store decimated reference for mix down
        # phase_drift = 2j*np.pi*0.5e-6 * (abs(frequency) - 100e6)
        ref = np.exp(2j * np.pi * -frequency * self.time_pts[::self.d1] +
                     1j * self._phase,
                     dtype=np.complex64)

        self.reference = ref
        self.reference_r = np.real(ref)
        self.reference_i = np.imag(ref)

    def init_filters(self, frequency, bandwidth):
        # normalize bandwidth and frequency to the Nyquist interval
        n_bandwidth = bandwidth * self.time_step * 2
        n_frequency = abs(frequency) * self.time_step * 2

        # arbitrarily decide on a three-stage filter pipeline:
        # 1. first-stage decimating filter on the real data
        # 2. second-stage decimating filter on the mixed product, to raise the normalized bandwidth
        # 3. final channel-selecting filter at n_bandwidth/2

        # anecdotally, don't decimate by more than a factor of eight per stage for stability

        self.decim_factors = [1] * 3
        self.filters = [None] * 3

        # first stage decimating filter
        # maximize first-stage decimation:
        #     * minimizes time spent in the subsequent stages
        #     * filters and decimates while the signal is still real
        #     * cannot be too large, or the 2*omega product of the mix will alias
        self.d1 = 1
        while (self.d1 < 8) and (2 * n_frequency <= 0.8 / self.d1) and (
                self.d1 < self.decimation_factor.value):
            self.d1 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d1 > 1:
            # create an anti-aliasing filter
            # pass band at 0.8 / decimation factor of Nyquist; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d1)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[0] = self.d1
            self.filters[0] = (b, a)

        # store decimated reference for mix down
        self.update_references(frequency)

        # second stage filter to bring n_bandwidth/2 up
        # decimation cannot be too large or it will impinge on the channel bandwidth (keep n_bandwidth/2 <= 0.8)
        self.d2 = 1
        while (self.d2 < 8) and (
            (self.d1 * self.d2) <
                self.decimation_factor.value) and (n_bandwidth / 2 <= 0.8):
            self.d2 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d2 > 1:
            # create an anti-aliasing filter
            # pass band at 0.8 / decimation factor of Nyquist; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d2)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[1] = self.d2
            self.filters[1] = (b, a)

        # final channel selection filter
        if n_bandwidth < 0.1:
            raise ValueError(
                "Insufficient decimation to achieve stable filter: {}.".format(
                    n_bandwidth))

        b, a = scipy.signal.cheby1(4, 3, n_bandwidth / 2)
        b = np.float32(b)
        a = np.float32(a)
        self.decim_factors[2] = self.decimation_factor.value // (self.d1 *
                                                                 self.d2)
        self.filters[2] = (b, a)
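        # Sanity note: the per-stage decimations multiply back to the request,
        # i.e. d1 * d2 * decim_factors[2] == decimation_factor.value, provided
        # decimation_factor.value is divisible by d1 * d2.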

    def update_descriptors(self):
        logger.debug(
            'Updating Channelizer "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        # extract record time sampling
        self.time_pts = self.sink.descriptor.axes[-1].points
        self.record_length = len(self.time_pts)
        self.time_step = self.time_pts[1] - self.time_pts[0]
        logger.debug("Channelizer time_step = {}".format(self.time_step))

        # We will be decimating along a time axis, which is always
        # going to be the last axis given the way we usually take data.
        # TODO: perform this function along a named axis rather than a numbered axis
        # in case something about this changes.

        # update output descriptors
        decimated_descriptor = DataStreamDescriptor()
        decimated_descriptor.axes = self.sink.descriptor.axes[:]
        decimated_descriptor.axes[-1] = deepcopy(self.sink.descriptor.axes[-1])
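        # keep every decimation_factor-th time point (the last point of each group)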
        decimated_descriptor.axes[-1].points = self.sink.descriptor.axes[
            -1].points[self.decimation_factor.value -
                       1::self.decimation_factor.value]
        decimated_descriptor.axes[
            -1].original_points = decimated_descriptor.axes[-1].points
        decimated_descriptor._exp_src = self.sink.descriptor._exp_src
        decimated_descriptor.dtype = np.complex64
        self.output_descriptor = decimated_descriptor
        for os in self.source.output_streams:
            os.set_descriptor(decimated_descriptor)
            if os.end_connector is not None:
                os.end_connector.update_descriptors()

    async def process_data(self, data):

        # Append any data carried from the last run
        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))

        # This is the largest number of records we can handle
        num_records = data.size // self.record_length

        # This is the carryover that we'll store until next round.
        # If nothing is left then reset the carryover.
        remaining_points = data.size % self.record_length
        if remaining_points > 0:
            if num_records > 0:
                self.carry = data[-remaining_points:]
                data = data[:-remaining_points]
            else:
                self.carry = data
        else:
            self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

        if num_records > 0:
            # The records are processed in parallel after being reshaped here
            reshaped_data = np.reshape(data, (num_records, self.record_length),
                                       order="C")

            # Update demodulation frequency if necessary
            if self.follow_axis.value != "":
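                # self.idx counts raw input points; each block of
                # pts_before_freq_update points shares one demodulation
                # frequency, wrapping after pts_before_freq_reset points
                # (one full pass through the followed axis)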
                freq = self.demod_freqs[(self.idx % self.pts_before_freq_reset)
                                        // self.pts_before_freq_update]
                if freq != self.current_freq:
                    self.update_references(freq)
                    self.current_freq = freq

            self.idx += data.size

            # first stage decimating filter
            if self.filters[0] is None:
                filtered = reshaped_data
            else:
                stacked_coeffs = np.concatenate(self.filters[0])
                # filter
                if np.iscomplexobj(reshaped_data):
                    # TODO: compile complex versions of the IPP functions
                    filtered_r = np.empty_like(reshaped_data, dtype=np.float32)
                    filtered_i = np.empty_like(reshaped_data, dtype=np.float32)
                    libipp.filter_records_iir(
                        stacked_coeffs, self.filters[0][0].size - 1,
                        np.ascontiguousarray(
                            reshaped_data.real.astype(np.float32)),
                        self.record_length, num_records, filtered_r)
                    libipp.filter_records_iir(
                        stacked_coeffs, self.filters[0][0].size - 1,
                        np.ascontiguousarray(
                            reshaped_data.imag.astype(np.float32)),
                        self.record_length, num_records, filtered_i)
                    filtered = filtered_r + 1j * filtered_i
                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]
                else:
                    filtered = np.empty_like(reshaped_data)
                    libipp.filter_records_iir(stacked_coeffs,
                                              self.filters[0][0].size - 1,
                                              reshaped_data,
                                              self.record_length, num_records,
                                              filtered)

                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]

            # mix with reference
            # keep real and imaginary separate for filtering below
            if np.iscomplexobj(reshaped_data):
                filtered *= self.reference
                filtered_r = filtered.real
                filtered_i = filtered.imag
            else:
                filtered_r = self.reference_r * filtered
                filtered_i = self.reference_i * filtered

            # channel selection filters
            for ct in [1, 2]:
                if self.filters[ct] is None:
                    continue

                stacked_coeffs = np.concatenate(self.filters[ct])
                out_r = np.empty_like(filtered_r, dtype=np.float32)
                out_i = np.empty_like(filtered_i, dtype=np.float32)
                libipp.filter_records_iir(
                    stacked_coeffs, self.filters[ct][0].size - 1,
                    np.ascontiguousarray(filtered_r.astype(np.float32)),
                    filtered_r.shape[-1], num_records, out_r)
                libipp.filter_records_iir(
                    stacked_coeffs, self.filters[ct][0].size - 1,
                    np.ascontiguousarray(filtered_i.astype(np.float32)),
                    filtered_i.shape[-1], num_records, out_i)

                # decimate
                if self.decim_factors[ct] > 1:
                    filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]],
                                         order="C")
                    filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]],
                                         order="C")
                else:
                    filtered_r = out_r
                    filtered_i = out_i

            filtered = filtered_r + 1j * filtered_i

            # recover gain from selecting single sideband
            filtered *= 2

            # push to output connectors
            for os in self.source.output_streams:
                await os.push(filtered)
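The `libipp.filter_records_iir` calls above are batched IIR filters applied record-by-record; `scipy.signal.lfilter` along the last axis does the same job in plain NumPy. A rough, self-contained sketch of one decimate-mix-recover pass (illustrative parameters; `lfilter` stands in for libipp, so this is not the filter's actual code path):

import numpy as np
import scipy.signal

# illustrative parameters (assumptions, not values from the filter above)
record_length, num_records = 1024, 8
time_step = 1e-9
time_pts = time_step * np.arange(record_length)
frequency, d1 = 10e6, 4

# synthetic real-valued records carrying a 10 MHz tone
records = (np.cos(2 * np.pi * frequency * time_pts) +
           0.05 * np.random.randn(num_records, record_length))

# first stage: anti-aliasing filter, then decimate (cf. filters[0])
b, a = scipy.signal.cheby1(4, 3, 0.8 / d1)
filtered = scipy.signal.lfilter(b, a, records, axis=-1)[:, ::d1]

# mix down with the decimated reference (cf. update_references)
reference = np.exp(-2j * np.pi * frequency * time_pts[::d1])
mixed = reference * filtered

# recover the gain lost by selecting a single sideband
demodulated = 2 * mixed
print(demodulated.shape)  # (8, 256)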