Example #1
class X6StreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink   = InputConnector()
    source = OutputConnector()
    phys_channel  = IntParameter(value_range=(1,3), snap=1)
    dsp_channel   = IntParameter(value_range=(0,4), snap=1)
    stream_type   = Parameter(allowed_values=["Raw", "Demodulated", "Integrated"], default='Demodulated')

    def __init__(self, name=""):
        super(X6StreamSelector, self).__init__(name=name)
        self.stream_type.value = "Raw" # One of Raw, Demodulated, Integrated
        self.quince_parameters = [self.phys_channel, self.dsp_channel, self.stream_type]

    def get_descriptor(self, source_instr_settings, channel_settings):
        # Create a channel
        channel = X6Channel(channel_settings)

        descrip = DataStreamDescriptor()
        # If it's an integrated stream, then the time axis has already been eliminated.
        # Otherwise, add the time axis.
        if channel_settings['stream_type'] == 'Raw':
            samp_time = 4.0e-9
            descrip.add_axis(DataAxis("time", samp_time*np.arange(source_instr_settings['record_length']//4)))
            descrip.dtype = np.float64
        elif channel_settings['stream_type'] == 'Demodulated':
            samp_time = 32.0e-9
            descrip.add_axis(DataAxis("time", samp_time*np.arange(source_instr_settings['record_length']//32)))
            descrip.dtype = np.complex128
        else: # Integrated
            descrip.dtype = np.complex128

        return channel, descrip
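As a rough illustration of the branching above (not part of the Auspex source; the record_length value and the helper function are invented), the time axis shrinks by the decimation factor of each stream type and disappears entirely for integrated streams:

import numpy as np

def sketch_time_axis(stream_type, record_length=1024):
    # Mirrors the branches in X6StreamSelector.get_descriptor: raw data keeps
    # one point per 4 ns sample, demodulated data is decimated by 32, and
    # integrated data has no time axis at all.
    if stream_type == "Raw":
        return 4.0e-9 * np.arange(record_length // 4), np.float64
    elif stream_type == "Demodulated":
        return 32.0e-9 * np.arange(record_length // 32), np.complex128
    else:  # Integrated
        return None, np.complex128

for st in ("Raw", "Demodulated", "Integrated"):
    axis, dtype = sketch_time_axis(st)
    print(st, None if axis is None else len(axis), dtype)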
Example #2
class AlazarStreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()
    channel = IntParameter(value_range=(1, 2), snap=1)

    # def __init__(self, name=""):
    #     super(AlazarStreamSelector, self).__init__(name=name)
    # self.channel.value = 1 # Either 1 or 2
    # self.quince_parameters = [self.channel]

    def get_channel(self, channel_proxy):
        """Create and return a channel object corresponding to this stream selector"""
        return AlazarChannel(channel_proxy)

    def get_descriptor(self, stream_selector, receiver_channel):
        """Get the axis descriptor corresponding to this stream selector. For the Alazar cards this
        is always just a time axis."""
        samp_time = 1.0 / receiver_channel.receiver.sampling_rate
        descrip = DataStreamDescriptor()
        descrip.add_axis(
            DataAxis(
                "time",
                samp_time *
                np.arange(receiver_channel.receiver.record_length)))
        return descrip
Example #3
class ProgressBar(Filter):
    """ Display progress bar(s) on the terminal/notebook.

    num: number of progress bars to be displayed, \
    corresponding to the number of axes (counting from the outermost)

    For running in a Jupyter notebook:
    open '_tqdm_notebook.py', \
    search for 'n = int(s[:npos])' \
    and replace it with 'n = float(s[:npos])'
    """
    sink = InputConnector()
    def __init__(self, num=0, notebook=False):
        super(ProgressBar,self).__init__()
        self.num    = num
        self.notebook = notebook
        self.bars   = []
        self.w_id   = 0

    async def run(self):
        self.stream = self.sink.input_streams[0]
        axes = self.stream.descriptor.axes
        num_axes = len(axes)
        totals = [self.stream.descriptor.num_points_through_axis(axis) for axis in range(num_axes)]
        chunk_sizes = [max(1,self.stream.descriptor.num_points_through_axis(axis+1)) for axis in range(num_axes)]
        self.num = min(self.num, num_axes)

        self.bars   = []
        for i in range(self.num):
            if self.notebook:
                self.bars.append(tqdm_notebook(total=totals[i]/chunk_sizes[i]))
            else:
                self.bars.append(tqdm(total=totals[i]/chunk_sizes[i]))
        self.w_id   = 0
        while True:
            if self.stream.done() and self.w_id==self.stream.num_points():
                break

            new_data = np.array(await self.stream.queue.get()).flatten()
            while self.stream.queue.qsize() > 0:
                new_data = np.append(new_data, np.array(self.stream.queue.get_nowait()).flatten())
            self.w_id += new_data.size
            num_data = self.stream.points_taken
            for i in range(self.num):
                if num_data == 0:
                    if self.notebook:
                        self.bars[i].sp(close=True)
                        # Reset the progress bar with a new one
                        self.bars[i] = tqdm_notebook(total=totals[i]/chunk_sizes[i])
                    else:
                        # Reset the progress bar with a new one
                        self.bars[i].close()
                        self.bars[i] = tqdm(total=totals[i]/chunk_sizes[i])
                pos = int(10*num_data / chunk_sizes[i])/10.0 # One decimal is good enough
                if pos > self.bars[i].n:
                    self.bars[i].update(pos - self.bars[i].n)
                num_data = num_data % chunk_sizes[i]
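The bar totals in run come from the stream descriptor: each bar advances once per chunk of the next-inner axis, so totals[i]/chunk_sizes[i] is simply the length of axis i. A self-contained sketch of that arithmetic, assuming num_points_through_axis returns the product of axis sizes from the given axis inward (the axis sizes below are invented):

import numpy as np

# Hypothetical sweep shape: 10 outer points x 50 middle points x 256 samples
axis_sizes = [10, 50, 256]

def points_through_axis(sizes, axis):
    # Assumed stand-in for DataStreamDescriptor.num_points_through_axis:
    # the product of all axis sizes from `axis` inward (1 past the last axis).
    return int(np.prod(sizes[axis:])) if axis < len(sizes) else 1

num_axes = len(axis_sizes)
totals      = [points_through_axis(axis_sizes, ax) for ax in range(num_axes)]
chunk_sizes = [max(1, points_through_axis(axis_sizes, ax + 1)) for ax in range(num_axes)]

# Each bar's total is the length of its own axis; progress advances one unit
# per completed chunk of the axes inside it.
print([t // c for t, c in zip(totals, chunk_sizes)])   # -> [10, 50, 256]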
Example #4
File: io.py Project: ocakgun/Auspex
class WriteToFile(Filter):
    """Writes data to file using the Auspex container type, which is a simple directory structure
    with subdirectories, binary datafiles, and json meta files that store the axis descriptors
    and other information."""

    sink = InputConnector()
    filename = FilenameParameter()
    groupname = Parameter(default='main')

    def __init__(self,
                 filename=None,
                 groupname=None,
                 datasetname='data',
                 **kwargs):
        super(WriteToFile, self).__init__(**kwargs)
        if filename:
            self.filename.value = filename
        if groupname:
            self.groupname.value = groupname
        if datasetname:
            self.datasetname = datasetname

        self.ret_queue = None  # MP queue For returning data

    def final_init(self):
        assert self.filename.value, "Filename never supplied to writer."
        assert self.groupname.value, "Groupname never supplied to writer."
        assert self.datasetname, "Dataset name never supplied to writer."

        self.descriptor = self.sink.input_streams[0].descriptor
        self.container = AuspexDataContainer(self.filename.value)
        self.group = self.container.new_group(self.groupname.value)
        self.mmap = self.container.new_dataset(self.groupname.value,
                                               self.datasetname,
                                               self.descriptor)

        self.w_idx = 0
        self.points_taken = 0

    def get_data_while_running(self, return_queue):
        """Return data to the main thread or user as requested. Use a MP queue to transmit."""
        assert not self.done.is_set(), Exception(
            "Experiment is over and filter done. Please use get_data")
        return_queue.put(np.array(self.mmap))

    def get_data(self):
        assert self.done.is_set(), Exception(
            "Experiment is still running. Please use get_data_while_running")
        container = AuspexDataContainer(self.filename.value)
        return container.open_dataset(self.groupname.value, self.datasetname)

    def process_data(self, data):
        # Write the data
        self.mmap[self.w_idx:self.w_idx + data.size] = data
        self.w_idx += data.size
        self.points_taken = self.w_idx
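The container format described in the docstring amounts to a directory per group holding a binary datafile plus a JSON sidecar of metadata. A self-contained sketch of that idea using numpy.memmap and json (illustrative only; the paths, filenames, and metadata fields are invented, not the actual AuspexDataContainer layout):

import json, os
import numpy as np

root = "/tmp/example.auspex"                     # hypothetical container directory
os.makedirs(os.path.join(root, "main"), exist_ok=True)

# Binary datafile filled incrementally through a memory map, mirroring the
# mmap slicing pattern in process_data above
mm = np.memmap(os.path.join(root, "main", "data.dat"),
               dtype=np.float64, mode="w+", shape=(1000,))
w_idx = 0
chunk = np.random.randn(100)
mm[w_idx:w_idx + chunk.size] = chunk
w_idx += chunk.size

# JSON meta file standing in for the stored axis descriptors
meta = {"dtype": "float64", "shape": [1000],
        "axes": [{"name": "time", "num_points": 1000}]}
with open(os.path.join(root, "main", "data_meta.json"), "w") as f:
    json.dump(meta, f)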
Example #5
File: debug.py Project: ocakgun/Auspex
class Passthrough(Filter):
    sink   = InputConnector()
    source = OutputConnector()

    def __init__(self, *args, **kwargs):
        super(Passthrough, self).__init__(*args, **kwargs)

    def process_data(self, data):
        for os in self.source.output_streams:
            os.push(data)
Example #6
File: debug.py Project: ocakgun/Auspex
class Print(Filter):
    """Debug printer that prints data comming through filter"""

    sink = InputConnector()

    def __init__(self, *args, **kwargs):
        super(Print, self).__init__(*args, **kwargs)

    def process_data(self, data):
        logger.debug('%s "%s" received points: %s', self.__class__.__name__, self.name, data)
Example #7
class Correlator(ElementwiseFilter):
    sink        = InputConnector()
    source      = OutputConnector()
    filter_name = "Correlator"

    def operation(self):
        return np.multiply

    def unit(self, base_unit):
        return base_unit + "^{}".format(len(self.sink.input_streams))
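A stand-alone sketch of what the two methods above amount to: the parent ElementwiseFilter (not shown here) presumably reduces all input streams with operation(), and unit() raises the base unit to the number of connected streams. The arrays and unit string below are invented for illustration:

import numpy as np
from functools import reduce

stream_a = np.array([1.0, 2.0, 3.0])   # two hypothetical input streams
stream_b = np.array([0.5, 0.5, 2.0])

correlated = reduce(np.multiply, [stream_a, stream_b])
print(correlated)                       # -> [0.5 1.  6. ]

base_unit, n_streams = "V", 2
print(base_unit + "^{}".format(n_streams))   # -> V^2, matching unit()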
Example #8
    def __init__(self, name=None, **kwargs):
        self.name = name
        self.input_connectors = {}
        self.output_connectors = {}
        self.parameters = {}
        self.experiment = None # Keep a reference to the parent experiment

        # For objectively measuring doneness
        self.finished_processing = False

        # For signaling to Quince that something is wrong
        self.out_of_spec = False

        for ic in self._input_connectors:
            a = InputConnector(name=ic, parent=self)
            a.parent = self
            self.input_connectors[ic] = a
            setattr(self, ic, a)
        for oc in self._output_connectors:
            a = OutputConnector(name=oc, parent=self)
            a.parent = self
            self.output_connectors[oc] = a
            setattr(self, oc, a)
        for param in self._parameters:
            a = copy.deepcopy(param)
            a.parent = self
            self.parameters[param.name] = a
            setattr(self, param.name, a)
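The three loops above are what let subclasses declare connectors and parameters as plain class attributes: each instance gets its own InputConnector/OutputConnector objects and a deep copy of every Parameter. A minimal, hypothetical subclass written against that pattern (OffsetFilter is not part of Auspex; it assumes the usual Filter, connector, and FloatParameter imports):

class OffsetFilter(Filter):
    """Hypothetical filter that adds a constant offset to incoming data."""
    sink   = InputConnector()             # gathered into self.input_connectors by __init__
    source = OutputConnector()            # gathered into self.output_connectors
    offset = FloatParameter(default=0.0)  # deep-copied into self.parameters

    def process_data(self, data):
        # Same push pattern as the Passthrough filter above
        for os in self.source.output_streams:
            os.push(data + self.offset.value)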
Example #9
class MeshPlotter(Filter):
    sink = InputConnector()
    plot_mode = Parameter(
        allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"],
        default="quad")

    def __init__(self,
                 *args,
                 name="",
                 plot_mode=None,
                 x_label="",
                 y_label="",
                 **plot_args):
        super(MeshPlotter, self).__init__(*args, name=name)
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.update_interval = 0.5
        self.last_update = time.time()
        self.x_label = x_label
        self.y_label = y_label

        self.quince_parameters = [self.plot_mode]

        # This will hold the matplot server
        self.plot_server = None

    def desc(self):
        d = {
            'plot_type': 'mesh',
            'plot_mode': self.plot_mode.value,
            'x_label': self.x_label,
            'y_label': self.y_label
        }
        return d

    def update_descriptors(self):
        logger.info(
            "Updating MeshPlotter %s descriptors based on input descriptor %s",
            self.name, self.sink.descriptor)

    def final_init(self):
        pass

    async def process_direct(self, data):
        self.plot_server.send(self.name, data)

    async def on_done(self):
        self.plot_server.send(self.name, np.array([]), msg="done")
        time.sleep(0.1)
Example #10
File: io.py Project: ocakgun/Auspex
class DataBuffer(Filter):
    """Writes data to IO."""

    sink = InputConnector()

    def __init__(self, **kwargs):
        super(DataBuffer, self).__init__(**kwargs)
        self._final_buffer = Queue()
        self._temp_buffer = Queue()
        self._get_buffer = Event()
        self.final_buffer = None

    def final_init(self):
        self.w_idx = 0
        self.points_taken = 0
        self.descriptor = self.sink.input_streams[0].descriptor
        self.buff = np.empty(self.descriptor.expected_num_points(),
                             dtype=self.descriptor.dtype)

    def checkin(self):
        if self._get_buffer.is_set():
            self._temp_buffer.put(self.buff)
        self._get_buffer.clear()

    def process_data(self, data):
        # Write the data
        self.buff[self.w_idx:self.w_idx + data.size] = data
        self.w_idx += data.size
        self.points_taken = self.w_idx

    def main(self):
        super(DataBuffer, self).main()
        self._final_buffer.put(self.buff)

    def get_data(self):
        if self.done.is_set():
            if self.final_buffer is None:
                self.final_buffer = self._final_buffer.get()
            time.sleep(0.05)
            return np.reshape(self.final_buffer,
                              self.descriptor.dims()), self.descriptor
        else:
            self._get_buffer.set()
            temp_buffer = self._temp_buffer.get()
            time.sleep(0.05)
            return np.reshape(temp_buffer,
                              self.descriptor.dims()), self.descriptor
Example #11
class X6StreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()

    channel = IntParameter(value_range=(1, 3), snap=1)
    dsp_channel = IntParameter(value_range=(0, 4), snap=1)
    stream_type = Parameter(
        allowed_values=["raw", "demodulated", "integrated"],
        default='demodulated')

    # def __init__(self, name=""):
    #     super(X6StreamSelector, self).__init__(name=name)
    # self.stream_type.value = "Raw" # One of Raw, Demodulated, Integrated
    # self.quince_parameters = [self.channel, self.dsp_channel, self.stream_type]

    def get_channel(self, channel_proxy):
        """Create and return a channel object corresponding to this stream selector"""
        return X6Channel(channel_proxy)

    def get_descriptor(self, stream_selector, receiver_channel):
        """Get the axis descriptor corresponding to this stream selector. If it's an integrated stream,
        then the time axis has already been eliminated. Otherwise, add the time axis."""
        descrip = DataStreamDescriptor()
        if stream_selector.stream_type == 'raw':
            samp_time = 4.0e-9
            descrip.add_axis(
                DataAxis(
                    "time",
                    samp_time *
                    np.arange(receiver_channel.receiver.record_length // 4)))
            descrip.dtype = np.float64
        elif stream_selector.stream_type == 'demodulated':
            samp_time = 32.0e-9
            descrip.add_axis(
                DataAxis(
                    "time",
                    samp_time *
                    np.arange(receiver_channel.receiver.record_length // 32)))
            descrip.dtype = np.complex128
        else:  # Integrated
            descrip.dtype = np.complex128
        return descrip
Example #12
class AlazarStreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink    = InputConnector()
    source  = OutputConnector()
    channel = IntParameter(value_range=(1,2), snap=1)

    def __init__(self, name=""):
        super(AlazarStreamSelector, self).__init__(name=name)
        self.channel.value = 1 # Either 1 or 2
        self.quince_parameters = [self.channel]

    def get_descriptor(self, source_instr_settings, channel_settings):
        channel = AlazarChannel(channel_settings)

        # Add the time axis
        samp_time = 1.0/source_instr_settings['sampling_rate']
        descrip = DataStreamDescriptor()
        descrip.add_axis(DataAxis("time", samp_time*np.arange(source_instr_settings['record_length'])))
        return channel, descrip
Example #13
class DummydigStreamSelector(Filter):

    sink = InputConnector()
    source = OutputConnector()
    channel = IntParameter(value_range=(1, 2), snap=1)

    def __init__(self, name=""):
        super(DummydigStreamSelector, self).__init__(name=name)
        self.channel.value = 1  # Either 1 or 2
        self.quince_parameters = [self.channel]

    def get_descriptor(self, source_instr_settings, channel_settings):
        channel = DummydigChannel(channel_settings)

        # Add the time axis
        samp_time = 1.0 / source_instr_settings['sampling_rate']
        descrip = DataStreamDescriptor()
        descrip.add_axis(
            DataAxis(
                "time",
                samp_time * np.arange(source_instr_settings['record_length'])))
        return channel, descrip
Example #14
File: filter.py Project: ocakgun/Auspex
    def __init__(self, name=None, **kwargs):
        super(Filter, self).__init__()
        self.filter_name = name
        self.input_connectors = {}
        self.output_connectors = {}
        self.parameters = {}
        self.qubit_name = ""

        # Event for killing the filter properly
        self.exit = Event()
        self.done = Event()

        # Keep track of data throughput
        self.processed = 0

        # For objectively measuring doneness
        self.finished_processing = Event()
        self.finished_processing.clear()

        for ic in self._input_connectors:
            a = InputConnector(name=ic, parent=self)
            a.parent = self
            self.input_connectors[ic] = a
            setattr(self, ic, a)
        for oc in self._output_connectors:
            a = OutputConnector(name=oc, parent=self)
            a.parent = self
            self.output_connectors[oc] = a
            setattr(self, oc, a)
        for param in self._parameters:
            a = copy.deepcopy(param)
            a.parent = self
            self.parameters[param.name] = a
            setattr(self, param.name, a)

        # For sending performance information
        self.last_performance_update = datetime.datetime.now()
        self.beginning = datetime.datetime.now()
        self.perf_queue = None
Example #15
class KernelIntegrator(Filter):

    sink = InputConnector()
    source = OutputConnector()
    kernel = Parameter()
    bias = FloatParameter(default=0.0)
    simple_kernel = BoolParameter(default=True)
    box_car_start = FloatParameter(default=0.0)
    box_car_stop = FloatParameter(default=100e-9)
    demod_frequency = FloatParameter(default=0.0)
    """Integrate with a given kernel. Kernel will be padded/truncated to match record length"""
    def __init__(self, **kwargs):
        super(KernelIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        # self.quince_parameters = [self.simple_kernel, self.demod_frequency, self.box_car_start, self.box_car_stop]

    def update_descriptors(self):
        if not self.simple_kernel and self.kernel.value is None:
            raise ValueError("Integrator was passed kernel None")

        logger.debug(
            'Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
            self.filter_name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()

        if self.kernel.value:
            if os.path.exists(
                    os.path.join(config.KernelDir,
                                 self.kernel.value + '.txt')):
                kernel = np.loadtxt(
                    os.path.join(config.KernelDir, self.kernel.value + '.txt'),
                    dtype=complex,
                    converters={
                        0: lambda s: complex(s.decode().replace('+-', '-'))
                    })
            else:
                try:
                    kernel = eval(self.kernel.value.encode('unicode_escape'))
                except:
                    raise ValueError(
                        'Kernel invalid. Provide a file name or an expression to evaluate'
                    )
            if self.simple_kernel.value:
                logger.warning(
                    "Using specified kernel. To use a box car filter instead, clear kernel.value"
                )

        elif self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.demod_frequency.value *
                             time_pts)
        else:
            raise ValueError(
                'Kernel invalid. Either provide a file name or an expression to evaluate or set simple_kernel.value to true'
            )
        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for ost in self.source.output_streams:
            ost.set_descriptor(output_descriptor)
            ost.end_connector.update_descriptors()

    def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            os.push(filtered)
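A self-contained numpy sketch of the boxcar branch above and of the inner product in process_data (the record length, window edges, and demodulation frequency are invented values):

import numpy as np

# Invented parameters; in the filter these come from the stream descriptor
record_length = 256
time_pts = 4e-9 * np.arange(record_length)      # 4 ns sampling
time_step = time_pts[1] - time_pts[0]
box_car_start, box_car_stop, demod_frequency = 20e-9, 420e-9, 10e6

# Boxcar kernel plus modulation, as in update_descriptors
kernel = np.zeros(record_length, dtype=np.complex128)
sample_start = int(box_car_start / time_step)
sample_stop = int(box_car_stop / time_step) + 1
kernel[sample_start:sample_stop] = 1.0
kernel *= np.exp(2j * np.pi * demod_frequency * time_pts)

# process_data reshapes the incoming buffer into records and integrates each
# record against the kernel with a single inner product
data = np.random.randn(4 * record_length) + 1j * np.random.randn(4 * record_length)
integrated = np.inner(np.reshape(data, (-1, record_length)), kernel)
print(integrated.shape)   # -> (4,)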
Example #16
File: plot.py Project: ocakgun/Auspex
class Plotter(Filter):
    sink = InputConnector()
    plot_dims = IntParameter(value_range=(0, 1, 2), snap=1,
                             default=0)  # 0 means auto
    plot_mode = Parameter(
        allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"],
        default="quad")

    def __init__(self,
                 *args,
                 name="",
                 plot_dims=None,
                 plot_mode=None,
                 **plot_args):
        super(Plotter, self).__init__(*args, name=name)
        if plot_dims:
            self.plot_dims.value = plot_dims
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.full_update_interval = 0.5
        self.update_interval = 2.0  # slower for partial updates
        self.last_update = time.time()
        self.last_full_update = time.time()

        self._final_buffer = Queue()
        self.final_buffer = None

        self.quince_parameters = [self.plot_dims, self.plot_mode]

        # Unique id for plot server
        self.uuid = None

        # Should we actually produce plots?
        self.do_plotting = True

    def send(self, message):
        if self.do_plotting:
            data = message['data']
            msg = message['msg']
            name = message['name']

            msg_contents = [self.uuid.encode(), msg.encode(), name.encode()]

            # We might be sending multiple axes, series, etc.
            # Just add them successively to a multipart message.
            for dat in data:
                md = dict(
                    dtype=str(dat.dtype),
                    shape=dat.shape,
                )
                msg_contents.extend(
                    [json.dumps(md).encode(),
                     np.ascontiguousarray(dat)])
            self.socket.send_multipart(msg_contents)

    def get_final_plot(self, quad_funcs=[np.abs, np.angle]):
        if not self.done.is_set():
            raise Exception(
                "Cannot get final plot since plotter is not done or was not run."
            )

        from bqplot import LinearScale, ColorScale, ColorAxis, Axis, Lines, Figure, Tooltip, HeatMap
        from bqplot.toolbar import Toolbar
        from ipywidgets import VBox, HBox

        if self.final_buffer is None:
            self.final_buffer = self._final_buffer.get()
        if self.plot_dims.value == 2:
            raise NotImplementedError(
                "2 dimensional get_final_plot not yet implemented.")
        elif self.plot_dims.value == 1:
            figs = []
            for quad_func in quad_funcs:
                sx = LinearScale()
                sy = LinearScale()
                ax = Axis(label=self.axis_label(-1), scale=sx)
                ay = Axis(
                    label=
                    f"{self.descriptor.data_name} ({self.descriptor.data_unit})",
                    scale=sy,
                    orientation='vertical')
                line = Lines(x=self.x_values,
                             y=quad_func(self.final_buffer),
                             scales={
                                 'x': sx,
                                 'y': sy
                             })
                fig = Figure(marks=[line],
                             axes=[ax, ay],
                             title=self.filter_name)
                figs.append(fig)
        if len(figs) <= 2:
            return HBox(figs)
        elif len(figs) == 4:
            return VBox([HBox([figs[0], figs[1]]), HBox([figs[2], figs[3]])])
        elif len(figs) == 3 or len(figs) > 4:
            raise Exception("Please use 1, 2, or 4 quadrature functions.")

    def desc(self):
        d = {
            'plot_type':
            'standard',
            'plot_mode':
            self.plot_mode.value,
            'plot_dims':
            int(self.plot_dims.value),
            'x_min':
            float(min(self.x_values)),
            'x_max':
            float(max(self.x_values)),
            'x_len':
            int(self.descriptor.axes[-1].num_points()),
            'x_label':
            self.axis_label(-1),
            'y_label':
            "{} ({})".format(self.descriptor.data_name,
                             self.descriptor.data_unit)
        }
        if self.plot_dims.value == 2:
            d['y_label'] = self.axis_label(-2)
            d['data_label'] = "{} ({})".format(self.descriptor.data_name,
                                               self.descriptor.data_unit)
            d['y_min'] = float(min(self.y_values))
            d['y_max'] = float(max(self.y_values))
            d['y_len'] = int(self.descriptor.axes[-2].num_points())
        return d

    def set_done(self):
        self.send({
            'name': self.filter_name,
            'data': [np.array([])],
            "msg": "done"
        })

    def set_quit(self):
        self.send({
            'name': self.filter_name,
            'data': [np.array([])],
            "msg": "quit"
        })

    def update_descriptors(self):
        logger.debug(
            "Updating Plotter %s descriptors based on input descriptor %s",
            self.filter_name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor

    def final_init(self):
        # Determine the plot dimensions
        if not self.plot_dims.value:
            if len(self.descriptor.axes) > 1:
                self.plot_dims.value = 2
            else:
                self.plot_dims.value = 1

        # Check the descriptor axes
        num_axes = len(self.descriptor.axes)
        if self.plot_dims.value > num_axes:
            logger.info(
                "Cannot plot in more dimensions than there are data axes.")
            self.plot_dims.value = num_axes

        if self.plot_dims.value == 1:
            self.points_before_clear = self.descriptor.axes[-1].num_points()
        else:
            self.points_before_clear = self.descriptor.axes[-1].num_points(
            ) * self.descriptor.axes[-2].num_points()
        logger.debug("Plot will clear after every %d points.",
                     self.points_before_clear)

        self.x_values = self.descriptor.axes[-1].points

        if self.plot_dims.value == 2:
            self.y_values = self.descriptor.axes[-2].points

        #I'm so sorry everyone. Send Julia
        if 'complex' in np.dtype(self.descriptor.dtype).name:
            self.plot_buffer = (
                np.nan * np.ones(self.points_before_clear) +
                1.0j * np.nan * np.ones(self.points_before_clear)).astype(
                    self.descriptor.dtype)
        else:
            self.plot_buffer = np.nan * np.ones(self.points_before_clear)
        self.idx = 0

    def execute_on_run(self):
        # Connect to the plot server
        if self.do_plotting:
            try:
                self.context = zmq.Context()
                self.socket = self.context.socket(zmq.DEALER)
                self.socket.identity = f"Auspex_Experiment_{self.filter_name}_{hex(id(self))}".encode(
                )
                self.socket.connect("tcp://localhost:7762")
            except:
                logger.warning(
                    "Exception occured while contacting the plot server. Is it running?"
                )

    def update(self):
        if self.plot_dims.value == 1:
            self.send({
                'name': self.filter_name,
                'msg': 'data',
                'data': [self.x_values, self.plot_buffer.copy()]
            })
        elif self.plot_dims.value == 2:
            self.send({
                'name':
                self.filter_name,
                'msg':
                'data',
                'data':
                [self.x_values, self.y_values,
                 self.plot_buffer.copy()]
            })

    def process_data(self, data):
        # If we get more than enough data, pause to update the plot if necessary
        if (self.idx + data.size) > self.points_before_clear:
            spill_over = (self.idx + data.size) % self.points_before_clear
            if spill_over == 0:
                spill_over = self.points_before_clear
            if (time.time() - self.last_full_update >=
                    self.full_update_interval):
                # If we are getting data quickly, then we can afford to wait
                # for a full frame before pushing to plot.
                self.plot_buffer[self.idx:] = data[:(self.points_before_clear -
                                                     self.idx)]
                self.update()
                self.last_full_update = time.time()
            self.plot_buffer[:] = np.nan
            self.plot_buffer[:spill_over] = data[-spill_over:]
            self.idx = spill_over
        else:  # just keep trucking
            self.plot_buffer[self.idx:self.idx + data.size] = data.flatten()
            self.idx += data.size
            if (time.time() - max(self.last_full_update, self.last_update) >=
                    self.update_interval):
                self.update()
                self.last_update = time.time()

    def on_done(self):
        if self.plot_dims.value == 1:
            self.send({
                'name': self.filter_name,
                "msg": "data",
                'data': [self.x_values, self.plot_buffer.copy()],
            })
        elif self.plot_dims.value == 2:
            self.send({
                'name':
                self.filter_name,
                "msg":
                "data",
                'data':
                [self.x_values, self.y_values,
                 self.plot_buffer.copy()]
            })
        self._final_buffer.put(self.plot_buffer)
        if self.do_plotting:
            self.set_done()
            self.socket.close()
            self.context.term()

    def axis_label(self, index):
        unit_str = " ({})".format(self.descriptor.axes[index].unit
                                  ) if self.descriptor.axes[index].unit else ''
        return self.descriptor.axes[index].name + unit_str
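process_data above treats plot_buffer as a frame that fills up to points_before_clear samples; anything that spills past the end of one frame seeds the next. A self-contained sketch of just that index arithmetic, without the plotting or rate limiting (the buffer size and inputs are arbitrary):

import numpy as np

class FrameBuffer:
    """Minimal stand-in for the Plotter's plot_buffer handling."""
    def __init__(self, points_before_clear):
        self.n = points_before_clear
        self.buf = np.full(self.n, np.nan)
        self.idx = 0

    def feed(self, data):
        # Same branch structure as Plotter.process_data
        if self.idx + data.size > self.n:
            spill_over = (self.idx + data.size) % self.n
            if spill_over == 0:
                spill_over = self.n
            self.buf[self.idx:] = data[:self.n - self.idx]  # finish the current frame
            self.buf[:] = np.nan                            # clear, then seed the next frame
            self.buf[:spill_over] = data[-spill_over:]
            self.idx = spill_over
        else:
            self.buf[self.idx:self.idx + data.size] = data
            self.idx += data.size

fb = FrameBuffer(10)
fb.feed(np.arange(7.0))     # partial frame
fb.feed(np.arange(7.0))     # crosses the boundary: 4 points carry into the new frame
print(fb.idx, fb.buf[:4])   # -> 4 [3. 4. 5. 6.]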
Example #17
class SingleShotMeasurement(Filter):

    save_kernel = BoolParameter(default=False)
    optimal_integration_time = BoolParameter(default=False)
    set_threshold = BoolParameter(default=False)
    zero_mean = BoolParameter(default=False)
    logistic_regression = BoolParameter(default=False)

    sink = InputConnector()
    source = OutputConnector() # Single shot fidelity

    TOLERANCE = 1e-3

    def __init__(self, save_kernel=False, optimal_integration_time=False,
                    zero_mean=False, set_threshold=False,
                    logistic_regression=False, **kwargs):
        super(SingleShotMeasurement, self).__init__(**kwargs)
        if len(kwargs) > 0:
            self.save_kernel.value = save_kernel
            self.optimal_integration_time.value = optimal_integration_time
            self.zero_mean.value = zero_mean
            self.set_threshold.value = set_threshold
            self.logistic_regression.value = logistic_regression

        self.quince_parameters = [self.save_kernel, self.optimal_integration_time,
            self.zero_mean, self.set_threshold, self.logistic_regression]

        self.pdf_data_queue = Queue() #Output queue
        self.fidelity       = self.source

    def update_descriptors(self):

        logger.debug("Updating Plotter %s descriptors based on input descriptor %s", self.filter_name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor
        try:
            self.time_pts = self.descriptor.axes[self.descriptor.axis_num("time")].points
            self.record_length = len(self.time_pts)
        except ValueError:
            raise ValueError("Single shot filter sink does not appear to have a time axis!")
        self.num_averages = len(self.sink.descriptor.axes[self.descriptor.axis_num("averages")].points)
        self.num_segments = len(self.sink.descriptor.axes[self.descriptor.axis_num("segment")].points)
        self.ground_data = np.zeros((self.record_length, self.num_averages), dtype=np.complex128)
        self.excited_data = np.zeros((self.record_length, self.num_averages), dtype=np.complex128)
        self.total_points = self.num_segments*self.record_length*self.num_averages # Total points BEFORE sweep axes

        output_descriptor = DataStreamDescriptor()
        output_descriptor.axes = [_ for _ in self.descriptor.axes if type(_) is SweepAxis]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128

        if len(output_descriptor.axes) == 0:
            output_descriptor.add_axis(DataAxis("Fidelity", [1]))

        for os in self.fidelity.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()


    def final_init(self):
        self.fid_buffer = np.empty(self.record_length*self.num_averages*self.num_segments, dtype=np.complex128)
        self.idx = 0

    def process_data(self, data):
        """Fill the ground and excited data bins"""

        self.fid_buffer[self.idx:self.idx+len(data)] = data
        self.idx += len(data)

        if self.idx == self.record_length*self.num_averages*self.num_segments:
            self.idx = 0
            reshaped = self.fid_buffer.reshape(self.record_length, -1, order='F')
            self.ground_data = reshaped[:, ::2]
            self.excited_data = reshaped[:, 1::2]
            self.compute_filter()
            if self.logistic_regression.value:
                self.logistic_fidelity()
            if self.save_kernel.value:
                self._save_kernel()
            for os in self.fidelity.output_streams:
                os.push(self.fidelity_result)
            self.pdf_data_queue.put(self.pdf_data)

    def compute_filter(self):
        """Compute the single shot kernel and obtain single-shot measurement
        fidelity.

        Expects that the data will be in self.ground_data and self.excited_data,
        which are (T, N)-shaped numpy arrays, with T the time axis and N the
        number of shots."""
        #get excited and ground state data
        try:
            ground_mean = np.mean(self.ground_data, axis=1)
            excited_mean = np.mean(self.excited_data, axis=1)
        except AttributeError:
            raise Exception("Single shot filter does not appear to have any data!")
        distance = np.abs(np.mean(ground_mean - excited_mean))
        bias = np.mean(ground_mean + excited_mean) / distance
        logger.debug("Found single-shot measurement distance: {} and bias {}.".format(distance, bias))
        #construct matched filter kernel
        old_settings = np.seterr(divide='ignore', invalid='ignore')
        kernel = np.nan_to_num(np.divide(np.conj(ground_mean - excited_mean), np.var(self.ground_data, ddof=1, axis=1)))
        np.seterr(**old_settings)
        #sets kernel to zero when difference is too small, and prevents
        #kernel from diverging when var->0 at beginning of record_length
        kernel = np.multiply(kernel, np.greater(np.abs(ground_mean - excited_mean), self.TOLERANCE * distance))
        #subtract offset to cancel low-frequency fluctuations when integrating
        #raw data (not demod)
        if self.zero_mean.value:
            kernel = kernel - np.mean(kernel)
        logger.debug("Found single shot filter norm: {}.".format(np.sum(np.abs(kernel))))
        #annoyingly numpy's isreal has the opposite behavior to MATLAB's
        if not np.any(np.imag(kernel) > np.finfo(np.complex128).eps):
            #construct analytic signal from Hilbert transform
            kernel = hilbert(np.real(kernel))
        #normalize between -1 and 1
        kernel = kernel / np.amax(np.hstack([np.abs(np.real(kernel)), np.abs(np.imag(kernel))]))
        #apply matched filter
        weighted_ground = self.ground_data * kernel[:, np.newaxis]
        weighted_excited = self.excited_data * kernel[:, np.newaxis]

        if self.optimal_integration_time.value:
            #take cumulative sum up to each time step
            ground_I = np.real(weighted_ground)
            ground_Q = np.imag(weighted_ground)
            excited_I = np.real(weighted_excited)
            excited_Q = np.imag(weighted_excited)
            int_ground_I = np.cumsum(ground_I, axis=0)
            int_ground_Q = np.cumsum(ground_Q, axis=0)
            int_excited_I = np.cumsum(excited_I, axis=0)
            int_excited_Q = np.cumsum(excited_Q, axis=0)
            I_mins = np.amin(np.minimum(int_ground_I, int_excited_I), axis=1)
            I_maxes = np.amax(np.maximum(int_ground_I, int_excited_I), axis=1)
            num_times = int_ground_I.shape[0]
            fidelities = np.zeros((num_times, ))
            #Loop through each integration point; estimate the CDF and
            #then calculate best measurement fidelity
            for pt in range(num_times):
                bins = np.linspace(I_mins[pt], I_maxes[pt], 100)
                g_PDF = np.histogram(int_ground_I[pt, :], bins)[0]
                e_PDF = np.histogram(int_excited_I[pt,:], bins)[0]
                fidelities[pt] = np.sum(np.abs(g_PDF - e_PDF)) / np.sum(g_PDF + e_PDF)
            best_idx = fidelities.argmax(axis=0)
            self.best_integration_time = best_idx
            logger.info("Found best integration time at {} out of {} decimated points.".format(best_idx, num_times))
            #redo calculation with KDEs to get a more accurate estimate
            bins = np.linspace(I_mins[best_idx], I_maxes[best_idx], 100)
            g_KDE = gaussian_kde(int_ground_I[best_idx, :])
            e_KDE = gaussian_kde(int_excited_I[best_idx, :])
            g_PDF = g_KDE(bins)
            e_PDF = e_KDE(bins)
        else:
            ground_I = np.sum(np.real(weighted_ground), axis=0)
            ground_Q = np.sum(np.imag(weighted_ground), axis=0)
            excited_I = np.sum(np.real(weighted_excited), axis=0)
            excited_Q = np.sum(np.imag(weighted_excited), axis=0)
            I_min = np.amin(np.minimum(ground_I, excited_I))
            I_max = np.amax(np.maximum(ground_I, excited_I))
            bins = np.linspace(I_min, I_max, 100)
            g_KDE = gaussian_kde(ground_I)
            e_KDE = gaussian_kde(excited_I)
            g_PDF = g_KDE(bins)
            e_PDF = e_KDE(bins)

        self.kernel = kernel
        max_F_I = 1 - 0.5 * (1 - 0.5 * (bins[2] - bins[1]) * np.sum(np.abs(g_PDF - e_PDF)))
        self.pdf_data = {"Max I Fidelity": max_F_I,
                         "I Bins": bins,
                         "Ground I PDF": g_PDF,
                         "Excited I PDF": e_PDF}

        if self.set_threshold.value:
            indmax = (np.abs(np.cumsum(g_PDF / np.sum(g_PDF))
                        - np.cumsum(e_PDF / np.sum(e_PDF)))).argmax(axis=0)
            self.pdf_data["I Threshold"] = bins[indmax]
            logger.info("Single shot kernel found I threshold at {}.".format(bins[indmax]))

        if self.optimal_integration_time.value:
            mu_g, sigma_g = norm.fit(int_ground_I[best_idx, :])
            mu_e, sigma_e = norm.fit(int_excited_I[best_idx, :])
        else:
            mu_g, sigma_g = norm.fit(ground_I)
            mu_e, sigma_e = norm.fit(excited_I)
        self.pdf_data["Ground I Gaussian PDF"] = norm.pdf(bins, mu_g, sigma_g)
        self.pdf_data["Excited I Gaussian PDF"] = norm.pdf(bins, mu_e, sigma_e)

        #calculate kernel density estimates for other quadrature
        if self.optimal_integration_time.value:
            Q_min = np.amin([int_ground_Q[best_idx,:], int_excited_Q[best_idx,:]])
            Q_max = np.amax([int_ground_Q[best_idx,:], int_excited_Q[best_idx,:]])
            qbins = np.linspace(Q_min, Q_max, 100)
            g_KDE = gaussian_kde(int_ground_Q[best_idx, :])
            e_KDE = gaussian_kde(int_excited_Q[best_idx, :])
        else:
            qbins = np.linspace(np.amin([ground_Q, excited_Q]), np.amax([ground_Q, excited_Q]), 100)
            g_KDE = gaussian_kde(ground_Q)
            e_KDE = gaussian_kde(excited_Q)
        self.pdf_data["Q Bins"] = qbins
        g_PDF_Q = g_KDE(qbins)
        e_PDF_Q = e_KDE(qbins)
        self.pdf_data["Ground Q PDF"] =  g_PDF_Q
        self.pdf_data["Excited Q PDF"] =  e_PDF_Q
        self.pdf_data["Max Q Fidelity"] = 1 - 0.5 * (1 - 0.5 * (qbins[2] - qbins[1]) * np.sum(np.abs(g_PDF_Q - e_PDF_Q)))

        if self.optimal_integration_time.value:
            mu_g, sigma_g = norm.fit(int_ground_Q[best_idx, :])
            mu_e, sigma_e = norm.fit(int_excited_Q[best_idx, :])
        else:
            mu_g, sigma_g = norm.fit(ground_Q)
            mu_e, sigma_e = norm.fit(excited_Q)
        self.pdf_data["Ground Q Gaussian PDF"] = norm.pdf(bins, mu_g, sigma_g)
        self.pdf_data["Excited Q Gaussian PDF"] = norm.pdf(bins, mu_e, sigma_e)

        self.fidelity_result = self.pdf_data["Max I Fidelity"] + 1j * self.pdf_data["Max Q Fidelity"]
        logger.info("Single shot fidelity filter found: {}".format(self.fidelity_result))

    def logistic_fidelity(self):
        #group data and assign state labels
        gnd_features = np.hstack([np.real(self.ground_data.T),
                                np.imag(self.ground_data.T)])
        ex_features = np.hstack([np.real(self.excited_data.T),
                                np.imag(self.excited_data.T)])
        #liblinear wants arrays in C order
        features = np.ascontiguousarray(np.vstack([gnd_features, ex_features]))
        state = np.ascontiguousarray(np.hstack([np.zeros(self.ground_data.shape[1]),
                                                np.ones(self.excited_data.shape[1])]))
        #Set up logistic regression with cross-validation using liblinear.
        #Cs sets the inverse of the regularization strength, which will be optimized
        #through cross-validation. Uses the default Stratified K-Folds
        #CV generator, with 3 folds.
        #This is set up to be as consistent with the MATLAB implementation
        #as I can make it. --GJR
        Cs = np.logspace(-1,2,5)
        logreg = LogisticRegressionCV(Cs, cv=3, solver='liblinear')
        logreg.fit(features, state) #fit the model
        predictions = logreg.predict(features) #in-place classification
        score = logreg.score(features,state) #mean accuracy of classification
        N = len(predictions)
        S = np.sum(predictions == state) #how many we got right
        #now calculate confidence intervals
        c = 0.95
        flo = betaincinv(S+1, N-S+1, (1-c)/2., )
        fhi = betaincinv(S+1, N-S+1, (1+c)/2., )
        logger.info(("In-place logistic regression fidelity: " +
                "{:.2f}% ({:.2f}, {:.2f})".format(100*score, 100*flo, 100*fhi)))

    def _save_kernel(self):
        import QGL.config as qconfig
        if not qconfig.KernelDir or not os.path.exists(qconfig.KernelDir):
            logger.warning("No kernel directory provided, please set auspex.config.KernelDir")
            logger.warning("Saving kernel to local directory.")
            dir = "./"
        else:
            dir = qconfig.KernelDir
        try:
            logger.info(self.filter_name)
            filename = self.filter_name + "_kernel.txt"
            header = "Single shot fidelity filter - {}:\nSource: {}".format(time.strftime("%m/%d/%y -- %H:%M"), self.filter_name)
            np.savetxt(os.path.join(dir, filename), self.kernel, header=header, comments="#")
        except (AttributeError, IOError) as ex:
            raise AttributeError("Could not save single shot fidelity kernel!") from ex
Example #18
class WindowIntegrator(Filter):
    """
    Integrate with a window kernel from the set {'chebwin', 'blackman',
    'slepian', 'boxcar'} applied between the start and stop times.

    YAML parameters are:
    type: WindowIntegrator
    source: Demod-q1
    kernel_type: 'chebwin'
    start: 5.0e-07
    stop: 9.0e-07

    See https://docs.scipy.org/doc/scipy/reference/signal.html for more
    details on the specific window functions.
    """

    sink = InputConnector()
    source = OutputConnector()
    bias = FloatParameter(default=0.0)
    kernel_type = Parameter(default='boxcar', allowed_values=['chebwin',\
        'blackman', 'slepian', 'boxcar'])
    start = FloatParameter(default=0.0)
    stop = FloatParameter(default=100e-9)
    frequency = FloatParameter(default=0.0)
    """Integrate with a given kernel. Kernel will be padded/truncated to match record length"""
    def __init__(self, **kwargs):
        super(WindowIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        self.quince_parameters = [
            self.kernel_type, self.frequency, self.start, self.stop
        ]

    def update_descriptors(self):
        if not self.kernel_type.value:
            raise ValueError("Integrator was passed kernel None")

        logger.debug(
            'Updating WindowIntegrator "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()

        time_pts = self.sink.descriptor.axes[-1].points
        time_step = time_pts[1] - time_pts[0]
        kernel = np.zeros(record_length, dtype=np.complex128)
        sample_start = int(self.start.value / time_step)
        sample_stop = int(self.stop.value / time_step) + 1
        window_length = sample_stop - sample_start
        if self.kernel_type.value == 'boxcar':
            kernel[sample_start:sample_stop] = 1.0
        elif self.kernel_type.value == 'chebwin':
            # create a Dolph-Chebyshev window with 100 dB attenuation
            kernel[sample_start:sample_stop] = chebwin(window_length, at=100)
        elif self.kernel_type.value == 'blackman':
            kernel[sample_start:sample_stop] = blackman(window_length)
        elif self.kernel_type.value == 'slepian':
            # create a Slepian window with 0.2 bandwidth
            kernel[sample_start:sample_stop] = slepian(window_length, width=0.2)

        # add modulation
        kernel *= np.exp(2j * np.pi * self.frequency.value * time_pts)

        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for os in self.source.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()

    async def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
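A self-contained sketch of dropping one of these windows into the integration interval, using the current scipy.signal.windows API (the record length, window edges, and 100 dB attenuation are illustrative; slepian is omitted because it has been removed from recent SciPy releases):

import numpy as np
from scipy.signal.windows import chebwin

record_length = 256
time_step = 4e-9
start, stop = 20e-9, 420e-9

sample_start = int(start / time_step)
sample_stop = int(stop / time_step) + 1
width = sample_stop - sample_start          # length of the integration window

kernel = np.zeros(record_length, dtype=np.complex128)
# Dolph-Chebyshev window with 100 dB sidelobe attenuation; a Blackman window
# or a plain boxcar of ones slots into the same interval in the same way
kernel[sample_start:sample_stop] = chebwin(width, at=100)
print(width, abs(kernel).max())             # window length and peak value (1.0)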
Example #19
class Framer(Filter):
    """Mete out data in increments defined by the specified axis."""

    sink   = InputConnector()
    source = OutputConnector()
    axis   = Parameter()

    def __init__(self, axis=None, **kwargs):
        super(Framer, self).__init__(**kwargs)
        self.axis.value = axis
        self.points_before_final_average   = None
        self.points_before_partial_average = None
        self.sum_so_far = None
        self.num_averages = None

        self.quince_parameters = [self.axis]

    def final_init(self):
        descriptor_in = self.sink.descriptor
        names = [a.name for a in descriptor_in.axes]

        self.axis.allowed_values = names

        if self.axis.value is None:
            self.axis.value = descriptor_in.axes[0].name

        # Convert named axes to an index
        if self.axis.value not in names:
            raise ValueError("Could not find axis {} within the DataStreamDescriptor {}".format(self.axis.value, descriptor_in))
        self.axis_num = descriptor_in.axis_num(self.axis.value)
        logger.debug("Framing on axis #%d: %s", self.axis_num, self.axis.value)

        # Find how many points we want to spit out at a time
        self.data_dims = descriptor_in.data_dims()
        if self.axis_num == len(descriptor_in.axes) - 1:
            raise Exception("Framer has refused to frame along single points.")
        else:
            self.frame_points = descriptor_in.num_points_through_axis(self.axis_num+1)

        logger.debug("Points before emitting frame: %s.", self.frame_points)

        # For storing carryover if getting uneven buffers
        self.idx = 0
        self.carry = np.zeros(0, dtype=self.sink.descriptor.dtype)

    def process_data(self, data):
        # Append any data carried from the last run
        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))

        # This is the largest number of frames we can emit for the time being
        num_frames = data.size // self.frame_points

        # This is the carryover that we'll store until next round.
        # If nothing is left then reset the carryover.
        remaining_points = data.size % self.frame_points
        if remaining_points > 0:
            if num_frames > 0:
                self.carry = data[-remaining_points:]
                data = data[:-remaining_points]
            else:
                self.carry = data
        else:
            self.carry = np.zeros(0, dtype=self.sink.descriptor.dtype)

        if num_frames > 0:
            for i in range(num_frames):
                for os in self.source.output_streams:
                    os.push(data[i*self.frame_points:(i+1)*self.frame_points])
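The framing above reduces to integer division with a carry: whole frames are emitted, the remainder waits for the next buffer. A self-contained sketch of that bookkeeping (frame_points and the buffers are invented):

import numpy as np

frame_points = 8                        # points per emitted frame (one step of the framing axis)
carry = np.zeros(0)

def frame(data, carry):
    """Return (frames, new_carry) using the same arithmetic as Framer.process_data."""
    if carry.size > 0:
        data = np.concatenate((carry, data))
    num_frames = data.size // frame_points
    remaining = data.size % frame_points
    new_carry = data[data.size - remaining:] if remaining else np.zeros(0)
    frames = [data[i * frame_points:(i + 1) * frame_points] for i in range(num_frames)]
    return frames, new_carry

frames, carry = frame(np.arange(13.0), carry)   # one full frame, 5 points carried over
print(len(frames), carry.size)                  # -> 1 5
frames, carry = frame(np.arange(5.0), carry)    # carry + new data make another frame, 2 left
print(len(frames), carry.size)                  # -> 1 2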
Example #20
File: plot.py Project: ocakgun/Auspex
class MeshPlotter(Filter):
    sink = InputConnector()
    plot_mode = Parameter(
        allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"],
        default="quad")

    def __init__(self,
                 *args,
                 name="",
                 plot_mode=None,
                 x_label="",
                 y_label="",
                 **plot_args):
        super(MeshPlotter, self).__init__(*args, name=name)
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.update_interval = 0.5
        self.last_update = time.time()
        self.x_label = x_label
        self.y_label = y_label

        self.quince_parameters = [self.plot_mode]

        # Unique id for plot server
        self.uuid = None

        # Should we actually produce plots?
        self.do_plotting = True

    def desc(self):
        d = {
            'plot_type': 'mesh',
            'plot_mode': self.plot_mode.value,
            'x_label': self.x_label,
            'y_label': self.y_label
        }
        return d

    def send(self, message):
        if self.do_plotting:
            data = message['data']
            msg = message['msg']
            name = message['name']

            msg_contents = [self.uuid.encode(), msg.encode(), name.encode()]

            # We might be sending multiple axes, series, etc.
            # Just add them successively to a multipart message.
            for dat in data:
                md = dict(
                    dtype=str(dat.dtype),
                    shape=dat.shape,
                )
                msg_contents.extend(
                    [json.dumps(md).encode(),
                     np.ascontiguousarray(dat)])
            self.socket.send_multipart(msg_contents)

    def update_descriptors(self):
        logger.info(
            "Updating MeshPlotter %s descriptors based on input descriptor %s",
            self.filter_name, self.sink.descriptor)

    def execute_on_run(self):
        # Connect to the plot server
        if self.do_plotting:
            try:
                self.context = zmq.Context()
                self.socket = self.context.socket(zmq.DEALER)
                self.socket.identity = "Auspex_Experiment".encode()
                self.socket.connect("tcp://localhost:7762")
            except:
                logger.warning(
                    "Exception occured while contacting the plot server. Is it running?"
                )

    def process_direct(self, data):
        self.send({
            'name': self.filter_name,
            "msg": "data",
            'data': [self.plot_buffer.copy()]
        })

    def on_done(self):
        self.send({
            'name': self.filter_name,
            'data': [np.array([])],
            "msg": "done"
        })
        if self.do_plotting:
            self.socket.close()
            self.context.term()
Example #21
class DataBuffer(Filter):
    """Writes data to IO."""

    sink = InputConnector()

    def __init__(self, store_tuples=True, **kwargs):
        super(DataBuffer, self).__init__(**kwargs)
        self.quince_parameters = []
        self.sink.max_input_streams = 100
        self.store_tuples = store_tuples

    def final_init(self):
        self.buffers = {s: np.empty(s.descriptor.expected_num_points(), dtype=s.descriptor.dtype) for s in self.sink.input_streams}
        self.w_idxs  = {s: 0 for s in self.sink.input_streams}

    async def run(self):
        self.finished_processing = False
        streams = self.sink.input_streams

        for s in streams[1:]:
            if not np.all(s.descriptor.expected_tuples() == streams[0].descriptor.expected_tuples()):
                raise ValueError("Multiple streams connected to DataBuffer must have matching descriptors.")

        self.descriptor = streams[0].descriptor

        # Buffers for stream data
        stream_data = {s: np.zeros(0, dtype=self.sink.descriptor.dtype) for s in streams}

        # Store whether streams are done
        stream_done = {s: False for s in streams}

        while True:

            futures = {
                asyncio.ensure_future(stream.queue.get()): stream
                for stream in streams
            }

            # Deal with non-equal number of messages using timeout
            responses, pending = await asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED, timeout=2.0)

            # Construct the inverse lookup, results in {stream: result}
            stream_results = {futures[res]: res.result() for res in list(responses)}

            # Cancel the futures
            for pend in list(pending):
                pend.cancel()

            # Add any new data to the stream buffers
            for stream, message in stream_results.items():
                message_type = message['type']
                message_data = message['data']
                message_comp = message['compression']
                message_data = pickle.loads(zlib.decompress(message_data)) if message_comp == 'zlib' else message_data
                message_data = message_data if hasattr(message_data, 'size') else np.array([message_data])
                if message_type == 'event':
                    if message['event_type'] == 'done':
                        stream_done[stream] = True
                    elif message['event_type'] == 'refined':
                        # Since we don't have much structure here, we simply
                        # create a new buffer and paste the old buffer into it.
                        old_buffer = self.buffers[stream]
                        new_size   = stream.descriptor.num_points()
                        self.buffers[stream] = np.empty(new_size, dtype=stream.descriptor.dtype)
                        self.buffers[stream][:old_buffer.size] = old_buffer

                elif message_type == 'data':
                    stream_data[stream] = message_data.flatten()

            if False not in stream_done.values():
                logger.debug('%s "%s" is done', self.__class__.__name__, self.name)
                break

            for stream in stream_results.keys():
                data = stream_data[stream]

                self.buffers[stream][self.w_idxs[stream]:self.w_idxs[stream]+data.size] = data
                self.w_idxs[stream] += data.size

            # If we have gotten all our data and process_data has returned, then we are done!
            if np.all([v.done() for v in self.input_connectors.values()]):
                self.finished_processing = True

    def get_data(self):
        streams = self.sink.input_streams
        desc = streams[0].descriptor

        # Set the dtype for the parameter columns
        if self.store_tuples:
            dtype = desc.axis_data_type(with_metadata=True)
        else:
            dtype = []

        # Extend the dtypes for each data column
        for stream in streams:
            dtype.append((stream.descriptor.data_name, stream.descriptor.dtype))
        data = np.empty(self.buffers[streams[0]].size, dtype=dtype)

        if self.store_tuples:
            tuples = desc.tuples(as_structured_array=True)
            for a in desc.axis_names(with_metadata=True):
                data[a] = tuples[a]
        for stream in streams:
            data[stream.descriptor.data_name] = self.buffers[stream]
        return data

    def get_descriptor(self):
        return self.sink.input_streams[0].descriptor
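
`get_data()` returns a single NumPy structured array whose fields are the sweep-axis columns (when `store_tuples` is set) plus one column per input stream. Below is a standalone sketch of that layout; the field names ("frequency", "power", "demod") are made up for illustration and are not taken from any real experiment.

# Standalone sketch of the structured-array layout DataBuffer.get_data() produces,
# with made-up field names for illustration.
import numpy as np

dtype = [("frequency", np.float64), ("power", np.float64), ("demod", np.complex128)]
data = np.empty(6, dtype=dtype)

# Axis columns hold the sweep tuples; the data column holds the buffered stream values.
data["frequency"] = np.repeat([5.0e9, 5.1e9, 5.2e9], 2)
data["power"]     = np.tile([-10.0, -5.0], 3)
data["demod"]     = np.arange(6) * (1 + 1j)

# Individual columns can then be pulled out by name, e.g. data["demod"].
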
Example #22
class WriteToHDF5(Filter):
    """Writes data to file."""

    sink = InputConnector()
    filename = FilenameParameter()
    groupname = Parameter(default='main')
    add_date = BoolParameter(default = False)
    save_settings = BoolParameter(default = True)

    def __init__(self, filename=None, groupname=None, add_date=False, save_settings=True, compress=True, store_tuples=True, exp_log=True, **kwargs):
        super(WriteToHDF5, self).__init__(**kwargs)
        self.compress = compress
        if filename:
            self.filename.value = filename
        if groupname:
            self.groupname.value = groupname
        self.points_taken = 0
        self.file = None
        self.group = None
        self.store_tuples = store_tuples
        self.create_group = True
        self.up_to_date = False
        self.sink.max_input_streams = 100
        self.add_date.value = add_date
        self.save_settings.value = save_settings
        self.exp_log = exp_log
        self.quince_parameters = [self.filename, self.groupname, self.add_date, self.save_settings]

    def final_init(self):
        if not self.filename.value:
            raise Exception("Filename never supplied to writer.")
        # If self.file is still None, then we need to create
        # the file object. Otherwise, we presume someone has
        # already set it up for us.
        if not self.file:
            self.file = self.new_file()

    def new_filename(self):
        filename = self.filename.value
        basename, ext = os.path.splitext(filename)
        if ext == "":
            logger.debug("Filename for writer {} does not have an extension -- using default '.h5'".format(self.name))
            ext = ".h5"

        dirname = os.path.dirname(os.path.abspath(filename))

        if self.add_date.value:
            date     = time.strftime("%y%m%d")
            dirname  = os.path.join(dirname, date)
            basename = os.path.join(dirname, os.path.basename(basename))

        # Set the file number to the maximum in the current folder + 1
        filenums = []
        if os.path.exists(dirname):
            for f in os.listdir(dirname):
                if ext in f:
                    filenums += [int(re.findall(r'-(\d{4})\.', f)[0])] if os.path.isfile(os.path.join(dirname, f)) else []

        i = max(filenums) + 1 if filenums else 0
        return "{}-{:04d}{}".format(basename,i,ext)

    def new_file(self):
        """ Open a new data file to write """
        # Close the current file, if any
        if self.file is not None:
            try:
                self.file.close()
            except Exception as e:
                logger.error("Encounter exception: {}".format(e))
                logger.error("Cannot close file '{}'. File may be damaged.".format(self.file.filename))
        # Get new file name
        self.filename.value = self.new_filename()
        head = os.path.dirname(self.filename.value)
        head = os.path.normpath(head)
        dirs = head.split(os.sep)
        # Check if path exists. If not, create new one(s).
        os.makedirs(head, exist_ok=True)
        logger.debug("Create new data file: %s." % self.filename.value)
        # Copy current settings to a folder with the file name
        if self.save_settings.value:
            # just move copies to a new directory
            self.save_yaml()
        if self.exp_log:
            self.write_to_log()
        return h5py.File(self.filename.value, 'w', libver='latest')

    def write_to_log(self):
        """ Record the experiment in a log file """
        logfile = os.path.join(config.LogDir, "experiment_log.tsv")
        if os.path.isfile(logfile):
            lf = pd.read_csv(logfile, sep="\t")
        else:
            logger.info("Experiment log file created.")
            lf = pd.DataFrame(columns = ["Filename", "Date", "Time"])
        lf = lf.append(pd.DataFrame([[self.filename.value, time.strftime("%y%m%d"), time.strftime("%H:%M:%S")]],
                                    columns=["Filename", "Date", "Time"]),
                       ignore_index=True)
        lf.to_csv(logfile, sep="\t", index=False)

    def save_yaml(self):
        """ Save a copy of current experiment settings """
        head = os.path.dirname(self.filename.value)
        fulldir = os.path.splitext(self.filename.value)[0]
        if not os.path.exists(fulldir):
            os.makedirs(fulldir)
            config.yaml_dump(config.yaml_load(config.configFile), os.path.join(fulldir, os.path.split(config.configFile)[1]), flatten = True)

    def save_yaml_h5(self):
        """ Save a copy of current experiment settings in the h5 metadata"""
        header = self.file.create_group("header")
        # load then dump to get the 'include' information
        header.attrs['settings'] = config.yaml_dump(config.yaml_load(config.configFile), flatten = True)

    async def run(self):
        self.finished_processing = False
        streams    = self.sink.input_streams
        stream     = streams[0]

        for s in streams[1:]:
            if not np.all(s.descriptor.expected_tuples() == streams[0].descriptor.expected_tuples()):
                raise ValueError("Multiple streams connected to writer must have matching descriptors.")

        desc       = stream.descriptor
        axes       = desc.axes
        params     = desc.params
        axis_names = desc.axis_names(with_metadata=True)

        self.file.attrs['exp_src'] = desc._exp_src
        num_axes   = len(axes)

        if desc.is_adaptive() and not self.store_tuples:
            raise Exception("Cannot omit writing tuples with an adaptive sweep... please enabled store_tuples.")

        if self.store_tuples:
            # All of the combinations for the present values of the sweep parameters only
            tuples          = desc.expected_tuples(with_metadata=True, as_structured_array=True)
        expected_length = desc.expected_num_points()

        compression = 'gzip' if self.compress else None

        # If desired, create the group in which the dataset and axes will reside
        if self.create_group:
            self.group = self.file.create_group(self.groupname.value)
        else:
            self.group = self.file

        self.data_group = self.group.create_group("data")

        # If desired, push experimental metadata into the h5 file
        if self.save_settings.value and 'header' not in self.file.keys(): # only save header once for multiple writers
            self.save_yaml_h5()

        # Create datasets for each stream
        dset_for_streams = {}
        for stream in streams:
            dset = self.data_group.create_dataset(stream.descriptor.data_name, (expected_length,),
                                        dtype=stream.descriptor.dtype,
                                        chunks=True, maxshape=(None,),
                                        compression=compression)
            dset.attrs['is_data'] = True
            dset.attrs['store_tuples'] = self.store_tuples
            dset.attrs['name'] = stream.descriptor.data_name
            dset_for_streams[stream] = dset

        # Write params into attrs
        for k,v in params.items():
            if k not in axis_names:
                self.data_group.attrs[k] = v

        # Create a table for the DataStreamDescriptor
        ref_dtype = h5py.special_dtype(ref=h5py.Reference)
        self.descriptor = self.group.create_dataset("descriptor", (len(axes),), dtype=ref_dtype)
        for k,v in desc.metadata.items():
            self.descriptor.attrs[k] = v

        # Create axis data sets for storing the base axes as well as the
        # full set of tuples. For the former we add
        # references to the descriptor.
        tuple_dset_for_axis_name = {}
        for i, a in enumerate(axes):
            if a.unstructured:
                name = "+".join(a.name)
            else:
                name = a.name

            if a.unstructured:
                # Create another reference table to refer to the constituent axes
                unstruc_ref_dset = self.group.create_dataset(name, (len(a.name),), dtype=ref_dtype)
                unstruc_ref_dset.attrs['unstructured'] = True

                for j, (col_name, col_unit) in enumerate(zip(a.name, a.unit)):
                    # Create table to store the axis value independently for each column
                    unstruc_dset = self.group.create_dataset(col_name, (a.num_points(),), dtype=a.dtype)
                    unstruc_ref_dset[j] = unstruc_dset.ref
                    unstruc_dset[:] = a.points[:,j]
                    unstruc_dset.attrs['unit'] = col_unit
                    unstruc_dset.attrs['name'] = col_name

                    # This stores the values taken during the experiment sweeps
                    if self.store_tuples:
                        dset = self.data_group.create_dataset(col_name, (expected_length,), dtype=a.dtype,
                                                             chunks=True, compression=compression, maxshape=(None,) )
                        dset.attrs['unit'] = col_unit
                        dset.attrs['is_data'] = False
                        dset.attrs['name'] = col_name
                        tuple_dset_for_axis_name[col_name] = dset

                self.descriptor[i] = self.group[name].ref
            else:
                # This stores the axis values
                self.group.create_dataset(name, (a.num_points(),), dtype=a.dtype, maxshape=(None,) )
                self.group[name].attrs['unstructured'] = False
                self.group[name][:] = a.points
                self.group[name].attrs['unit'] = "None" if a.unit is None else a.unit
                self.group[name].attrs['name'] = a.name
                self.descriptor[i] = self.group[name].ref

                # This stores the values taken during the experiment sweeps
                if self.store_tuples:
                    dset = self.data_group.create_dataset(name, (expected_length,), dtype=a.dtype,
                                                          chunks=True, compression=compression, maxshape=(None,) )
                    dset.attrs['unit'] = "None" if a.unit is None else a.unit
                    dset.attrs['is_data'] = False
                    dset.attrs['name'] = name
                    tuple_dset_for_axis_name[name] = dset

            # Give the reader some warning about the usefulness of these axes
            self.group[name].attrs['was_refined'] = False

            if a.metadata is not None:
                # Create the axis table for the metadata
                dset = self.group.create_dataset(name + "_metadata", (a.metadata.size,), dtype=np.uint8, maxshape=(None,) )
                dset[:] = a.metadata
                dset = self.group.create_dataset(name + "_metadata_enum", (a.metadata_enum.size,), dtype='S128', maxshape=(None,) )
                dset[:] = np.asarray(a.metadata_enum, dtype='S128')

                # Associate the metadata with the data axis
                self.group[name].attrs['metadata'] = self.group[name + "_metadata"].ref
                self.group[name].attrs['metadata_enum'] = self.group[name + "_metadata_enum"].ref
                self.group[name].attrs['name'] = name + "_metadata"

                # Create the dataset that stores the individual tuple values
                if self.store_tuples:
                    dset = self.data_group.create_dataset(name + "_metadata" , (expected_length,),
                                                          dtype=np.uint8, maxshape=(None,) )
                    dset.attrs['name'] = name + "_metadata"
                    tuple_dset_for_axis_name[name + "_metadata"] = dset

        # Write all the tuples if this isn't adaptive
        if self.store_tuples:
            if not desc.is_adaptive():
                for a in axis_names:
                    tuple_dset_for_axis_name[a][:] = tuples[a]

        # Write pointer
        w_idx = 0

        while True:
            # Wait for all of the acquisition to complete
            # Contrary to at least some people's rational expectations, asyncio.wait doesn't return
            # Futures in the order of the iterable it was passed, but rather in order of completion.
            # So we construct a dictionary so that the results can be mapped back to their streams:
            futures = {
                asyncio.ensure_future(stream.queue.get()): stream
                for stream in streams
            }

            responses, _ = await asyncio.wait(futures)

            # Construct the inverse lookup
            response_for_stream = {futures[res]: res for res in list(responses)}
            messages = [response_for_stream[stream].result() for stream in streams]

            # Ensure we aren't getting different types of messages at the same time.
            message_types = [m['type'] for m in messages]
            if len(set(message_types)) > 1:
                raise ValueError("Writer received concurrent messages with different message types {}".format(message_types))

            # Infer the type from the first message
            message_type = messages[0]['type']

            # If we receive a message
            if message_type == 'event':
                logger.debug('%s "%s" received event of type "%s"', self.__class__.__name__, self.name, message_type)
                if messages[0]['event_type'] == 'done':
                    break
                elif messages[0]['event_type'] == 'refined':
                    refined_axis = messages[0]['data']

                    # Resize the data set
                    num_new_points = desc.num_new_points_through_axis(refined_axis)
                    for stream in streams:
                        dset_for_streams[stream].resize((len(dset_for_streams[streams[0]])+num_new_points,))

                    if self.store_tuples:
                        for an in axis_names:
                            tuple_dset_for_axis_name[an].resize((len(tuple_dset_for_axis_name[an])+num_new_points,))

                    # Generally speaking the descriptors are now insufficient to reconstruct
                    # the full set of tuples. The user should know this, so let's mark the
                    # descriptor axes accordingly.
                    self.group[name].attrs['was_refined'] = True


            elif message_type == 'data':
                message_data = [message['data'] for message in messages]
                message_comp = [message['compression'] for message in messages]
                message_data = [pickle.loads(zlib.decompress(dat)) if comp == 'zlib' else dat for comp, dat in zip(message_comp, message_data)]
                message_data = [dat if hasattr(dat, 'size') else np.array([dat]) for dat in message_data]  # Convert single values to arrays

                for ii in range(len(message_data)):
                    if not hasattr(message_data[ii], 'size'):
                        message_data[ii] = np.array([message_data[ii]])
                    message_data[ii] = message_data[ii].flatten()
                    if message_data[ii].size != message_data[0].size:
                        raise ValueError("Writer received data of unequal length.")

                logger.debug('%s "%s" received %d points', self.__class__.__name__, self.name, message_data[0].size)
                logger.debug("Now has %d of %d points.", stream.points_taken, stream.num_points())

                self.up_to_date = (w_idx == dset_for_streams[streams[0]].len())

                # Write the data
                for s, d in zip(streams, message_data):
                    dset_for_streams[s][w_idx:w_idx+d.size] = d

                # Write the coordinate tuples
                if self.store_tuples:
                    if desc.is_adaptive():
                        tuples = desc.tuples()
                        for axis_name in axis_names:
                            tuple_dset_for_axis_name[axis_name][w_idx:w_idx+d.size] = tuples[axis_name][w_idx:w_idx+d.size]

                self.file.flush()
                w_idx += message_data[0].size
                self.points_taken = w_idx

                logger.debug("HDF5: Write index at %d", w_idx)
                logger.debug("HDF5: %s has written %d points", stream.name, w_idx)

            # If we have gotten all our data and process_data has returned, then we are done!
            if np.all([v.done() for v in self.input_connectors.values()]):
                self.finished_processing = True
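
The writer relies on chunked, resizable HDF5 datasets so that a 'refined' sweep can grow the file mid-run. Below is a minimal h5py sketch of that pattern in isolation; the file, group, and dataset names are placeholders rather than anything produced by the class above.

# Minimal h5py sketch of the chunked/resizable dataset pattern used above.
import h5py
import numpy as np

with h5py.File("example-0000.h5", "w") as f:
    group = f.create_group("main")
    dset = group.create_dataset("data", (100,), dtype=np.complex128,
                                chunks=True, maxshape=(None,), compression="gzip")
    dset.attrs["is_data"] = True

    # Stream data is written incrementally at a moving write index...
    dset[0:10] = np.ones(10, dtype=np.complex128)

    # ...and a 'refined' sweep simply resizes the dataset to make room for more points.
    dset.resize((150,))
    f.flush()
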
Example #23
class KernelIntegrator(Filter):
    """Integrate with a given kernel. Kernel will be padded/truncated to match record length."""

    sink = InputConnector()
    source = OutputConnector()
    kernel = Parameter()
    bias = FloatParameter(default=0.0)
    simple_kernel = BoolParameter(default=True)
    box_car_start = FloatParameter(default=0.0)
    box_car_stop = FloatParameter(default=100e-9)
    frequency = FloatParameter(default=0.0)

    def __init__(self, **kwargs):
        super(KernelIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        self.quince_parameters = [
            self.simple_kernel, self.frequency, self.box_car_start,
            self.box_car_stop
        ]

    def update_descriptors(self):
        if not self.simple_kernel and self.kernel.value is None:
            raise ValueError("Integrator was passed kernel None")

        logger.debug(
            'Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()
        if self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.frequency.value * time_step *
                             time_pts)
        else:
            kernel = eval(self.kernel.value.encode('unicode_escape'))
        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(
                kernel,
                np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for os in self.source.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()

    async def process_data(self, data):

        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))),
                            self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
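
The integration step is just an inner product of each reshaped record with the (padded) kernel. The standalone NumPy sketch below reproduces the boxcar-kernel construction and the `np.inner` reduction with made-up timing parameters; it is an illustration of the technique, not a drop-in for the filter.

# Standalone sketch of the boxcar kernel and the integration step in process_data,
# with made-up record length, sample spacing, and integration window.
import numpy as np

record_length = 1024
time_step = 2.0e-9
time_pts = time_step * np.arange(record_length)

# Boxcar window between 100 ns and 500 ns, modulated at 10 MHz.
kernel = np.zeros(record_length, dtype=np.complex128)
start, stop = int(100e-9 / time_step), int(500e-9 / time_step) + 1
kernel[start:stop] = 1.0
kernel *= np.exp(2j * np.pi * 10e6 * time_pts)

# Each row of the reshaped data is one record; np.inner integrates it against the kernel.
data = np.random.randn(10 * record_length)                        # 10 concatenated records
integrated = np.inner(data.reshape((-1, record_length)), kernel)  # shape (10,)
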
Example #24
class Averager(Filter):
    """Takes data and collapses along the specified axis."""

    sink = InputConnector()
    partial_average = OutputConnector()
    final_average = OutputConnector()
    final_variance = OutputConnector()
    axis = Parameter()

    def __init__(self, axis=None, **kwargs):
        super(Averager, self).__init__(**kwargs)
        self.axis.value = axis
        self.points_before_final_average = None
        self.points_before_partial_average = None
        self.sum_so_far = None
        self.num_averages = None

        self.quince_parameters = [self.axis]

        # Rate limiting for partial averages
        self.last_update = time.time()
        self.update_interval = 0.5

    def update_descriptors(self):
        logger.debug(
            'Updating averager "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)
        descriptor_in = self.sink.descriptor
        names = [a.name for a in descriptor_in.axes]

        self.axis.allowed_values = names

        if self.axis.value is None:
            self.axis.value = descriptor_in.axes[0].name

        # Convert named axes to an index
        if self.axis.value not in names:
            raise ValueError(
                "Could not find axis {} within the DataStreamDescriptor {}".
                format(self.axis.value, descriptor_in))
        self.axis_num = descriptor_in.axis_num(self.axis.value)
        logger.debug("Averaging over axis #%d: %s", self.axis_num,
                     self.axis.value)

        self.data_dims = descriptor_in.data_dims()
        if self.axis_num == len(descriptor_in.axes) - 1:
            logger.debug("Performing scalar average!")
            self.points_before_partial_average = 1
            self.avg_dims = [1]
        else:
            self.points_before_partial_average = descriptor_in.num_points_through_axis(
                self.axis_num + 1)
            self.avg_dims = self.data_dims[self.axis_num + 1:]

        # In case we get multiple final averages simultaneously
        self.reshape_dims = self.data_dims[self.axis_num:]
        if self.axis_num > 0:
            self.reshape_dims = [-1] + self.reshape_dims
        self.mean_axis = self.axis_num - len(self.data_dims)

        self.points_before_final_average = descriptor_in.num_points_through_axis(
            self.axis_num)
        logger.debug("Points before partial average: %s.",
                     self.points_before_partial_average)
        logger.debug("Points before final average: %s.",
                     self.points_before_final_average)
        logger.debug("Data dimensions are %s", self.data_dims)
        logger.debug("Averaging dimensions are %s", self.avg_dims)

        # Define final axis descriptor
        descriptor = descriptor_in.copy()
        self.num_averages = descriptor.pop_axis(self.axis.value).num_points()
        logger.debug("Number of partial averages is %d", self.num_averages)

        self.sum_so_far = np.zeros(self.avg_dims, dtype=descriptor.dtype)
        self.current_avg_frame = np.zeros(self.points_before_final_average,
                                          dtype=descriptor.dtype)
        self.partial_average.descriptor = descriptor
        self.final_average.descriptor = descriptor

        # We can update the visited_tuples upfront if none
        # of the sweeps are adaptive...
        desc_out_dtype = descriptor_in.axis_data_type(
            with_metadata=True, excluding_axis=self.axis.value)
        if not descriptor_in.is_adaptive():
            vals = [
                a.points_with_metadata() for a in descriptor_in.axes
                if a.name != self.axis.value
            ]
            nested_list = list(itertools.product(*vals))
            flattened_list = [
                tuple((val for sublist in line for val in sublist))
                for line in nested_list
            ]
            descriptor.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.partial_average.output_streams + self.final_average.output_streams:
            stream.set_descriptor(descriptor)
            stream.end_connector.update_descriptors()

        # Define variance axis descriptor
        descriptor_var = descriptor_in.copy()
        descriptor_var.data_name = "Variance"
        descriptor_var.pop_axis(self.axis.value)
        if descriptor_var.unit:
            descriptor_var.unit = descriptor_var.unit + "^2"
        descriptor_var.metadata["num_averages"] = self.num_averages
        self.final_variance.descriptor = descriptor_var

        if not descriptor_in.is_adaptive():
            descriptor_var.visited_tuples = np.core.records.fromrecords(
                flattened_list, dtype=desc_out_dtype)
        else:
            descriptor_var.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.final_variance.output_streams:
            stream.set_descriptor(descriptor_var)
            stream.end_connector.update_descriptors()

    def final_init(self):
        if self.points_before_final_average is None:
            raise Exception(
                "Average has not been initialized. Run 'update_descriptors'")

        self.completed_averages = 0
        self.idx_frame = 0
        self.idx_global = 0
        # We only need to accumulate up to the averaging axis
        # BUT we may get something longer at any given time!
        self.carry = np.zeros(0, dtype=self.final_average.descriptor.dtype)

    async def process_data(self, data):

        # TODO: handle unflattened data separately
        if len(data.shape) > 1:
            data = data.flatten()
        #handle single points
        elif not isinstance(data, np.ndarray) and (data.size == 1):
            data = np.array([data])

        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))
            self.carry = np.zeros(0, dtype=self.final_average.descriptor.dtype)

        idx = 0
        while idx < data.size:
            #check whether we have enough data to fill an averaging frame
            if data.size - idx >= self.points_before_final_average:
                # How many chunks can we process at once?
                num_chunks = int(
                    (data.size - idx) / self.points_before_final_average)
                new_points = num_chunks * self.points_before_final_average
                reshaped = data[idx:idx + new_points].reshape(
                    self.reshape_dims)
                averaged = reshaped.mean(axis=self.mean_axis)
                idx += new_points

                if self.sink.descriptor.is_adaptive():
                    new_tuples = self.sink.descriptor.tuples(
                    )[self.idx_global:self.idx_global + new_points]
                    new_tuples_stripped = remove_fields(
                        new_tuples, self.axis.value)
                    take_axis = -1 if self.axis_num > 0 else 0
                    reduced_tuples = new_tuples_stripped.reshape(
                        self.reshape_dims).take((0, ), axis=take_axis)
                    self.idx_global += new_points

                # Add to Visited tuples
                if self.sink.descriptor.is_adaptive():
                    for os in self.final_average.output_streams + self.final_variance.output_streams + self.partial_average.output_streams:
                        os.descriptor.visited_tuples = np.append(
                            os.descriptor.visited_tuples, reduced_tuples)

                for os in self.final_average.output_streams:
                    await os.push(averaged)

                for os in self.final_variance.output_streams:
                    await os.push(reshaped.var(axis=self.mean_axis, ddof=1)
                                  )  # N-1 in the denominator

                for os in self.partial_average.output_streams:
                    await os.push(averaged)

            # Maybe we can fill a partial frame
            elif data.size - idx >= self.points_before_partial_average:
                # How many chunks can we process at once?
                num_chunks = int(
                    (data.size - idx) / self.points_before_partial_average)
                new_points = num_chunks * self.points_before_partial_average

                # Find the appropriate dimensions for the partial
                partial_reshape_dims = self.reshape_dims[:]
                partial_reshape_dims[self.mean_axis] = -1
                partial_reshape_dims = partial_reshape_dims[self.mean_axis:]

                reshaped = data[idx:idx +
                                new_points].reshape(partial_reshape_dims)
                summed = reshaped.sum(axis=self.mean_axis)
                self.sum_so_far += summed

                self.current_avg_frame[self.idx_frame:self.idx_frame +
                                       new_points] = data[idx:idx + new_points]
                idx += new_points
                self.idx_frame += new_points

                self.completed_averages += num_chunks

                # If we now have enough for the final average, push to both partial and final...
                if self.completed_averages == self.num_averages:
                    reshaped = self.current_avg_frame.reshape(
                        partial_reshape_dims)
                    for os in self.final_average.output_streams + self.partial_average.output_streams:
                        await os.push(reshaped.mean(axis=self.mean_axis))
                    for os in self.final_variance.output_streams:
                        await os.push(reshaped.var(axis=self.mean_axis, ddof=1)
                                      )  # N-1 in the denominator
                    self.sum_so_far[:] = 0.0
                    self.current_avg_frame[:] = 0.0
                    self.completed_averages = 0
                    self.idx_frame = 0
                else:
                    # Emit a partial average since we've accumulated enough data
                    if (time.time() - self.last_update >=
                            self.update_interval):
                        for os in self.partial_average.output_streams:
                            await os.push(self.sum_so_far /
                                          self.completed_averages)
                        self.last_update = time.time()

            # otherwise just add it to the carry
            else:
                self.carry = data[idx:]
                break
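
The heart of the averager is reshaping a flat chunk into complete frames and reducing along the averaging axis. The following standalone NumPy sketch shows that reshape/mean/variance step with made-up dimensions; it illustrates the reduction for an interior "averages" axis rather than reproducing the full bookkeeping above.

# Standalone sketch of the reshape-and-reduce step in Averager.process_data,
# with made-up dimensions: 5 averages of a 100-point inner axis, 3 frames at once.
import numpy as np

num_averages, inner_points = 5, 100
data = np.random.randn(3 * num_averages * inner_points)

# The leading -1 absorbs however many complete frames arrived in this chunk,
# and the reduction runs over the averaging axis.
reshaped = data.reshape((-1, num_averages, inner_points))
final_average = reshaped.mean(axis=-2)            # shape (3, 100)
final_variance = reshaped.var(axis=-2, ddof=1)    # N-1 in the denominator
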
Example #25
class Plotter(Filter):
    sink = InputConnector()
    plot_dims = IntParameter(value_range=(0, 1, 2), snap=1,
                             default=0)  # 0 means auto
    plot_mode = Parameter(
        allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"],
        default="quad")

    def __init__(self,
                 *args,
                 name="",
                 plot_dims=None,
                 plot_mode=None,
                 **plot_args):
        super(Plotter, self).__init__(*args, name=name)
        if plot_dims:
            self.plot_dims.value = plot_dims
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.full_update_interval = 0.5
        self.update_interval = 2.0  # slower for partial updates
        self.last_update = time.time()
        self.last_full_update = time.time()

        self.quince_parameters = [self.plot_dims, self.plot_mode]

        # This will hold the plot server
        self.plot_server = None

    def desc(self):
        d = {
            'plot_type': 'standard',
            'plot_mode': self.plot_mode.value,
            'plot_dims': int(self.plot_dims.value),
            'x_min': float(min(self.x_values)),
            'x_max': float(max(self.x_values)),
            'x_len': int(self.descriptor.axes[-1].num_points()),
            'x_label': self.axis_label(-1),
            'y_label': "{} ({})".format(self.descriptor.data_name, self.descriptor.data_unit)
        }
        if self.plot_dims.value == 2:
            d['y_label'] = self.axis_label(-2)
            d['data_label'] = "{} ({})".format(self.descriptor.data_name, self.descriptor.data_unit)
            d['y_min'] = float(min(self.y_values))
            d['y_max'] = float(max(self.y_values))
            d['y_len'] = int(self.descriptor.axes[-2].num_points())
        return d

    def update_descriptors(self):
        logger.debug(
            "Updating Plotter %s descriptors based on input descriptor %s",
            self.name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor

    def final_init(self):

        # Determine the plot dimensions
        if not self.plot_dims.value:
            if len(self.descriptor.axes) > 1:
                self.plot_dims.value = 2
            else:
                self.plot_dims.value = 1

        # Check the descriptor axes
        num_axes = len(self.descriptor.axes)
        if self.plot_dims.value > num_axes:
            logger.info(
                "Cannot plot in more dimensions than there are data axes.")
            self.plot_dims.value = num_axes

        if self.plot_dims.value == 1:
            self.points_before_clear = self.descriptor.axes[-1].num_points()
        else:
            self.points_before_clear = self.descriptor.axes[-1].num_points(
            ) * self.descriptor.axes[-2].num_points()
        logger.debug("Plot will clear after every %d points.",
                     self.points_before_clear)

        self.x_values = self.descriptor.axes[-1].points

        if self.plot_dims.value == 2:
            self.y_values = self.descriptor.axes[-2].points

        self.plot_buffer = (np.nan * np.ones(self.points_before_clear)).astype(
            self.descriptor.dtype)
        self.idx = 0

    def update(self):
        if self.plot_dims.value == 1:
            self.plot_server.send(self.name, self.x_values,
                                  self.plot_buffer.copy())
        elif self.plot_dims.value == 2:
            self.plot_server.send(self.name, self.x_values, self.y_values,
                                  self.plot_buffer.copy())

    async def process_data(self, data):
        # If we get more than enough data, pause to update the plot if necessary
        if (self.idx + data.size) > self.points_before_clear:
            spill_over = (self.idx + data.size) % self.points_before_clear
            if spill_over == 0:
                spill_over = self.points_before_clear
            if (time.time() - self.last_full_update >=
                    self.full_update_interval):
                # If we are getting data quickly, then we can afford to wait
                # for a full frame before pushing to plot.
                self.plot_buffer[self.idx:] = data[:(self.points_before_clear -
                                                     self.idx)]
                self.update()
                self.last_full_update = time.time()
            self.plot_buffer[:] = np.nan
            self.plot_buffer[:spill_over] = data[-spill_over:]
            self.idx = spill_over
        else:  # just keep trucking
            self.plot_buffer[self.idx:self.idx + data.size] = data.flatten()
            self.idx += data.size
            if (time.time() - max(self.last_full_update, self.last_update) >=
                    self.update_interval):
                self.update()
                self.last_update = time.time()

    async def on_done(self):
        if self.plot_dims.value == 1:
            self.plot_server.send(self.name, self.x_values, self.plot_buffer)
        elif self.plot_dims.value == 2:
            self.plot_server.send(self.name, self.x_values, self.y_values,
                                  self.plot_buffer)

    def axis_label(self, index):
        unit_str = " ({})".format(self.descriptor.axes[index].unit
                                  ) if self.descriptor.axes[index].unit else ''
        return self.descriptor.axes[index].name + unit_str
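
`process_data` treats `plot_buffer` as a fixed-size frame that is cleared once a full frame has been displayed; the spill-over arithmetic decides how many of the newest points seed the next frame. Below is a small standalone sketch of that bookkeeping with toy numbers.

# Standalone sketch of the frame-buffer bookkeeping in Plotter.process_data,
# with a made-up frame size and two toy incoming chunks.
import numpy as np

points_before_clear = 8
plot_buffer = np.nan * np.ones(points_before_clear)
idx = 0

for data in (np.arange(5.0), np.arange(5.0, 11.0)):
    if idx + data.size > points_before_clear:
        # Frame overflows: keep only the newest spill-over points for the next frame.
        spill_over = (idx + data.size) % points_before_clear
        if spill_over == 0:
            spill_over = points_before_clear
        plot_buffer[:] = np.nan
        plot_buffer[:spill_over] = data[-spill_over:]
        idx = spill_over
    else:
        plot_buffer[idx:idx + data.size] = data
        idx += data.size
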
Example #26
class XYPlotter(Filter):
    sink_x = InputConnector()
    sink_y = InputConnector()

    def __init__(self,
                 *args,
                 name="",
                 x_series=False,
                 y_series=False,
                 series="inner",
                 notebook=False,
                 webgl=False,
                 **plot_args):
        """Theyintent is to let someone plot this vs. that from different streams."""
        super(XYPlotter, self).__init__(*args, name=name)

        self.plot_args = plot_args
        self.update_interval = 0.5
        self.last_update = time.time()
        self.run_in_notebook = notebook
        self.x_series = x_series
        self.y_series = y_series or self.x_series
        self.plot_height = 600
        self.series = series
        self.webgl = webgl

        self.quince_parameters = []

    def update_descriptors(self):
        logger.debug("Updating XYPlotter %s descriptors.", self.name)
        self.stream_x = self.sink_x.input_streams[0]
        self.stream_y = self.sink_y.input_streams[0]
        self.desc_x = self.sink_x.descriptor
        self.desc_y = self.sink_y.descriptor

    def final_init(self):
        # Check the dimensions to ensure compatibility
        if self.desc_x.axes[-1].num_points(
        ) != self.desc_y.axes[-1].num_points():
            raise ValueError("XYPlotter x and y final axis lengths must match")
        if self.x_series and self.y_series:
            if self.desc_x.axes[-2].num_points(
            ) != self.desc_y.axes[-2].num_points():
                raise ValueError(
                    "XYPlotter x and y second-to-last axis lengths must match when plotting series."
                )

        if len(self.desc_x.axes) == 1 and len(self.desc_y.axes) == 1:
            series_axis = 0
            data_axis = 0
        else:
            if self.series == "inner":
                series_axis = -2
                data_axis = -1
            elif self.series == "outer":
                series_axis = 0
                data_axis = 1
            else:
                raise ValueError("series must be either inner or outer")

        # How many points before clear
        self.points_before_clear_y = self.desc_y.axes[data_axis].num_points()
        self.points_before_clear_x = self.desc_x.axes[data_axis].num_points()

        if self.x_series:
            x_data = [
                [] for x in range(self.desc_x.axes[series_axis].num_points())
            ]
            self.points_before_clear_x *= self.desc_x.axes[
                series_axis].num_points()
        else:
            x_data = [[]]
        if self.y_series:
            y_data = [
                [] for y in range(self.desc_y.axes[series_axis].num_points())
            ]
            self.points_before_clear_y *= self.desc_y.axes[
                series_axis].num_points()
            self.num_series = self.desc_y.axes[series_axis].num_points()
        else:
            y_data = [[]]

        x_label = "{} ({})".format(self.desc_x.data_name,
                                   self.desc_x.data_unit)
        y_label = "{} ({})".format(self.desc_y.data_name,
                                   self.desc_y.data_unit)

        self.fig = Figure(plot_width=self.plot_height,
                          plot_height=self.plot_height,
                          webgl=self.webgl,
                          x_axis_label=x_label,
                          y_axis_label=y_label)

        if self.desc_y.axes[series_axis].num_points() <= 10:
            self.colors = d3['Category10'][
                self.desc_y.axes[series_axis].num_points()]
        elif self.desc_y.axes[series_axis].num_points() <= 20:
            self.colors = d3['Category20'][
                self.desc_y.axes[series_axis].num_points()]
        else:
            self.colors = Viridis256[:self.desc_y.axes[series_axis].num_points(
            )]

        self.plot = self.fig.multi_line(x_data,
                                        y_data,
                                        name=self.name,
                                        line_width=2,
                                        color=self.colors,
                                        **self.plot_args)

        renderers = self.plot.select(dict(name=self.name))
        self.renderer = [r for r in renderers
                         if isinstance(r, GlyphRenderer)][0]
        self.data_source = self.renderer.data_source

        self.plot_buffer_x = np.nan * np.ones(self.points_before_clear_x,
                                              dtype=self.desc_x.dtype)
        self.plot_buffer_y = np.nan * np.ones(self.points_before_clear_y,
                                              dtype=self.desc_y.dtype)
        self.idx = 0
        self.idy = 0

    async def run(self):
        while True:
            # Wait for all of the acquisition to complete, avoid asyncio.wait because of random return order...
            message_x = await self.stream_x.queue.get()
            message_y = await self.stream_y.queue.get()
            messages = [message_x, message_y]

            # Ensure we aren't getting different types of messages at the same time.
            message_types = [m['type'] for m in messages]
            if len(set(message_types)) > 1:
                raise ValueError(
                    "XYPlotter received concurrent messages with different message types {}"
                    .format(message_types))

            # Infer the type from the first message
            message_type = messages[0]['type']

            # If we receive a message
            if message_type == 'event':
                logger.debug('%s "%s" received event "%s"',
                             self.__class__.__name__, self.name, message_type)
                if messages[0]['event_type'] == 'done':
                    break

            elif message_type == 'data':
                message_data = [message['data'] for message in messages]
                message_comp = [message['compression'] for message in messages]
                message_data = [
                    pickle.loads(zlib.decompress(dat))
                    if comp == 'zlib' else dat
                    for comp, dat in zip(message_comp, message_data)
                ]
                message_data = [
                    dat if hasattr(dat, 'size') and dat.size != 1 else
                    np.array([dat]) for dat in message_data
                ]  # Convert single values to arrays

                data_x, data_y = message_data

                # if we're going to clear then reset idy
                if self.idy + data_y.size > self.points_before_clear_y:
                    logger.debug("Clearing previous plot and restarting")
                    spill_over = (self.idy +
                                  data_y.size) % self.points_before_clear_y
                    if spill_over == 0:
                        spill_over = self.points_before_clear_y
                    self.plot_buffer_y[:] = np.nan
                    self.plot_buffer_y[:spill_over] = data_y[-spill_over:]
                    self.idy = spill_over
                else:
                    self.plot_buffer_y[self.idy:self.idy +
                                       data_y.size] = data_y.flatten()
                    self.idy += data_y.size

                # if we're going to clear then reset idx
                if self.idx + data_x.size > self.points_before_clear_x:
                    logger.debug("Clearing previous plot and restarting")
                    spill_over = (self.idx +
                                  data_x.size) % self.points_before_clear_x
                    if spill_over == 0:
                        spill_over = self.points_before_clear_x
                    self.plot_buffer_x[:] = np.nan
                    self.plot_buffer_x[:spill_over] = data_x[-spill_over:]
                    self.idx = spill_over
                else:
                    self.plot_buffer_x[self.idx:self.idx +
                                       data_x.size] = data_x.flatten()
                    self.idx += data_x.size

                # Assume that the x data is synched to the y data (they arrive at the same time...)

                if (time.time() - self.last_update >= self.update_interval):
                    if self.x_series:
                        x_data = np.reshape(self.plot_buffer_x,
                                            (self.num_series, -1)).T
                    elif self.y_series:
                        x_data = np.tile(self.plot_buffer_x,
                                         (self.num_series, 1))
                    else:
                        x_data = [self.plot_buffer_x]

                    if self.y_series:
                        y_data = np.reshape(self.plot_buffer_y,
                                            (self.num_series, -1)).T
                    else:
                        y_data = [self.plot_buffer_y]

                    # Strip NaNs
                    x_data = np.array(
                        [series[~np.isnan(series)] for series in x_data])
                    y_data = np.array(
                        [series[~np.isnan(series)] for series in y_data])

                    # Convert to lists and then push all at once...
                    self.data_source.data = dict(
                        xs=x_data.tolist(),
                        ys=y_data.tolist(),
                        line_color=self.colors[0:len(y_data)])

                    self.last_update = time.time()
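
Before the buffers are pushed to the Bokeh data source, the flat y buffer is split into one row per series and the NaN padding is stripped. Below is a standalone NumPy sketch of that step with made-up sizes; plain Python lists stand in for the `multi_line` columns.

# Standalone sketch of splitting the flat y buffer into series and stripping NaN padding,
# with made-up numbers (3 series of 4 points, the last series only partially filled).
import numpy as np

num_series, points_per_series = 3, 4
plot_buffer_y = np.nan * np.ones(num_series * points_per_series)
plot_buffer_y[:10] = np.arange(10.0)

y_data = np.reshape(plot_buffer_y, (num_series, -1))          # one row per series
y_data = [series[~np.isnan(series)] for series in y_data]     # drop unfilled points

# y_data can then be handed to a multi_line data source, e.g. ys=[list(s) for s in y_data].
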
Example #27
class ElementwiseFilter(Filter):
    """Perform elementwise operations on multiple streams:
    e.g. multiply or add all streams element-by-element"""

    sink        = InputConnector()
    source      = OutputConnector()
    filter_name = "GenericElementwise" # To identify subclasses when naming data streams

    def __init__(self, filter_name=None, **kwargs):
        super(ElementwiseFilter, self).__init__(filter_name=filter_name, **kwargs)
        self.sink.max_input_streams = 100
        self.quince_parameters = []

    def operation(self):
        """Must be overridden with the desired mathematical function"""
        pass

    def unit(self, base_unit):
        """Must be overridden accoriding the desired mathematical function
        e.g. return base_unit + "^{}".format(len(self.sink.input_streams))"""
        pass

    def update_descriptors(self):
        """Must be overridden depending on the desired mathematical function"""
        logger.debug('Updating %s "%s" descriptors based on input descriptor: %s.', self.filter_name, self.name, self.sink.descriptor)

        # Sometimes not all of the input descriptors have been updated... pause here until they are:
        if None in [ss.descriptor for ss in self.sink.input_streams]:
            logger.debug('%s "%s" waiting for all input streams to be updated.', self.filter_name, self.name)
            return

        self.descriptor = self.sink.descriptor.copy()
        if self.filter_name:
            self.descriptor.data_name = self.filter_name
        if self.descriptor.unit:
            self.descriptor.unit = self.descriptor.unit + "^{}".format(len(self.sink.input_streams))
        self.source.descriptor = self.descriptor
        self.source.update_descriptors()

    def main(self):
        self.done.clear()
        streams = self.sink.input_streams

        for s in streams[1:]:
            if not np.all(s.descriptor.expected_tuples() == streams[0].descriptor.expected_tuples()):
                raise ValueError("Multiple streams connected to correlator must have matching descriptors.")

        # Buffers for stream data
        stream_data = {s: np.zeros(0, dtype=self.sink.descriptor.dtype) for s in streams}

        # Store whether streams are done
        streams_done      = {s: False for s in streams}
        points_per_stream = {s: 0 for s in streams}

        while not self.exit.is_set():

            # Try to pull all messages in the queue. queue.empty() is not reliable, so we
            # ask for forgiveness rather than permission.
            msgs_by_stream = {s: [] for s in streams}

            for stream in streams[::-1]:
                while not self.exit.is_set():
                    try:
                        msgs_by_stream[stream].append(stream.queue.get(False))
                    except queue.Empty as e:
                        time.sleep(0.002)
                        break

            # Process many messages for each stream
            for stream, messages in msgs_by_stream.items():
                for message in messages:
                    message_type = message['type']
                    # message_data = message['data']
                    # message_data = message_data if hasattr(message_data, 'size') else np.array([message_data])
                    if message_type == 'event':
                        if message['event_type'] == 'done':
                            streams_done[stream] = True
                        elif message['event_type'] == 'refine':
                            logger.warning("ElementwiseFilter doesn't handle refinement yet!")
                    elif message_type == 'data':
                        # Pull any data that has accumulated on the stream
                        message_data = stream.pop()
                        if message_data is not None:
                            points_per_stream[stream] += len(message_data)
                            stream_data[stream] = np.concatenate((stream_data[stream], message_data))
                            # logger.info(f"{stream.name}: {message_data} now {stream_data[stream]}")
            # Now process the data with the elementwise operation
            smallest_length = min([d.size for d in stream_data.values()])
            new_data = [d[:smallest_length] for d in stream_data.values()]
            result = new_data[0]
            for nd in new_data[1:]:
                result = self.operation()(result, nd)
            if result.size > 0:
                self.source.push(result)

            # Carry over any leftover data for the next pass
            for stream in stream_data.keys():
                if stream_data[stream].size > smallest_length:
                    stream_data[stream] = stream_data[stream][smallest_length:]
                else:
                    stream_data[stream] = np.zeros(0, dtype=self.sink.descriptor.dtype)

            # If the amount of data processed is equal to the num points in the stream, we are done
            if np.all([streams_done[stream] for stream in streams]):
                self.push_to_all({"type": "event", "event_type": "done", "data": None})
                self.done.set()
                break
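
The elementwise reduction trims every stream buffer to the shortest common length, folds `operation()` across the trimmed arrays, and carries any leftover points into the next pass. Below is a standalone sketch with `np.add` standing in for `operation()` and plain arrays standing in for the stream buffers.

# Standalone sketch of the elementwise reduction, using np.add in place of operation().
import numpy as np

stream_data = {
    "ch1": np.array([1.0, 2.0, 3.0, 4.0]),
    "ch2": np.array([10.0, 20.0, 30.0]),      # one stream is lagging behind
}

smallest_length = min(d.size for d in stream_data.values())
new_data = [d[:smallest_length] for d in stream_data.values()]

result = new_data[0]
for nd in new_data[1:]:
    result = np.add(result, nd)               # elementwise combine, e.g. add or multiply
# result == [11., 22., 33.]; this is what would be pushed downstream.

# Leftover points are carried over to the next pass.
for key in stream_data:
    stream_data[key] = stream_data[key][smallest_length:]
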
Example #28
class ElementwiseFilter(Filter):
    """Asynchronously perform elementwise operations on multiple streams:
    e.g. multiply or add all streams element-by-element"""

    sink = InputConnector()
    source = OutputConnector()
    filter_name = "GenericElementwise"  # To identify subclasses when naming data streams

    def __init__(self, **kwargs):
        super(ElementwiseFilter, self).__init__(**kwargs)
        self.sink.max_input_streams = 100
        self.quince_parameters = []

    def operation(self):
        """Must be overridden with the desired mathematical function"""
        pass

    def unit(self, base_unit):
        """Must be overridden accoriding the desired mathematical function
        e.g. return base_unit + "^{}".format(len(self.sink.input_streams))"""
        pass

    def update_descriptors(self):
        """Must be overridden depending on the desired mathematical function"""
        logger.debug(
            'Updating %s "%s" descriptors based on input descriptor: %s.',
            self.filter_name, self.name, self.sink.descriptor)

        # Sometimes not all of the input descriptors have been updated... pause here until they are:
        if None in [ss.descriptor for ss in self.sink.input_streams]:
            logger.debug(
                '%s "%s" waiting for all input streams to be updated.',
                self.filter_name, self.name)
            return

        self.descriptor = self.sink.descriptor.copy()
        self.descriptor.data_name = self.filter_name
        if self.descriptor.unit:
            self.descriptor.unit = self.descriptor.unit + "^{}".format(
                len(self.sink.input_streams))
        self.source.descriptor = self.descriptor
        self.source.update_descriptors()

    async def run(self):
        streams = self.sink.input_streams

        for s in streams[1:]:
            if not np.all(s.descriptor.expected_tuples() ==
                          streams[0].descriptor.expected_tuples()):
                raise ValueError(
                    "Multiple streams connected to correlator must have matching descriptors."
                )

        # Buffers for stream data
        stream_data = {
            s: np.zeros(0, dtype=self.sink.descriptor.dtype)
            for s in streams
        }

        # Store whether streams are done
        stream_done = {s: False for s in streams}

        while True:
            # Wait for the next message from any of the input streams.
            # Contrary to at least some people's rational expectations, asyncio.wait doesn't return futures
            # in the order of the iterable it was passed, but rather in order of completion. So
            # we construct a dictionary so that each result can be mapped back to the stream it came from:

            futures = {
                asyncio.ensure_future(stream.queue.get()): stream
                for stream in streams
            }

            # Deal with non-equal number of messages using timeout
            responses, pending = await asyncio.wait(
                futures, return_when=asyncio.FIRST_COMPLETED, timeout=2.0)

            # Construct the inverse lookup, results in {stream: result}
            stream_results = {
                futures[res]: res.result()
                for res in list(responses)
            }

            # Cancel the futures
            for pend in list(pending):
                pend.cancel()

            # Add any new data to the per-stream buffers
            for stream, message in stream_results.items():
                message_type = message['type']
                message_data = message['data']
                message_comp = message['compression']
                message_data = pickle.loads(zlib.decompress(
                    message_data)) if message_comp == 'zlib' else message_data
                message_data = message_data if hasattr(
                    message_data, 'size') else np.array([message_data])
                if message_type == 'event':
                    if message['event_type'] == 'done':
                        stream_done[stream] = True
                    elif message['event_type'] == 'refine':
                        logger.warning(
                            "Correlator doesn't handle refinement yet!")

                elif message_type == 'data':
                    stream_data[stream] = np.concatenate(
                        (stream_data[stream], message_data.flatten()))

            if all(stream_done.values()):
                for oc in self.output_connectors.values():
                    for os in oc.output_streams:
                        await os.push_event("done")
                logger.debug('%s "%s" is done', self.__class__.__name__,
                             self.name)
                break

            # Now process the data with the elementwise operation
            smallest_length = min([d.size for d in stream_data.values()])
            new_data = [d[:smallest_length] for d in stream_data.values()]
            result = new_data[0]
            for nd in new_data[1:]:
                result = self.operation()(result, nd)
            if result.size > 0:
                await self.source.push(result)

            # Carry over any unprocessed samples for the next pass
            for stream in stream_data.keys():
                if stream_data[stream].size > smallest_length:
                    stream_data[stream] = stream_data[stream][smallest_length:]
                else:
                    stream_data[stream] = np.zeros(
                        0, dtype=self.sink.descriptor.dtype)
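ElementwiseFilter deliberately leaves operation() and unit() abstract, so a concrete subclass only has to supply the combining function and the unit rule. A minimal sketch of such a subclass, assuming the ElementwiseFilter above is importable; the class name MultiplyStreams is made up for illustration:

import numpy as np

class MultiplyStreams(ElementwiseFilter):
    """Hypothetical subclass that multiplies all connected input streams element-by-element."""
    filter_name = "Product"

    def operation(self):
        # The two-argument callable used to fold the trimmed stream buffers together
        return np.multiply

    def unit(self, base_unit):
        # Multiplying N like quantities raises the unit to the Nth power
        return base_unit + "^{}".format(len(self.sink.input_streams))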
Example #29
class Channelizer(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel. If
    an axis name is supplied to `follow_axis` then the filter will demodulate at the freqency
    `axis_frequency_value - follow_freq_offset` otherwise it will demodulate at `frequency`. Note that
    the filter coefficients are still calculated with respect to the `frequency` paramter, so it should
    be chosen accordingly when `follow_axis` is defined."""

    sink = InputConnector()
    source = OutputConnector()
    follow_axis = Parameter(default="")  # Name of the axis to follow
    follow_freq_offset = FloatParameter(default=0.0)  # Offset
    decimation_factor = IntParameter(value_range=(1, 100), default=4, snap=1)
    frequency = FloatParameter(value_range=(-10e9, 10e9),
                               increment=1.0e6,
                               default=10e6)
    bandwidth = FloatParameter(value_range=(0.00, 100e6),
                               increment=0.1e6,
                               default=5e6)

    def __init__(self,
                 frequency=None,
                 bandwidth=None,
                 decimation_factor=None,
                 follow_axis=None,
                 follow_freq_offset=None,
                 **kwargs):
        super(Channelizer, self).__init__(**kwargs)
        if frequency:
            self.frequency.value = frequency
        if bandwidth:
            self.bandwidth.value = bandwidth
        if decimation_factor:
            self.decimation_factor.value = decimation_factor
        if follow_axis:
            self.follow_axis.value = follow_axis
        if follow_freq_offset:
            self.follow_freq_offset.value = follow_freq_offset
        self.quince_parameters = [
            self.decimation_factor, self.frequency, self.bandwidth
        ]
        self._phase = 0.0

    def final_init(self):
        self.init_filters(self.frequency.value, self.bandwidth.value)

        if self.follow_axis.value != "":
            desc = self.sink.descriptor
            axis_num = desc.axis_num(self.follow_axis.value)
            self.pts_before_freq_update = desc.num_points_through_axis(
                axis_num + 1)
            self.pts_before_freq_reset = desc.num_points_through_axis(axis_num)
            self.demod_freqs = desc.axes[
                axis_num].points - self.follow_freq_offset.value
            self.current_freq = 0
            self.update_references(self.current_freq)
        self.idx = 0

        # For storing carryover if getting uneven buffers
        self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

    def update_references(self, frequency):
        # store decimated reference for mix down
        # phase_drift = 2j*np.pi*0.5e-6 * (abs(frequency) - 100e6)
        ref = np.exp(2j * np.pi * -frequency * self.time_pts[::self.d1] +
                     1j * self._phase,
                     dtype=np.complex64)

        self.reference = ref
        self.reference_r = np.real(ref)
        self.reference_i = np.imag(ref)

    def init_filters(self, frequency, bandwidth):
        # convert bandwidth normalized to Nyquist interval
        n_bandwidth = bandwidth * self.time_step * 2
        n_frequency = abs(frequency) * self.time_step * 2

        # arbitrarily decide on three stage filter pipeline
        # 1. first stage decimating filter on real data
        # 2. second stage decimating filter on mixed product to boost n_bandwidth
        # 3. final channel selecting filter at n_bandwidth/2

        # anecdotally don't decimate more than a factor of eight for stability

        self.decim_factors = [1] * 3
        self.filters = [None] * 3

        # first stage decimating filter
        # maximize first stage decimation:
        #     * minimize subsequent stages time taken
        #     * filter and decimate while signal is still real
        #     * first stage decimation cannot be too large or the 2*omega component from mixing will alias
        self.d1 = 1
        while (self.d1 < 8) and (2 * n_frequency <= 0.8 / self.d1) and (
                self.d1 < self.decimation_factor.value):
            self.d1 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d1 > 1:
            # create an anti-aliasing filter
            # pass-band edge at 0.8 of the decimated Nyquist rate; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d1)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[0] = self.d1
            self.filters[0] = (b, a)

        # store decimated reference for mix down
        self.update_references(frequency)

        # second stage filter to bring n_bandwidth/2 up
        # decimation cannot be too large or will impinge on channel bandwidth (keep n_bandwidth/2 <= 0.8)
        self.d2 = 1
        while (self.d2 < 8) and (
            (self.d1 * self.d2) <
                self.decimation_factor.value) and (n_bandwidth / 2 <= 0.8):
            self.d2 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d2 > 1:
            # create an anti-aliasing filter
            # pass-band edge at 0.8 of the decimated Nyquist rate; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d2)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[1] = self.d2
            self.filters[1] = (b, a)

        # final channel selection filter
        if n_bandwidth < 0.1:
            raise ValueError(
                "Insufficient decimation to achieve stable filter: {}.".format(
                    n_bandwidth))

        b, a = scipy.signal.cheby1(4, 3, n_bandwidth / 2)
        b = np.float32(b)
        a = np.float32(a)
        self.decim_factors[2] = self.decimation_factor.value // (self.d1 *
                                                                 self.d2)
        self.filters[2] = (b, a)

    def update_descriptors(self):
        logger.debug(
            'Updating Channelizer "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        # extract record time sampling
        self.time_pts = self.sink.descriptor.axes[-1].points
        self.record_length = len(self.time_pts)
        self.time_step = self.time_pts[1] - self.time_pts[0]
        logger.debug("Channelizer time_step = {}".format(self.time_step))

        # We will be decimating along a time axis, which is always
        # going to be the last axis given the way we usually take data.
        # TODO: perform this function along a named axis rather than a numbered axis
        # in case something about this changes.

        # update output descriptors
        decimated_descriptor = DataStreamDescriptor()
        decimated_descriptor.axes = self.sink.descriptor.axes[:]
        decimated_descriptor.axes[-1] = deepcopy(self.sink.descriptor.axes[-1])
        decimated_descriptor.axes[-1].points = self.sink.descriptor.axes[
            -1].points[self.decimation_factor.value -
                       1::self.decimation_factor.value]
        decimated_descriptor.axes[
            -1].original_points = decimated_descriptor.axes[-1].points
        decimated_descriptor._exp_src = self.sink.descriptor._exp_src
        decimated_descriptor.dtype = np.complex64
        self.output_descriptor = decimated_descriptor
        for os in self.source.output_streams:
            os.set_descriptor(decimated_descriptor)
            if os.end_connector is not None:
                os.end_connector.update_descriptors()

    async def process_data(self, data):

        # Append any data carried from the last run
        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))

        # This is the largest number of records we can handle
        num_records = data.size // self.record_length

        # This is the carryover that we'll store until next round.
        # If nothing is left then reset the carryover.
        remaining_points = data.size % self.record_length
        if remaining_points > 0:
            if num_records > 0:
                self.carry = data[-remaining_points:]
                data = data[:-remaining_points]
            else:
                self.carry = data
        else:
            self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

        if num_records > 0:
            # The records are processed in parallel after being reshaped here
            reshaped_data = np.reshape(data, (num_records, self.record_length),
                                       order="C")

            # Update demodulation frequency if necessary
            if self.follow_axis.value != "":
                freq = self.demod_freqs[(self.idx % self.pts_before_freq_reset)
                                        // self.pts_before_freq_update]
                if freq != self.current_freq:
                    self.update_references(freq)
                    self.current_freq = freq

            self.idx += data.size

            # first stage decimating filter
            if self.filters[0] is None:
                filtered = reshaped_data
            else:
                stacked_coeffs = np.concatenate(self.filters[0])
                # filter
                if np.iscomplexobj(reshaped_data):
                    # TODO: compile complex versions of the IPP functions
                    filtered_r = np.empty_like(reshaped_data, dtype=np.float32)
                    filtered_i = np.empty_like(reshaped_data, dtype=np.float32)
                    libipp.filter_records_iir(
                        stacked_coeffs, self.filters[0][0].size - 1,
                        np.ascontiguousarray(
                            reshaped_data.real.astype(np.float32)),
                        self.record_length, num_records, filtered_r)
                    libipp.filter_records_iir(
                        stacked_coeffs, self.filters[0][0].size - 1,
                        np.ascontiguousarray(
                            reshaped_data.imag.astype(np.float32)),
                        self.record_length, num_records, filtered_i)
                    filtered = filtered_r + 1j * filtered_i
                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]
                else:
                    filtered = np.empty_like(reshaped_data)
                    libipp.filter_records_iir(stacked_coeffs,
                                              self.filters[0][0].size - 1,
                                              reshaped_data,
                                              self.record_length, num_records,
                                              filtered)

                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]

            # mix with reference
            # keep real and imaginary separate for filtering below
            if np.iscomplexobj(reshaped_data):
                filtered *= self.reference
                filtered_r = filtered.real
                filtered_i = filtered.imag
            else:
                filtered_r = self.reference_r * filtered
                filtered_i = self.reference_i * filtered

            # channel selection filters
            for ct in [1, 2]:
                if self.filters[ct] is None:
                    continue

                coeffs = self.filters[ct]
                stacked_coeffs = np.concatenate(self.filters[ct])
                out_r = np.empty_like(filtered_r).astype(np.float32)
                out_i = np.empty_like(filtered_i).astype(np.float32)
                libipp.filter_records_iir(
                    stacked_coeffs, self.filters[ct][0].size - 1,
                    np.ascontiguousarray(filtered_r.astype(np.float32)),
                    filtered_r.shape[-1], num_records, out_r)
                libipp.filter_records_iir(
                    stacked_coeffs, self.filters[ct][0].size - 1,
                    np.ascontiguousarray(filtered_i.astype(np.float32)),
                    filtered_i.shape[-1], num_records, out_i)

                # decimate
                if self.decim_factors[ct] > 1:
                    filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]],
                                         order="C")
                    filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]],
                                         order="C")
                else:
                    filtered_r = out_r
                    filtered_i = out_i

            filtered = filtered_r + 1j * filtered_i

            # recover gain from selecting single sideband
            filtered *= 2

            # push to output connectors
            for os in self.source.output_streams:
                await os.push(filtered)
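Since libipp.filter_records_iir is a compiled helper, the same demodulate/filter/decimate idea can be sketched with plain SciPy for a single real-valued record. This is an illustrative approximation under assumed numbers (250 MS/s sampling, a 10 MHz test tone, 5 MHz bandwidth), not the filter's actual implementation:

import numpy as np
import scipy.signal

fs = 250e6                                   # assumed sampling rate
t = np.arange(1024) / fs                     # one record's time axis
f_if = 10e6                                  # channel frequency to select
record = np.cos(2 * np.pi * f_if * t)        # made-up real-valued test record

# Stage 1: anti-alias filter and decimate by d1 while the data are still real
d1 = 2
b1, a1 = scipy.signal.cheby1(4, 3, 0.8 / d1)
stage1 = scipy.signal.lfilter(b1, a1, record)[::d1]

# Mix down with a decimated complex reference, as in update_references()
ref = np.exp(-2j * np.pi * f_if * t[::d1])
mixed = ref * stage1

# Stages 2/3: channel-selection low-pass, then decimate to the final rate
n_bandwidth = 5e6 / (fs / 2) * d1            # bandwidth normalized to the decimated Nyquist
b2, a2 = scipy.signal.cheby1(4, 3, n_bandwidth / 2)
demodulated = 2 * scipy.signal.lfilter(b2, a2, mixed)[::2]   # factor of 2 recovers the SSB gain

# Construction with axis-following demodulation (parameter values and axis name are made up):
# channelizer = Channelizer(frequency=10e6, bandwidth=5e6, decimation_factor=4,
#                           follow_axis="drive_frequency", follow_freq_offset=10e6)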
Example #30
class Channelizer(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()
    decimation_factor = IntParameter(value_range=(1, 100), default=2, snap=1)
    frequency = FloatParameter(value_range=(-5e9, 5e9),
                               increment=1.0e6,
                               default=-9e6)
    bandwidth = FloatParameter(value_range=(0.00, 100e6),
                               increment=0.1e6,
                               default=5e6)

    def __init__(self,
                 frequency=None,
                 bandwidth=None,
                 decimation_factor=None,
                 **kwargs):
        super(Channelizer, self).__init__(**kwargs)
        if frequency:
            self.frequency.value = frequency
        if bandwidth:
            self.bandwidth.value = bandwidth
        if decimation_factor:
            self.decimation_factor.value = decimation_factor
        self.quince_parameters = [
            self.decimation_factor, self.frequency, self.bandwidth
        ]

    def update_descriptors(self):
        logger.debug(
            'Updating Channelizer "%s" descriptors based on input descriptor: %s.',
            self.name, self.sink.descriptor)

        # extract record time sampling
        time_pts = self.sink.descriptor.axes[-1].points
        self.record_length = len(time_pts)
        self.time_step = time_pts[1] - time_pts[0]
        logger.debug("Channelizer time_step = {}".format(self.time_step))

        # convert bandwidth normalized to Nyquist interval
        n_bandwidth = self.bandwidth.value * self.time_step * 2
        n_frequency = abs(self.frequency.value) * self.time_step * 2

        # arbitrarily decide on three stage filter pipeline
        # 1. first stage decimating filter on real data
        # 2. second stage decimating filter on mixed product to boost n_bandwidth
        # 3. final channel selecting filter at n_bandwidth/2

        # anecdotally don't decimate more than a factor of eight for stability

        self.decim_factors = [1] * 3
        self.filters = [None] * 3

        # first stage decimating filter
        # maximize first stage decimation:
        #     * minimize subsequent stages time taken
        #     * filter and decimate while signal is still real
        #     * first stage decimation cannot be too large or the 2*omega component from mixing will alias
        d1 = 1
        while (d1 < 8) and (2 * n_frequency <=
                            0.8 / d1) and (d1 < self.decimation_factor.value):
            d1 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if d1 > 1:
            # create an anti-aliasing filter
            # pass-band edge at 0.8 of the decimated Nyquist rate; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / d1)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[0] = d1
            self.filters[0] = (b, a)

        # store decimated reference for mix down
        ref = np.exp(2j * np.pi * self.frequency.value * time_pts[::d1],
                     dtype=np.complex64)
        self.reference_r = np.real(ref)
        self.reference_i = np.imag(ref)

        # second stage filter to bring n_bandwidth/2 up
        # decimation cannot be too large or will impinge on channel bandwidth (keep n_bandwidth/2 <= 0.8)
        d2 = 1
        while (d2 < 8) and ((d1 * d2) < self.decimation_factor.value) and (
                n_bandwidth / 2 <= 0.8):
            d2 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if d2 > 1:
            # create an anti-aliasing filter
            # pass-band edge at 0.8 of the decimated Nyquist rate; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / d2)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[1] = d2
            self.filters[1] = (b, a)

        # final channel selection filter
        if n_bandwidth < 0.1:
            raise ValueError(
                "Insufficient decimation to achieve stable filter")

        b, a = scipy.signal.cheby1(4, 3, n_bandwidth / 2)
        b = np.float32(b)
        a = np.float32(a)
        self.decim_factors[2] = self.decimation_factor.value // (d1 * d2)
        self.filters[2] = (b, a)

        # update output descriptors
        decimated_descriptor = DataStreamDescriptor()
        decimated_descriptor.axes = self.sink.descriptor.axes[:]
        decimated_descriptor.axes[-1] = deepcopy(self.sink.descriptor.axes[-1])
        decimated_descriptor.axes[-1].points = self.sink.descriptor.axes[
            -1].points[self.decimation_factor.value -
                       1::self.decimation_factor.value]
        decimated_descriptor.axes[
            -1].original_points = decimated_descriptor.axes[-1].points
        decimated_descriptor.exp_src = self.sink.descriptor.exp_src
        decimated_descriptor.dtype = np.complex64
        for os in self.source.output_streams:
            os.set_descriptor(decimated_descriptor)
            if os.end_connector is not None:
                os.end_connector.update_descriptors()

    async def process_data(self, data):
        # Assume for now that we get an integer number of records at a time
        # TODO: handle partial records
        num_records = data.size // self.record_length
        reshaped_data = np.reshape(data, (num_records, self.record_length),
                                   order="C")

        # first stage decimating filter
        if self.filters[0] is None:
            # no first-stage filter configured; pass the raw records through
            filtered = reshaped_data
        else:
            stacked_coeffs = np.concatenate(self.filters[0])
            # filter
            filtered = np.empty_like(reshaped_data)
            libipp.filter_records_iir(stacked_coeffs,
                                      self.filters[0][0].size - 1,
                                      reshaped_data, self.record_length,
                                      num_records, filtered)

            # decimate
            if self.decim_factors[0] > 1:
                filtered = filtered[:, ::self.decim_factors[0]]

        # mix with reference
        # keep real and imaginary separate for filtering below
        filtered_r = self.reference_r * filtered
        filtered_i = self.reference_i * filtered

        # channel selection filters
        for ct in [1, 2]:
            if self.filters[ct] is None:
                continue

            coeffs = self.filters[ct]
            stacked_coeffs = np.concatenate(self.filters[ct])
            out_r = np.empty_like(filtered_r)
            out_i = np.empty_like(filtered_i)
            libipp.filter_records_iir(stacked_coeffs,
                                      self.filters[ct][0].size - 1, filtered_r,
                                      filtered_r.shape[-1], num_records, out_r)
            libipp.filter_records_iir(stacked_coeffs,
                                      self.filters[ct][0].size - 1, filtered_i,
                                      filtered_i.shape[-1], num_records, out_i)

            # decimate
            if self.decim_factors[ct] > 1:
                filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]],
                                     order="C")
                filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]],
                                     order="C")
            else:
                filtered_r = out_r
                filtered_i = out_i

        filtered = filtered_r + 1j * filtered_i

        # recover gain from selecting single sideband
        filtered *= 2

        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
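The decimation budget above is split so that d1 * d2 * decim_factors[2] equals decimation_factor, with the first two stages capped at 8 and grown only while the Nyquist-normalized frequency and bandwidth stay within the 0.8 passband limit. A short numeric trace of that bookkeeping, under assumed values (4 ns sampling, 10 MHz channel, 5 MHz bandwidth, total decimation of 4):

# Assumed numbers for illustration only
time_step = 4e-9
frequency, bandwidth, decimation_factor = 10e6, 5e6, 4

n_bandwidth = bandwidth * time_step * 2        # 0.04 of the Nyquist interval
n_frequency = abs(frequency) * time_step * 2   # 0.08 of the Nyquist interval

d1 = 1
while d1 < 8 and 2 * n_frequency <= 0.8 / d1 and d1 < decimation_factor:
    d1 *= 2
    n_bandwidth *= 2
    n_frequency *= 2
# d1 == 4: the first stage absorbs the whole decimation budget here

d2 = 1
while d2 < 8 and d1 * d2 < decimation_factor and n_bandwidth / 2 <= 0.8:
    d2 *= 2
    n_bandwidth *= 2
    n_frequency *= 2
# d2 stays at 1 because d1 * d2 already equals decimation_factor

print(d1, d2, decimation_factor // (d1 * d2))  # -> 4 1 1
print(n_bandwidth)                             # -> 0.16, above the 0.1 stability threshold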