class X6StreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()
    phys_channel = IntParameter(value_range=(1, 3), snap=1)
    dsp_channel = IntParameter(value_range=(0, 4), snap=1)
    stream_type = Parameter(allowed_values=["Raw", "Demodulated", "Integrated"], default='Demodulated')

    def __init__(self, name=""):
        super(X6StreamSelector, self).__init__(name=name)
        self.stream_type.value = "Raw"  # One of Raw, Demodulated, Integrated
        self.quince_parameters = [self.phys_channel, self.dsp_channel, self.stream_type]

    def get_descriptor(self, source_instr_settings, channel_settings):
        # Create a channel
        channel = X6Channel(channel_settings)

        descrip = DataStreamDescriptor()
        # If it's an integrated stream, then the time axis has already been eliminated.
        # Otherwise, add the time axis.
        if channel_settings['stream_type'] == 'Raw':
            samp_time = 4.0e-9
            descrip.add_axis(DataAxis("time", samp_time * np.arange(source_instr_settings['record_length'] // 4)))
            descrip.dtype = np.float64
        elif channel_settings['stream_type'] == 'Demodulated':
            samp_time = 32.0e-9
            descrip.add_axis(DataAxis("time", samp_time * np.arange(source_instr_settings['record_length'] // 32)))
            descrip.dtype = np.complex128
        else:  # Integrated
            descrip.dtype = np.complex128

        return channel, descrip
class WriteToFile(Filter):
    """Writes data to file using the Auspex container type, which is a simple directory structure
    with subdirectories, binary datafiles, and json meta files that store the axis descriptors
    and other information."""

    sink = InputConnector()
    filename = FilenameParameter()
    groupname = Parameter(default='main')

    def __init__(self, filename=None, groupname=None, datasetname='data', **kwargs):
        super(WriteToFile, self).__init__(**kwargs)
        if filename:
            self.filename.value = filename
        if groupname:
            self.groupname.value = groupname
        if datasetname:
            self.datasetname = datasetname

        self.ret_queue = None  # MP queue for returning data

    def final_init(self):
        assert self.filename.value, "Filename never supplied to writer."
        assert self.groupname.value, "Groupname never supplied to writer."
        assert self.datasetname, "Dataset name never supplied to writer."

        self.descriptor = self.sink.input_streams[0].descriptor
        self.container = AuspexDataContainer(self.filename.value)
        self.group = self.container.new_group(self.groupname.value)
        self.mmap = self.container.new_dataset(self.groupname.value, self.datasetname, self.descriptor)

        self.w_idx = 0
        self.points_taken = 0

    def get_data_while_running(self, return_queue):
        """Return data to the main thread or user as requested. Use a MP queue to transmit."""
        assert not self.done.is_set(), Exception("Experiment is over and filter done. Please use get_data")
        return_queue.put(np.array(self.mmap))

    def get_data(self):
        assert self.done.is_set(), Exception("Experiment is still running. Please use get_data_while_running")
        container = AuspexDataContainer(self.filename.value)
        return container.open_dataset(self.groupname.value, self.datasetname)

    def process_data(self, data):
        # Write the data
        self.mmap[self.w_idx:self.w_idx + data.size] = data
        self.w_idx += data.size
        self.points_taken = self.w_idx
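# Illustrative sketch (not part of the filter): the chunked-write pattern used by
# WriteToFile.process_data, with a plain numpy array standing in for the
# memory-mapped dataset that AuspexDataContainer.new_dataset returns.
def _demo_chunked_write():
    import numpy as np
    mmap = np.zeros(10)                             # stand-in for self.mmap
    w_idx = 0                                       # stand-in for self.w_idx
    for chunk in (np.arange(4.0), np.arange(6.0)):  # uneven incoming chunks
        mmap[w_idx:w_idx + chunk.size] = chunk      # append at the write index
        w_idx += chunk.size
    return mmap, w_idx                              # w_idx tracks points_taken (10)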
class MeshPlotter(Filter):
    sink = InputConnector()
    plot_mode = Parameter(allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"], default="quad")

    def __init__(self, *args, name="", plot_mode=None, x_label="", y_label="", **plot_args):
        super(MeshPlotter, self).__init__(*args, name=name)
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.update_interval = 0.5
        self.last_update = time.time()
        self.x_label = x_label
        self.y_label = y_label
        self.quince_parameters = [self.plot_mode]

        # This will hold the matplot server
        self.plot_server = None

    def desc(self):
        d = {'plot_type': 'mesh',
             'plot_mode': self.plot_mode.value,
             'x_label': self.x_label,
             'y_label': self.y_label}
        return d

    def update_descriptors(self):
        logger.info("Updating MeshPlotter %s descriptors based on input descriptor %s",
                    self.name, self.sink.descriptor)

    def final_init(self):
        pass

    async def process_direct(self, data):
        self.plot_server.send(self.name, data)

    async def on_done(self):
        self.plot_server.send(self.name, np.array([]), msg="done")
        time.sleep(0.1)
class X6StreamSelector(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel"""

    sink = InputConnector()
    source = OutputConnector()

    channel = IntParameter(value_range=(1, 3), snap=1)
    dsp_channel = IntParameter(value_range=(0, 4), snap=1)
    stream_type = Parameter(allowed_values=["raw", "demodulated", "integrated"], default='demodulated')

    # def __init__(self, name=""):
    #     super(X6StreamSelector, self).__init__(name=name)
    #     self.stream_type.value = "Raw"  # One of Raw, Demodulated, Integrated
    #     self.quince_parameters = [self.channel, self.dsp_channel, self.stream_type]

    def get_channel(self, channel_proxy):
        """Create and return a channel object corresponding to this stream selector"""
        return X6Channel(channel_proxy)

    def get_descriptor(self, stream_selector, receiver_channel):
        """Get the axis descriptor corresponding to this stream selector. If it's an
        integrated stream, then the time axis has already been eliminated. Otherwise,
        add the time axis."""
        descrip = DataStreamDescriptor()
        if stream_selector.stream_type == 'raw':
            samp_time = 4.0e-9
            descrip.add_axis(DataAxis("time", samp_time * np.arange(receiver_channel.receiver.record_length // 4)))
            descrip.dtype = np.float64
        elif stream_selector.stream_type == 'demodulated':
            samp_time = 32.0e-9
            descrip.add_axis(DataAxis("time", samp_time * np.arange(receiver_channel.receiver.record_length // 32)))
            descrip.dtype = np.complex128
        else:  # Integrated
            descrip.dtype = np.complex128
        return descrip
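# Sketch of the time-axis arithmetic in X6StreamSelector.get_descriptor: raw
# streams are sampled every 4 ns with record_length // 4 points, demodulated
# streams every 32 ns with record_length // 32 points. record_length here is
# an illustrative value.
def _demo_x6_time_axes(record_length=1024):
    import numpy as np
    raw_times = 4.0e-9 * np.arange(record_length // 4)      # 256 points
    demod_times = 32.0e-9 * np.arange(record_length // 32)  # 32 points
    return raw_times, demod_times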
class Averager(Filter):
    """Takes data and collapses along the specified axis."""

    sink = InputConnector()
    partial_average = OutputConnector()
    final_average = OutputConnector()
    final_variance = OutputConnector()
    axis = Parameter()

    def __init__(self, axis=None, **kwargs):
        super(Averager, self).__init__(**kwargs)
        self.axis.value = axis
        self.points_before_final_average = None
        self.points_before_partial_average = None
        self.sum_so_far = None
        self.num_averages = None
        self.quince_parameters = [self.axis]

        # Rate limiting for partial averages
        self.last_update = time.time()
        self.update_interval = 0.5

    def update_descriptors(self):
        logger.debug('Updating averager "%s" descriptors based on input descriptor: %s.',
                     self.name, self.sink.descriptor)
        descriptor_in = self.sink.descriptor
        names = [a.name for a in descriptor_in.axes]

        self.axis.allowed_values = names

        if self.axis.value is None:
            self.axis.value = descriptor_in.axes[0].name

        # Convert named axes to an index
        if self.axis.value not in names:
            raise ValueError("Could not find axis {} within the DataStreamDescriptor {}".format(
                self.axis.value, descriptor_in))
        self.axis_num = descriptor_in.axis_num(self.axis.value)
        logger.debug("Averaging over axis #%d: %s", self.axis_num, self.axis.value)

        self.data_dims = descriptor_in.data_dims()
        if self.axis_num == len(descriptor_in.axes) - 1:
            logger.debug("Performing scalar average!")
            self.points_before_partial_average = 1
            self.avg_dims = [1]
        else:
            self.points_before_partial_average = descriptor_in.num_points_through_axis(self.axis_num + 1)
            self.avg_dims = self.data_dims[self.axis_num + 1:]

        # In case we get multiple final averages simultaneously
        self.reshape_dims = self.data_dims[self.axis_num:]
        if self.axis_num > 0:
            self.reshape_dims = [-1] + self.reshape_dims
        self.mean_axis = self.axis_num - len(self.data_dims)

        self.points_before_final_average = descriptor_in.num_points_through_axis(self.axis_num)
        logger.debug("Points before partial average: %s.", self.points_before_partial_average)
        logger.debug("Points before final average: %s.", self.points_before_final_average)
        logger.debug("Data dimensions are %s", self.data_dims)
        logger.debug("Averaging dimensions are %s", self.avg_dims)

        # Define final axis descriptor
        descriptor = descriptor_in.copy()
        self.num_averages = descriptor.pop_axis(self.axis.value).num_points()
        logger.debug("Number of partial averages is %d", self.num_averages)

        self.sum_so_far = np.zeros(self.avg_dims, dtype=descriptor.dtype)
        self.current_avg_frame = np.zeros(self.points_before_final_average, dtype=descriptor.dtype)
        self.partial_average.descriptor = descriptor
        self.final_average.descriptor = descriptor

        # We can update the visited_tuples upfront if none
        # of the sweeps are adaptive...
        desc_out_dtype = descriptor_in.axis_data_type(with_metadata=True, excluding_axis=self.axis.value)
        if not descriptor_in.is_adaptive():
            vals = [a.points_with_metadata() for a in descriptor_in.axes if a.name != self.axis.value]
            nested_list = list(itertools.product(*vals))
            flattened_list = [tuple((val for sublist in line for val in sublist)) for line in nested_list]
            descriptor.visited_tuples = np.core.records.fromrecords(flattened_list, dtype=desc_out_dtype)
        else:
            descriptor.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.partial_average.output_streams + self.final_average.output_streams:
            stream.set_descriptor(descriptor)
            stream.end_connector.update_descriptors()

        # Define variance axis descriptor
        descriptor_var = descriptor_in.copy()
        descriptor_var.data_name = "Variance"
        descriptor_var.pop_axis(self.axis.value)
        if descriptor_var.unit:
            descriptor_var.unit = descriptor_var.unit + "^2"
        descriptor_var.metadata["num_averages"] = self.num_averages
        self.final_variance.descriptor = descriptor_var

        if not descriptor_in.is_adaptive():
            descriptor_var.visited_tuples = np.core.records.fromrecords(flattened_list, dtype=desc_out_dtype)
        else:
            descriptor_var.visited_tuples = np.empty((0), dtype=desc_out_dtype)

        for stream in self.final_variance.output_streams:
            stream.set_descriptor(descriptor_var)
            stream.end_connector.update_descriptors()

    def final_init(self):
        if self.points_before_final_average is None:
            raise Exception("Averager has not been initialized. Run 'update_descriptors'")

        self.completed_averages = 0
        self.idx_frame = 0
        self.idx_global = 0
        # We only need to accumulate up to the averaging axis,
        # BUT we may get something longer at any given time!
        self.carry = np.zeros(0, dtype=self.final_average.descriptor.dtype)

    async def process_data(self, data):
        # TODO: handle unflattened data separately
        if len(data.shape) > 1:
            data = data.flatten()
        # Handle single points
        elif not isinstance(data, np.ndarray) and (data.size == 1):
            data = np.array([data])

        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))
            self.carry = np.zeros(0, dtype=self.final_average.descriptor.dtype)

        idx = 0
        while idx < data.size:
            # Check whether we have enough data to fill an averaging frame
            if data.size - idx >= self.points_before_final_average:
                # How many chunks can we process at once?
                num_chunks = int((data.size - idx) / self.points_before_final_average)
                new_points = num_chunks * self.points_before_final_average
                reshaped = data[idx:idx + new_points].reshape(self.reshape_dims)
                averaged = reshaped.mean(axis=self.mean_axis)
                idx += new_points

                if self.sink.descriptor.is_adaptive():
                    new_tuples = self.sink.descriptor.tuples()[self.idx_global:self.idx_global + new_points]
                    new_tuples_stripped = remove_fields(new_tuples, self.axis.value)
                    take_axis = -1 if self.axis_num > 0 else 0
                    reduced_tuples = new_tuples_stripped.reshape(self.reshape_dims).take((0,), axis=take_axis)
                    self.idx_global += new_points

                    # Add to visited tuples
                    for os in (self.final_average.output_streams +
                               self.final_variance.output_streams +
                               self.partial_average.output_streams):
                        os.descriptor.visited_tuples = np.append(os.descriptor.visited_tuples, reduced_tuples)

                for os in self.final_average.output_streams:
                    await os.push(averaged)

                for os in self.final_variance.output_streams:
                    await os.push(reshaped.var(axis=self.mean_axis, ddof=1))  # N-1 in the denominator

                for os in self.partial_average.output_streams:
                    await os.push(averaged)

            # Maybe we can fill a partial frame
            elif data.size - idx >= self.points_before_partial_average:
                # How many chunks can we process at once?
                num_chunks = int((data.size - idx) / self.points_before_partial_average)
                new_points = num_chunks * self.points_before_partial_average

                # Find the appropriate dimensions for the partial
                partial_reshape_dims = self.reshape_dims[:]
                partial_reshape_dims[self.mean_axis] = -1
                partial_reshape_dims = partial_reshape_dims[self.mean_axis:]

                reshaped = data[idx:idx + new_points].reshape(partial_reshape_dims)
                summed = reshaped.sum(axis=self.mean_axis)
                self.sum_so_far += summed

                self.current_avg_frame[self.idx_frame:self.idx_frame + new_points] = data[idx:idx + new_points]
                idx += new_points
                self.idx_frame += new_points

                self.completed_averages += num_chunks

                # If we now have enough for the final average, push to both partial and final...
                if self.completed_averages == self.num_averages:
                    reshaped = self.current_avg_frame.reshape(partial_reshape_dims)
                    for os in self.final_average.output_streams + self.partial_average.output_streams:
                        await os.push(reshaped.mean(axis=self.mean_axis))
                    for os in self.final_variance.output_streams:
                        await os.push(reshaped.var(axis=self.mean_axis, ddof=1))  # N-1 in the denominator
                    self.sum_so_far[:] = 0.0
                    self.current_avg_frame[:] = 0.0
                    self.completed_averages = 0
                    self.idx_frame = 0
                else:
                    # Emit a partial average since we've accumulated enough data
                    if (time.time() - self.last_update >= self.update_interval):
                        for os in self.partial_average.output_streams:
                            await os.push(self.sum_so_far / self.completed_averages)
                        self.last_update = time.time()

            # Otherwise just add it to the carry
            else:
                self.carry = data[idx:]
                break
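# Toy illustration of the reshape-and-reduce core of Averager.process_data,
# assuming data dims [num_averages=4, points=5] and averaging over axis 0, so
# reshape_dims = [4, 5] and mean_axis = -2 (axis_num - len(data_dims)).
def _demo_averager_core():
    import numpy as np
    data = np.arange(20, dtype=np.float64)       # exactly one full averaging frame
    frame = data.reshape([4, 5])
    final_average = frame.mean(axis=-2)          # shape (5,)
    final_variance = frame.var(axis=-2, ddof=1)  # N-1 in the denominator
    return final_average, final_variance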
class KernelIntegrator(Filter):

    sink = InputConnector()
    source = OutputConnector()
    kernel = Parameter()
    bias = FloatParameter(default=0.0)
    simple_kernel = BoolParameter(default=True)
    box_car_start = FloatParameter(default=0.0)
    box_car_stop = FloatParameter(default=100e-9)
    demod_frequency = FloatParameter(default=0.0)

    """Integrate with a given kernel. Kernel will be padded/truncated to match record length"""

    def __init__(self, **kwargs):
        super(KernelIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        # self.quince_parameters = [self.simple_kernel, self.demod_frequency, self.box_car_start, self.box_car_stop]

    def update_descriptors(self):
        if not self.simple_kernel and self.kernel.value is None:
            raise ValueError("Integrator was passed kernel None")

        logger.debug('Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
                     self.filter_name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()

        if self.kernel.value:
            if os.path.exists(os.path.join(config.KernelDir, self.kernel.value + '.txt')):
                kernel = np.loadtxt(
                    os.path.join(config.KernelDir, self.kernel.value + '.txt'),
                    dtype=complex,
                    converters={0: lambda s: complex(s.decode().replace('+-', '-'))})
            else:
                try:
                    kernel = eval(self.kernel.value.encode('unicode_escape'))
                except:
                    raise ValueError('Kernel invalid. Provide a file name or an expression to evaluate')
            if self.simple_kernel.value:
                logger.warning("Using specified kernel. To use a box car filter instead, clear kernel.value")
        elif self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.demod_frequency.value * time_pts)
        else:
            raise ValueError('Kernel invalid. Either provide a file name or an expression to evaluate or set simple_kernel.value to true')

        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(kernel, np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for ost in self.source.output_streams:
            ost.set_descriptor(output_descriptor)
            ost.end_connector.update_descriptors()

    def process_data(self, data):
        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))), self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            os.push(filtered)
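# Sketch of KernelIntegrator's box-car path: build a modulated box-car kernel,
# then collapse each record to a single point with np.inner. Record length,
# time step, window, and demod frequency here are illustrative values only.
def _demo_boxcar_integration():
    import numpy as np
    record_length, time_step = 256, 4.0e-9
    time_pts = time_step * np.arange(record_length)
    kernel = np.zeros(record_length, dtype=np.complex128)
    start = int(20e-9 / time_step)
    stop = int(120e-9 / time_step) + 1
    kernel[start:stop] = 1.0                         # box-car window
    kernel *= np.exp(2j * np.pi * 10e6 * time_pts)   # add modulation
    data = np.random.randn(8 * record_length)        # 8 records, flattened
    integrated = np.inner(data.reshape(-1, record_length), kernel)
    return integrated                                # shape (8,), one point per record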
class WindowIntegrator(Filter):
    """
    Allow a kernel from the set {'chebwin', 'blackman', 'slepian', 'boxcar'}
    to be set for the duration of the start and stop values.

    YAML parameters are:

        type: WindowIntegrator
        source: Demod-q1
        kernel_type: 'chebwin'
        start: 5.0e-07
        stop: 9.0e-07

    See https://docs.scipy.org/doc/scipy/reference/signal.html for more
    details on the filter specifics.
    """

    sink = InputConnector()
    source = OutputConnector()
    bias = FloatParameter(default=0.0)
    kernel_type = Parameter(default='boxcar',
                            allowed_values=['chebwin', 'blackman', 'slepian', 'boxcar'])
    start = FloatParameter(default=0.0)
    stop = FloatParameter(default=100e-9)
    frequency = FloatParameter(default=0.0)

    """Integrate with a given kernel. Kernel will be padded/truncated to match record length"""

    def __init__(self, **kwargs):
        super(WindowIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        self.quince_parameters = [self.kernel_type, self.frequency, self.start, self.stop]

    def update_descriptors(self):
        if self.kernel_type.value is None:
            raise ValueError("Integrator was passed kernel None")

        logger.debug('Updating WindowIntegrator "%s" descriptors based on input descriptor: %s.',
                     self.name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()
        time_pts = self.sink.descriptor.axes[-1].points
        time_step = time_pts[1] - time_pts[0]
        kernel = np.zeros(record_length, dtype=np.complex128)
        sample_start = int(self.start.value / time_step)
        sample_stop = int(self.stop.value / time_step) + 1
        if self.kernel_type.value == 'boxcar':
            kernel[sample_start:sample_stop] = 1.0
        elif self.kernel_type.value == 'chebwin':
            # create a Dolph-Chebyshev window with 100 dB attenuation
            kernel[sample_start:sample_stop] = chebwin(sample_stop - sample_start, at=100)
        elif self.kernel_type.value == 'blackman':
            kernel[sample_start:sample_stop] = blackman(sample_stop - sample_start)
        elif self.kernel_type.value == 'slepian':
            # create a Slepian window with 0.2 bandwidth
            kernel[sample_start:sample_stop] = slepian(sample_stop - sample_start, width=0.2)

        # add modulation
        kernel *= np.exp(2j * np.pi * self.frequency.value * time_step * time_pts)

        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(kernel, np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for os in self.source.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()

    async def process_data(self, data):
        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))), self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)
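# Sketch of WindowIntegrator's kernel construction for a 'chebwin' window. Uses
# scipy.signal.windows explicitly; note that 'slepian' is deprecated in recent
# scipy releases in favor of windows.dpss. All numeric values are illustrative.
def _demo_window_kernel():
    import numpy as np
    from scipy.signal.windows import chebwin
    record_length, time_step = 512, 2.0e-9
    start = int(100e-9 / time_step)
    stop = int(400e-9 / time_step) + 1
    kernel = np.zeros(record_length, dtype=np.complex128)
    kernel[start:stop] = chebwin(stop - start, at=100)  # 100 dB attenuation
    return kernel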
class Framer(Filter):
    """Mete out data in increments defined by the specified axis."""

    sink = InputConnector()
    source = OutputConnector()
    axis = Parameter()

    def __init__(self, axis=None, **kwargs):
        super(Framer, self).__init__(**kwargs)
        self.axis.value = axis
        self.points_before_final_average = None
        self.points_before_partial_average = None
        self.sum_so_far = None
        self.num_averages = None
        self.quince_parameters = [self.axis]

    def final_init(self):
        descriptor_in = self.sink.descriptor
        names = [a.name for a in descriptor_in.axes]

        self.axis.allowed_values = names

        if self.axis.value is None:
            self.axis.value = descriptor_in.axes[0].name

        # Convert named axes to an index
        if self.axis.value not in names:
            raise ValueError("Could not find axis {} within the DataStreamDescriptor {}".format(
                self.axis.value, descriptor_in))
        self.axis_num = descriptor_in.axis_num(self.axis.value)
        logger.debug("Framing on axis #%d: %s", self.axis_num, self.axis.value)

        # Find how many points we want to spit out at a time
        self.data_dims = descriptor_in.data_dims()
        if self.axis_num == len(descriptor_in.axes) - 1:
            raise Exception("Framer has refused to frame along single points.")
        else:
            self.frame_points = descriptor_in.num_points_through_axis(self.axis_num + 1)
        logger.debug("Points before emitting frame: %s.", self.frame_points)

        # For storing carryover if getting uneven buffers
        self.idx = 0
        self.carry = np.zeros(0, dtype=self.sink.descriptor.dtype)

    def process_data(self, data):
        # Append any data carried from the last run
        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))

        # This is the largest number of frames we can emit for the time being
        num_frames = data.size // self.frame_points

        # This is the carryover that we'll store until next round.
        # If nothing is left then reset the carryover.
        remaining_points = data.size % self.frame_points
        if remaining_points > 0:
            if num_frames > 0:
                self.carry = data[-remaining_points:]
                data = data[:-remaining_points]
            else:
                self.carry = data
        else:
            self.carry = np.zeros(0, dtype=self.sink.descriptor.dtype)

        if num_frames > 0:
            for i in range(num_frames):
                for os in self.source.output_streams:
                    os.push(data[i * self.frame_points:(i + 1) * self.frame_points])
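# Toy version of Framer.process_data's bookkeeping: emit whole frames and carry
# the remainder to the next call. Frame size and chunk contents are made up.
def _demo_framer_carry():
    import numpy as np
    frame_points = 4
    carry = np.zeros(0)
    frames = []
    for chunk in (np.arange(6.0), np.arange(6.0, 12.0)):  # two uneven buffers
        data = np.concatenate((carry, chunk))
        num_frames, remaining = divmod(data.size, frame_points)
        carry = data[data.size - remaining:] if remaining else np.zeros(0)
        for i in range(num_frames):
            frames.append(data[i * frame_points:(i + 1) * frame_points])
    return frames, carry  # three full frames; nothing carried at the end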
class WriteToHDF5(Filter):
    """Writes data to file."""

    sink = InputConnector()
    filename = FilenameParameter()
    groupname = Parameter(default='main')
    add_date = BoolParameter(default=False)
    save_settings = BoolParameter(default=True)

    def __init__(self, filename=None, groupname=None, add_date=False, save_settings=True,
                 compress=True, store_tuples=True, exp_log=True, **kwargs):
        super(WriteToHDF5, self).__init__(**kwargs)
        self.compress = compress
        if filename:
            self.filename.value = filename
        if groupname:
            self.groupname.value = groupname
        self.points_taken = 0
        self.file = None
        self.group = None
        self.store_tuples = store_tuples
        self.create_group = True
        self.up_to_date = False
        self.sink.max_input_streams = 100
        self.add_date.value = add_date
        self.save_settings.value = save_settings
        self.exp_log = exp_log
        self.quince_parameters = [self.filename, self.groupname, self.add_date, self.save_settings]

    def final_init(self):
        if not self.filename.value:
            raise Exception("Filename never supplied to writer.")
        # If self.file is still None, then we need to create
        # the file object. Otherwise, we presume someone has
        # already set it up for us.
        if not self.file:
            self.file = self.new_file()

    def new_filename(self):
        filename = self.filename.value
        basename, ext = os.path.splitext(filename)
        if ext == "":
            logger.debug("Filename for writer {} does not have an extension -- using default '.h5'".format(self.name))
            ext = ".h5"

        dirname = os.path.dirname(os.path.abspath(filename))

        if self.add_date.value:
            date = time.strftime("%y%m%d")
            dirname = os.path.join(dirname, date)
            basename = os.path.join(dirname, os.path.basename(basename))

        # Set the file number to the maximum in the current folder + 1
        filenums = []
        if os.path.exists(dirname):
            for f in os.listdir(dirname):
                if ext in f:
                    filenums += [int(re.findall(r'-(\d{4})\.', f)[0])] if os.path.isfile(os.path.join(dirname, f)) else []

        i = max(filenums) + 1 if filenums else 0
        return "{}-{:04d}{}".format(basename, i, ext)

    def new_file(self):
        """ Open a new data file to write """
        # Close the current file, if any
        if self.file is not None:
            try:
                self.file.close()
            except Exception as e:
                logger.error("Encountered exception: {}".format(e))
                logger.error("Cannot close file '{}'. File may be damaged.".format(self.file.filename))
        # Get new file name
        self.filename.value = self.new_filename()
        head = os.path.dirname(self.filename.value)
        head = os.path.normpath(head)
        dirs = head.split(os.sep)
        # Check if path exists. If not, create new one(s).
        os.makedirs(head, exist_ok=True)
        logger.debug("Create new data file: %s." % self.filename.value)
        # Copy current settings to a folder with the file name
        if self.save_settings.value:
            # just move copies to a new directory
            self.save_yaml()
        if self.exp_log:
            self.write_to_log()
        return h5py.File(self.filename.value, 'w', libver='latest')

    def write_to_log(self):
        """ Record the experiment in a log file """
        logfile = os.path.join(config.LogDir, "experiment_log.tsv")
        if os.path.isfile(logfile):
            lf = pd.read_csv(logfile, sep="\t")
        else:
            logger.info("Experiment log file created.")
            lf = pd.DataFrame(columns=["Filename", "Date", "Time"])
        lf = lf.append(pd.DataFrame([[self.filename.value, time.strftime("%y%m%d"), time.strftime("%H:%M:%S")]],
                                    columns=["Filename", "Date", "Time"]),
                       ignore_index=True)
        lf.to_csv(logfile, sep="\t", index=False)

    def save_yaml(self):
        """ Save a copy of current experiment settings """
        head = os.path.dirname(self.filename.value)
        fulldir = os.path.splitext(self.filename.value)[0]
        if not os.path.exists(fulldir):
            os.makedirs(fulldir)
            config.yaml_dump(config.yaml_load(config.configFile),
                             os.path.join(fulldir, os.path.split(config.configFile)[1]),
                             flatten=True)

    def save_yaml_h5(self):
        """ Save a copy of current experiment settings in the h5 metadata"""
        header = self.file.create_group("header")
        # load then dump to get the 'include' information
        header.attrs['settings'] = config.yaml_dump(config.yaml_load(config.configFile), flatten=True)

    async def run(self):
        self.finished_processing = False
        streams = self.sink.input_streams
        stream = streams[0]

        for s in streams[1:]:
            if not np.all(s.descriptor.expected_tuples() == streams[0].descriptor.expected_tuples()):
                raise ValueError("Multiple streams connected to writer must have matching descriptors.")

        desc = stream.descriptor
        axes = desc.axes
        params = desc.params
        axis_names = desc.axis_names(with_metadata=True)

        self.file.attrs['exp_src'] = desc._exp_src
        num_axes = len(axes)

        if desc.is_adaptive() and not self.store_tuples:
            raise Exception("Cannot omit writing tuples with an adaptive sweep... please enable store_tuples.")

        if self.store_tuples:
            # All of the combinations for the present values of the sweep parameters only
            tuples = desc.expected_tuples(with_metadata=True, as_structured_array=True)
        expected_length = desc.expected_num_points()

        compression = 'gzip' if self.compress else None

        # If desired, create the group in which the dataset and axes will reside
        if self.create_group:
            self.group = self.file.create_group(self.groupname.value)
        else:
            self.group = self.file
        self.data_group = self.group.create_group("data")

        # If desired, push experimental metadata into the h5 file
        if self.save_settings.value and 'header' not in self.file.keys():
            # only save header once for multiple writers
            self.save_yaml_h5()

        # Create datasets for each stream
        dset_for_streams = {}
        for stream in streams:
            dset = self.data_group.create_dataset(stream.descriptor.data_name, (expected_length,),
                                                  dtype=stream.descriptor.dtype,
                                                  chunks=True, maxshape=(None,),
                                                  compression=compression)
            dset.attrs['is_data'] = True
            dset.attrs['store_tuples'] = self.store_tuples
            dset.attrs['name'] = stream.descriptor.data_name
            dset_for_streams[stream] = dset

        # Write params into attrs
        for k, v in params.items():
            if k not in axis_names:
                self.data_group.attrs[k] = v

        # Create a table for the DataStreamDescriptor
        ref_dtype = h5py.special_dtype(ref=h5py.Reference)
        self.descriptor = self.group.create_dataset("descriptor", (len(axes),), dtype=ref_dtype)
        for k, v in desc.metadata.items():
            self.descriptor.attrs[k] = v

        # Create axis data sets for storing the base axes as well as the
        # full set of tuples. For the former we add references to the descriptor.
        tuple_dset_for_axis_name = {}
        for i, a in enumerate(axes):
            if a.unstructured:
                name = "+".join(a.name)
            else:
                name = a.name

            if a.unstructured:
                # Create another reference table to refer to the constituent axes
                unstruc_ref_dset = self.group.create_dataset(name, (len(a.name),), dtype=ref_dtype)
                unstruc_ref_dset.attrs['unstructured'] = True

                for j, (col_name, col_unit) in enumerate(zip(a.name, a.unit)):
                    # Create table to store the axis value independently for each column
                    unstruc_dset = self.group.create_dataset(col_name, (a.num_points(),), dtype=a.dtype)
                    unstruc_ref_dset[j] = unstruc_dset.ref

                    unstruc_dset[:] = a.points[:, j]
                    unstruc_dset.attrs['unit'] = col_unit
                    unstruc_dset.attrs['name'] = col_name

                    # This stores the values taken during the experiment sweeps
                    if self.store_tuples:
                        dset = self.data_group.create_dataset(col_name, (expected_length,), dtype=a.dtype,
                                                              chunks=True, compression=compression,
                                                              maxshape=(None,))
                        dset.attrs['unit'] = col_unit
                        dset.attrs['is_data'] = False
                        dset.attrs['name'] = col_name
                        tuple_dset_for_axis_name[col_name] = dset

                self.descriptor[i] = self.group[name].ref
            else:
                # This stores the axis values
                self.group.create_dataset(name, (a.num_points(),), dtype=a.dtype, maxshape=(None,))
                self.group[name].attrs['unstructured'] = False
                self.group[name][:] = a.points
                self.group[name].attrs['unit'] = "None" if a.unit is None else a.unit
                self.group[name].attrs['name'] = a.name
                self.descriptor[i] = self.group[name].ref

                # This stores the values taken during the experiment sweeps
                if self.store_tuples:
                    dset = self.data_group.create_dataset(name, (expected_length,), dtype=a.dtype,
                                                          chunks=True, compression=compression,
                                                          maxshape=(None,))
                    dset.attrs['unit'] = "None" if a.unit is None else a.unit
                    dset.attrs['is_data'] = False
                    dset.attrs['name'] = name
                    tuple_dset_for_axis_name[name] = dset

            # Give the reader some warning about the usefulness of these axes
            self.group[name].attrs['was_refined'] = False

            if a.metadata is not None:
                # Create the axis table for the metadata
                dset = self.group.create_dataset(name + "_metadata", (a.metadata.size,), dtype=np.uint8, maxshape=(None,))
                dset[:] = a.metadata
                dset = self.group.create_dataset(name + "_metadata_enum", (a.metadata_enum.size,), dtype='S128', maxshape=(None,))
                dset[:] = np.asarray(a.metadata_enum, dtype='S128')

                # Associate the metadata with the data axis
                self.group[name].attrs['metadata'] = self.group[name + "_metadata"].ref
                self.group[name].attrs['metadata_enum'] = self.group[name + "_metadata_enum"].ref
                self.group[name].attrs['name'] = name + "_metadata"

                # Create the dataset that stores the individual tuple values
                if self.store_tuples:
                    dset = self.data_group.create_dataset(name + "_metadata", (expected_length,), dtype=np.uint8, maxshape=(None,))
                    dset.attrs['name'] = name + "_metadata"
                    tuple_dset_for_axis_name[name + "_metadata"] = dset

        # Write all the tuples if this isn't adaptive
        if self.store_tuples:
            if not desc.is_adaptive():
                for i, a in enumerate(axis_names):
                    tuple_dset_for_axis_name[a][:] = tuples[a]

        # Write pointer
        w_idx = 0

        while True:
            # Wait for all of the acquisition to complete.
            # Against at least some people's rational expectations, asyncio.wait doesn't return
            # Futures in the order of the iterable it was passed, but perhaps just in order of
            # completion. So, we construct a dictionary so that they can be mapped back to where
            # we need them:
            futures = {asyncio.ensure_future(stream.queue.get()): stream for stream in streams}
            responses, _ = await asyncio.wait(futures)

            # Construct the inverse lookup
            response_for_stream = {futures[res]: res for res in list(responses)}
            messages = [response_for_stream[stream].result() for stream in streams]

            # Ensure we aren't getting different types of messages at the same time.
            message_types = [m['type'] for m in messages]
            if len(set(message_types)) > 1:
                raise ValueError("Writer received concurrent messages with different message types {}".format(
                    [m['type'] for m in messages]))

            # Infer the type from the first message
            message_type = messages[0]['type']

            # If we receive a message
            if message_type == 'event':
                logger.debug('%s "%s" received event of type "%s"',
                             self.__class__.__name__, self.name, message_type)
                if messages[0]['event_type'] == 'done':
                    break
                elif messages[0]['event_type'] == 'refined':
                    refined_axis = messages[0]['data']

                    # Resize the data set
                    num_new_points = desc.num_new_points_through_axis(refined_axis)
                    for stream in streams:
                        dset_for_streams[stream].resize((len(dset_for_streams[streams[0]]) + num_new_points,))

                    if self.store_tuples:
                        for an in axis_names:
                            tuple_dset_for_axis_name[an].resize((len(tuple_dset_for_axis_name[an]) + num_new_points,))

                    # Generally speaking the descriptors are now insufficient to reconstruct
                    # the full set of tuples. The user should know this, so let's mark the
                    # descriptor axes accordingly.
                    self.group[refined_axis].attrs['was_refined'] = True
            elif message_type == 'data':
                message_data = [message['data'] for message in messages]
                message_comp = [message['compression'] for message in messages]
                message_data = [pickle.loads(zlib.decompress(dat)) if comp == 'zlib' else dat
                                for comp, dat in zip(message_comp, message_data)]

                # Convert single values to arrays
                message_data = [dat if hasattr(dat, 'size') else np.array([dat]) for dat in message_data]
                for ii in range(len(message_data)):
                    if not hasattr(message_data[ii], 'size'):
                        message_data[ii] = np.array([message_data[ii]])
                    message_data[ii] = message_data[ii].flatten()
                    if message_data[ii].size != message_data[0].size:
                        raise ValueError("Writer received data of unequal length.")

                logger.debug('%s "%s" received %d points',
                             self.__class__.__name__, self.name, message_data[0].size)
                logger.debug("Now has %d of %d points.", stream.points_taken, stream.num_points())

                self.up_to_date = (w_idx == dset_for_streams[streams[0]].len())

                # Write the data
                for s, d in zip(streams, message_data):
                    dset_for_streams[s][w_idx:w_idx + d.size] = d

                # Write the coordinate tuples
                if self.store_tuples:
                    if desc.is_adaptive():
                        tuples = desc.tuples()
                    for axis_name in axis_names:
                        tuple_dset_for_axis_name[axis_name][w_idx:w_idx + d.size] = tuples[axis_name][w_idx:w_idx + d.size]

                self.file.flush()
                w_idx += message_data[0].size
                self.points_taken = w_idx

                logger.debug("HDF5: Write index at %d", w_idx)
                logger.debug("HDF5: %s has written %d points", stream.name, w_idx)

            # If we have gotten all our data and process_data has returned, then we are done!
            if np.all([v.done() for v in self.input_connectors.values()]):
                self.finished_processing = True
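# Hedged read-back sketch (not part of the writer): WriteToHDF5 lays the file out
# as <groupname>/data/<data_name> datasets plus a <groupname>/descriptor table of
# HDF5 object references pointing at the axis datasets. This assumes structured
# axes (which carry a 'name' attribute); the filename and group are placeholders.
def _demo_read_hdf5_layout(filename="data-0000.h5", groupname="main"):
    import h5py
    with h5py.File(filename, 'r') as f:
        group = f[groupname]
        # Data columns written by the writer, keyed by stream data_name
        data = {key: dset[:] for key, dset in group['data'].items()}
        # Dereference the descriptor table to recover the sweep axis names
        axis_names = [f[ref].attrs['name'] for ref in group['descriptor']]
    return data, axis_names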
class Channelizer(Filter):
    """Digital demodulation and filtering to select a particular frequency multiplexed channel. If
    an axis name is supplied to `follow_axis` then the filter will demodulate at the frequency
    `axis_frequency_value - follow_freq_offset` otherwise it will demodulate at `frequency`. Note
    that the filter coefficients are still calculated with respect to the `frequency` parameter,
    so it should be chosen accordingly when `follow_axis` is defined."""

    sink = InputConnector()
    source = OutputConnector()
    follow_axis = Parameter(default="")  # Name of the axis to follow
    follow_freq_offset = FloatParameter(default=0.0)  # Offset
    decimation_factor = IntParameter(value_range=(1, 100), default=4, snap=1)
    frequency = FloatParameter(value_range=(-10e9, 10e9), increment=1.0e6, default=10e6)
    bandwidth = FloatParameter(value_range=(0.00, 100e6), increment=0.1e6, default=5e6)

    def __init__(self, frequency=None, bandwidth=None, decimation_factor=None,
                 follow_axis=None, follow_freq_offset=None, **kwargs):
        super(Channelizer, self).__init__(**kwargs)
        if frequency:
            self.frequency.value = frequency
        if bandwidth:
            self.bandwidth.value = bandwidth
        if decimation_factor:
            self.decimation_factor.value = decimation_factor
        if follow_axis:
            self.follow_axis.value = follow_axis
        if follow_freq_offset:
            self.follow_freq_offset.value = follow_freq_offset
        self.quince_parameters = [self.decimation_factor, self.frequency, self.bandwidth]
        self._phase = 0.0

    def final_init(self):
        self.init_filters(self.frequency.value, self.bandwidth.value)

        if self.follow_axis.value != "":
            desc = self.sink.descriptor
            axis_num = desc.axis_num(self.follow_axis.value)
            self.pts_before_freq_update = desc.num_points_through_axis(axis_num + 1)
            self.pts_before_freq_reset = desc.num_points_through_axis(axis_num)
            self.demod_freqs = desc.axes[axis_num].points - self.follow_freq_offset.value
            self.current_freq = 0
            self.update_references(self.current_freq)
        self.idx = 0

        # For storing carryover if getting uneven buffers
        self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

    def update_references(self, frequency):
        # store decimated reference for mix down
        # phase_drift = 2j*np.pi*0.5e-6 * (abs(frequency) - 100e6)
        ref = np.exp(2j * np.pi * -frequency * self.time_pts[::self.d1] + 1j * self._phase,
                     dtype=np.complex64)
        self.reference = ref
        self.reference_r = np.real(ref)
        self.reference_i = np.imag(ref)

    def init_filters(self, frequency, bandwidth):
        # convert bandwidth normalized to Nyquist interval
        n_bandwidth = bandwidth * self.time_step * 2
        n_frequency = abs(frequency) * self.time_step * 2

        # arbitrarily decide on three stage filter pipeline
        # 1. first stage decimating filter on real data
        # 2. second stage decimating filter on mixed product to boost n_bandwidth
        # 3. final channel selecting filter at n_bandwidth/2
        # anecdotally don't decimate more than a factor of eight for stability

        self.decim_factors = [1] * 3
        self.filters = [None] * 3

        # first stage decimating filter
        # maximize first stage decimation:
        #   * minimize time taken by subsequent stages
        #   * filter and decimate while signal is still real
        #   * first stage decimation cannot be too large or the 2omega signal from mixing will alias
        self.d1 = 1
        while (self.d1 < 8) and (2 * n_frequency <= 0.8 / self.d1) and (self.d1 < self.decimation_factor.value):
            self.d1 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d1 > 1:
            # create an anti-aliasing filter
            # pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d1)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[0] = self.d1
            self.filters[0] = (b, a)

        # store decimated reference for mix down
        self.update_references(frequency)

        # second stage filter to bring n_bandwidth/2 up
        # decimation cannot be too large or will impinge on channel bandwidth (keep n_bandwidth/2 <= 0.8)
        self.d2 = 1
        while (self.d2 < 8) and ((self.d1 * self.d2) < self.decimation_factor.value) and (n_bandwidth / 2 <= 0.8):
            self.d2 *= 2
            n_bandwidth *= 2
            n_frequency *= 2

        if self.d2 > 1:
            # create an anti-aliasing filter
            # pass-band to 0.8 * decimation factor; anecdotally single precision needs order <= 4 for stability
            b, a = scipy.signal.cheby1(4, 3, 0.8 / self.d2)
            b = np.float32(b)
            a = np.float32(a)
            self.decim_factors[1] = self.d2
            self.filters[1] = (b, a)

        # final channel selection filter
        if n_bandwidth < 0.1:
            raise ValueError("Insufficient decimation to achieve stable filter: {}.".format(n_bandwidth))

        b, a = scipy.signal.cheby1(4, 3, n_bandwidth / 2)
        b = np.float32(b)
        a = np.float32(a)
        self.decim_factors[2] = self.decimation_factor.value // (self.d1 * self.d2)
        self.filters[2] = (b, a)

    def update_descriptors(self):
        logger.debug('Updating Channelizer "%s" descriptors based on input descriptor: %s.',
                     self.name, self.sink.descriptor)

        # extract record time sampling
        self.time_pts = self.sink.descriptor.axes[-1].points
        self.record_length = len(self.time_pts)
        self.time_step = self.time_pts[1] - self.time_pts[0]
        logger.debug("Channelizer time_step = {}".format(self.time_step))

        # We will be decimating along a time axis, which is always
        # going to be the last axis given the way we usually take data.
        # TODO: perform this function along a named axis rather than a numbered axis
        # in case something about this changes.

        # update output descriptors
        decimated_descriptor = DataStreamDescriptor()
        decimated_descriptor.axes = self.sink.descriptor.axes[:]
        decimated_descriptor.axes[-1] = deepcopy(self.sink.descriptor.axes[-1])
        decimated_descriptor.axes[-1].points = self.sink.descriptor.axes[-1].points[
            self.decimation_factor.value - 1::self.decimation_factor.value]
        decimated_descriptor.axes[-1].original_points = decimated_descriptor.axes[-1].points
        decimated_descriptor._exp_src = self.sink.descriptor._exp_src
        decimated_descriptor.dtype = np.complex64
        self.output_descriptor = decimated_descriptor
        for os in self.source.output_streams:
            os.set_descriptor(decimated_descriptor)
            if os.end_connector is not None:
                os.end_connector.update_descriptors()

    async def process_data(self, data):
        # Append any data carried from the last run
        if self.carry.size > 0:
            data = np.concatenate((self.carry, data))

        # This is the largest number of records we can handle
        num_records = data.size // self.record_length

        # This is the carryover that we'll store until next round.
        # If nothing is left then reset the carryover.
        remaining_points = data.size % self.record_length
        if remaining_points > 0:
            if num_records > 0:
                self.carry = data[-remaining_points:]
                data = data[:-remaining_points]
            else:
                self.carry = data
        else:
            self.carry = np.zeros(0, dtype=self.output_descriptor.dtype)

        if num_records > 0:
            # The records are processed in parallel after being reshaped here
            reshaped_data = np.reshape(data, (num_records, self.record_length), order="C")

            # Update demodulation frequency if necessary
            if self.follow_axis.value != "":
                freq = self.demod_freqs[(self.idx % self.pts_before_freq_reset) // self.pts_before_freq_update]
                if freq != self.current_freq:
                    self.update_references(freq)
                    self.current_freq = freq

            self.idx += data.size

            # first stage decimating filter
            if self.filters[0] is None:
                filtered = reshaped_data
            else:
                stacked_coeffs = np.concatenate(self.filters[0])
                # filter
                if np.iscomplexobj(reshaped_data):
                    # TODO: compile complex versions of the IPP functions
                    filtered_r = np.empty_like(reshaped_data, dtype=np.float32)
                    filtered_i = np.empty_like(reshaped_data, dtype=np.float32)
                    libipp.filter_records_iir(stacked_coeffs, self.filters[0][0].size - 1,
                                              np.ascontiguousarray(reshaped_data.real.astype(np.float32)),
                                              self.record_length, num_records, filtered_r)
                    libipp.filter_records_iir(stacked_coeffs, self.filters[0][0].size - 1,
                                              np.ascontiguousarray(reshaped_data.imag.astype(np.float32)),
                                              self.record_length, num_records, filtered_i)
                    filtered = filtered_r + 1j * filtered_i
                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]
                else:
                    filtered = np.empty_like(reshaped_data)
                    libipp.filter_records_iir(stacked_coeffs, self.filters[0][0].size - 1,
                                              reshaped_data, self.record_length, num_records, filtered)
                    # decimate
                    if self.decim_factors[0] > 1:
                        filtered = filtered[:, ::self.decim_factors[0]]

            # mix with reference
            # keep real and imaginary separate for filtering below
            if np.iscomplexobj(reshaped_data):
                filtered *= self.reference
                filtered_r = filtered.real
                filtered_i = filtered.imag
            else:
                filtered_r = self.reference_r * filtered
                filtered_i = self.reference_i * filtered

            # channel selection filters
            for ct in [1, 2]:
                if self.filters[ct] is None:
                    continue

                coeffs = self.filters[ct]
                stacked_coeffs = np.concatenate(self.filters[ct])
                out_r = np.empty_like(filtered_r).astype(np.float32)
                out_i = np.empty_like(filtered_i).astype(np.float32)
                libipp.filter_records_iir(stacked_coeffs, self.filters[ct][0].size - 1,
                                          np.ascontiguousarray(filtered_r.astype(np.float32)),
                                          filtered_r.shape[-1], num_records, out_r)
                libipp.filter_records_iir(stacked_coeffs, self.filters[ct][0].size - 1,
                                          np.ascontiguousarray(filtered_i.astype(np.float32)),
                                          filtered_i.shape[-1], num_records, out_i)

                # decimate
                if self.decim_factors[ct] > 1:
                    filtered_r = np.copy(out_r[:, ::self.decim_factors[ct]], order="C")
                    filtered_i = np.copy(out_i[:, ::self.decim_factors[ct]], order="C")
                else:
                    filtered_r = out_r
                    filtered_i = out_i

            filtered = filtered_r + 1j * filtered_i

            # recover gain from selecting single sideband
            filtered *= 2

            # push to output connectors
            for os in self.source.output_streams:
                await os.push(filtered)
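# Condensed sketch of the Channelizer signal path on one synthetic record, using
# scipy.signal.lfilter in place of the libipp record-wise IIR routines. The tone
# frequency, sample rate, and decimation factor are illustrative values only.
def _demo_channelize_record():
    import numpy as np
    import scipy.signal

    fs, f0, decim = 1e9, 10e6, 4
    t = np.arange(1024) / fs
    record = np.cos(2 * np.pi * f0 * t)            # real input record
    mixed = record * np.exp(-2j * np.pi * f0 * t)  # mix channel to baseband
    b, a = scipy.signal.cheby1(4, 3, 0.8 / decim)  # anti-aliasing low-pass
    filtered = scipy.signal.lfilter(b, a, mixed)
    baseband = 2 * filtered[::decim]               # decimate; recover SSB gain
    return baseband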
class Plotter(Filter):
    sink = InputConnector()
    plot_dims = IntParameter(value_range=(0, 1, 2), snap=1, default=0)  # 0 means auto
    plot_mode = Parameter(allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"], default="quad")

    def __init__(self, *args, name="", plot_dims=None, plot_mode=None, **plot_args):
        super(Plotter, self).__init__(*args, name=name)
        if plot_dims:
            self.plot_dims.value = plot_dims
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.full_update_interval = 0.5
        self.update_interval = 2.0  # slower for partial updates
        self.last_update = time.time()
        self.last_full_update = time.time()
        self.quince_parameters = [self.plot_dims, self.plot_mode]

        # This will hold the matplot server
        self.plot_server = None

    def desc(self):
        d = {'plot_type': 'standard',
             'plot_mode': self.plot_mode.value,
             'plot_dims': int(self.plot_dims.value),
             'x_min': float(min(self.x_values)),
             'x_max': float(max(self.x_values)),
             'x_len': int(self.descriptor.axes[-1].num_points()),
             'x_label': self.axis_label(-1),
             'y_label': "{} ({})".format(self.descriptor.data_name, self.descriptor.data_unit)}
        if self.plot_dims.value == 2:
            d['y_label'] = self.axis_label(-2)
            d['data_label'] = "{} ({})".format(self.descriptor.data_name, self.descriptor.data_unit)
            d['y_min'] = float(min(self.y_values))
            d['y_max'] = float(max(self.y_values))
            d['y_len'] = int(self.descriptor.axes[-2].num_points())
        return d

    def update_descriptors(self):
        logger.debug("Updating Plotter %s descriptors based on input descriptor %s",
                     self.name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor

    def final_init(self):
        # Determine the plot dimensions
        if not self.plot_dims.value:
            if len(self.descriptor.axes) > 1:
                self.plot_dims.value = 2
            else:
                self.plot_dims.value = 1

        # Check the descriptor axes
        num_axes = len(self.descriptor.axes)
        if self.plot_dims.value > num_axes:
            logger.info("Cannot plot in more dimensions than there are data axes.")
            self.plot_dims.value = num_axes

        if self.plot_dims.value == 1:
            self.points_before_clear = self.descriptor.axes[-1].num_points()
        else:
            self.points_before_clear = self.descriptor.axes[-1].num_points() * self.descriptor.axes[-2].num_points()
        logger.debug("Plot will clear after every %d points.", self.points_before_clear)

        self.x_values = self.descriptor.axes[-1].points
        if self.plot_dims.value == 2:
            self.y_values = self.descriptor.axes[-2].points

        self.plot_buffer = (np.nan * np.ones(self.points_before_clear)).astype(self.descriptor.dtype)
        self.idx = 0

    def update(self):
        if self.plot_dims.value == 1:
            self.plot_server.send(self.name, self.x_values, self.plot_buffer.copy())
        elif self.plot_dims.value == 2:
            self.plot_server.send(self.name, self.x_values, self.y_values, self.plot_buffer.copy())

    async def process_data(self, data):
        # If we get more than enough data, pause to update the plot if necessary
        if (self.idx + data.size) > self.points_before_clear:
            spill_over = (self.idx + data.size) % self.points_before_clear
            if spill_over == 0:
                spill_over = self.points_before_clear
            if (time.time() - self.last_full_update >= self.full_update_interval):
                # If we are getting data quickly, then we can afford to wait
                # for a full frame before pushing to plot.
                self.plot_buffer[self.idx:] = data[:(self.points_before_clear - self.idx)]
                self.update()
                self.last_full_update = time.time()
            self.plot_buffer[:] = np.nan
            self.plot_buffer[:spill_over] = data[-spill_over:]
            self.idx = spill_over
        else:  # just keep trucking
            self.plot_buffer[self.idx:self.idx + data.size] = data.flatten()
            self.idx += data.size
            if (time.time() - max(self.last_full_update, self.last_update) >= self.update_interval):
                self.update()
                self.last_update = time.time()

    async def on_done(self):
        if self.plot_dims.value == 1:
            self.plot_server.send(self.name, self.x_values, self.plot_buffer)
        elif self.plot_dims.value == 2:
            self.plot_server.send(self.name, self.x_values, self.y_values, self.plot_buffer)

    def axis_label(self, index):
        unit_str = " ({})".format(self.descriptor.axes[index].unit) if self.descriptor.axes[index].unit else ''
        return self.descriptor.axes[index].name + unit_str
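# Toy illustration of the wrap-around bookkeeping in Plotter.process_data: when
# incoming data overruns the current frame, the frame is finished, cleared, and
# the spill-over seeds the next frame. All sizes here are made up.
def _demo_plot_buffer_spill():
    import numpy as np
    points_before_clear, idx = 8, 6
    plot_buffer = np.full(points_before_clear, np.nan)
    data = np.arange(5, dtype=float)                      # 6 + 5 > 8, so we spill
    spill_over = (idx + data.size) % points_before_clear  # 3 points carry over
    plot_buffer[idx:] = data[:points_before_clear - idx]  # complete this frame
    plot_buffer[:] = np.nan                               # clear for the new frame
    plot_buffer[:spill_over] = data[-spill_over:]         # spill starts the frame
    return plot_buffer, spill_over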
class MeshPlotter(Filter):
    sink = InputConnector()
    plot_mode = Parameter(allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"], default="quad")

    def __init__(self, *args, name="", plot_mode=None, x_label="", y_label="", **plot_args):
        super(MeshPlotter, self).__init__(*args, name=name)
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.update_interval = 0.5
        self.last_update = time.time()
        self.x_label = x_label
        self.y_label = y_label
        self.quince_parameters = [self.plot_mode]

        # Unique id for plot server
        self.uuid = None
        # Should we actually produce plots?
        self.do_plotting = True

    def desc(self):
        d = {'plot_type': 'mesh',
             'plot_mode': self.plot_mode.value,
             'x_label': self.x_label,
             'y_label': self.y_label}
        return d

    def send(self, message):
        if self.do_plotting:
            data = message['data']
            msg = message['msg']
            name = message['name']

            msg_contents = [self.uuid.encode(), msg.encode(), name.encode()]

            # We might be sending multiple axes, series, etc.
            # Just add them successively to a multipart message.
            for dat in data:
                md = dict(dtype=str(dat.dtype), shape=dat.shape)
                msg_contents.extend([json.dumps(md).encode(), np.ascontiguousarray(dat)])
            self.socket.send_multipart(msg_contents)

    def update_descriptors(self):
        logger.info("Updating MeshPlotter %s descriptors based on input descriptor %s",
                    self.filter_name, self.sink.descriptor)

    def execute_on_run(self):
        # Connect to the plot server
        if self.do_plotting:
            try:
                self.context = zmq.Context()
                self.socket = self.context.socket(zmq.DEALER)
                self.socket.identity = "Auspex_Experiment".encode()
                self.socket.connect("tcp://localhost:7762")
            except:
                logger.warning("Exception occurred while contacting the plot server. Is it running?")

    def process_direct(self, data):
        self.send({'name': self.filter_name, "msg": "data", 'data': [data]})

    def on_done(self):
        self.send({'name': self.filter_name, 'data': [np.array([])], "msg": "done"})
        if self.do_plotting:
            self.socket.close()
            self.context.term()
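# Sketch of how a receiver might unpack the multipart message MeshPlotter.send
# emits: [uuid, msg, name, (json metadata, raw array buffer), ...]. The plot
# server's actual decoding is assumed to mirror this framing.
def _demo_decode_plot_message(msg_contents):
    import json
    import numpy as np
    uuid, msg, name = (part.decode() for part in msg_contents[:3])
    arrays = []
    for md_bytes, buf in zip(msg_contents[3::2], msg_contents[4::2]):
        md = json.loads(md_bytes.decode())
        arrays.append(np.frombuffer(buf, dtype=md['dtype']).reshape(md['shape']))
    return uuid, msg, name, arrays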
class Plotter(Filter):
    sink = InputConnector()
    plot_dims = IntParameter(value_range=(0, 1, 2), snap=1, default=0)  # 0 means auto
    plot_mode = Parameter(allowed_values=["real", "imag", "real/imag", "amp/phase", "quad"], default="quad")

    def __init__(self, *args, name="", plot_dims=None, plot_mode=None, **plot_args):
        super(Plotter, self).__init__(*args, name=name)
        if plot_dims:
            self.plot_dims.value = plot_dims
        if plot_mode:
            self.plot_mode.value = plot_mode
        self.plot_args = plot_args
        self.full_update_interval = 0.5
        self.update_interval = 2.0  # slower for partial updates
        self.last_update = time.time()
        self.last_full_update = time.time()

        self._final_buffer = Queue()
        self.final_buffer = None

        self.quince_parameters = [self.plot_dims, self.plot_mode]

        # Unique id for plot server
        self.uuid = None
        # Should we actually produce plots?
        self.do_plotting = True

    def send(self, message):
        if self.do_plotting:
            data = message['data']
            msg = message['msg']
            name = message['name']

            msg_contents = [self.uuid.encode(), msg.encode(), name.encode()]

            # We might be sending multiple axes, series, etc.
            # Just add them successively to a multipart message.
            for dat in data:
                md = dict(dtype=str(dat.dtype), shape=dat.shape)
                msg_contents.extend([json.dumps(md).encode(), np.ascontiguousarray(dat)])
            self.socket.send_multipart(msg_contents)

    def get_final_plot(self, quad_funcs=[np.abs, np.angle]):
        if not self.done.is_set():
            raise Exception("Cannot get final plot since plotter is not done or was not run.")

        from bqplot import LinearScale, ColorScale, ColorAxis, Axis, Lines, Figure, Tooltip, HeatMap
        from bqplot.toolbar import Toolbar
        from ipywidgets import VBox, HBox

        if self.final_buffer is None:
            self.final_buffer = self._final_buffer.get()

        if self.plot_dims.value == 2:
            raise NotImplementedError("2 dimensional get_final_plot not yet implemented.")
        elif self.plot_dims.value == 1:
            figs = []
            for quad_func in quad_funcs:
                sx = LinearScale()
                sy = LinearScale()
                ax = Axis(label=self.axis_label(-1), scale=sx)
                ay = Axis(label=f"{self.descriptor.data_name} ({self.descriptor.data_unit})",
                          scale=sy, orientation='vertical')
                line = Lines(x=self.x_values, y=quad_func(self.final_buffer), scales={'x': sx, 'y': sy})
                fig = Figure(marks=[line], axes=[ax, ay], title=self.filter_name)
                figs.append(fig)
        if len(figs) <= 2:
            return HBox(figs)
        elif len(figs) == 4:
            return VBox([HBox([figs[0], figs[1]]), HBox([figs[2], figs[3]])])
        elif len(figs) == 3 or len(figs) > 4:
            raise Exception("Please use 1, 2, or 4 quadrature functions.")

    def desc(self):
        d = {'plot_type': 'standard',
             'plot_mode': self.plot_mode.value,
             'plot_dims': int(self.plot_dims.value),
             'x_min': float(min(self.x_values)),
             'x_max': float(max(self.x_values)),
             'x_len': int(self.descriptor.axes[-1].num_points()),
             'x_label': self.axis_label(-1),
             'y_label': "{} ({})".format(self.descriptor.data_name, self.descriptor.data_unit)}
        if self.plot_dims.value == 2:
            d['y_label'] = self.axis_label(-2)
            d['data_label'] = "{} ({})".format(self.descriptor.data_name, self.descriptor.data_unit)
            d['y_min'] = float(min(self.y_values))
            d['y_max'] = float(max(self.y_values))
            d['y_len'] = int(self.descriptor.axes[-2].num_points())
        return d

    def set_done(self):
        self.send({'name': self.filter_name, 'data': [np.array([])], "msg": "done"})

    def set_quit(self):
        self.send({'name': self.filter_name, 'data': [np.array([])], "msg": "quit"})

    def update_descriptors(self):
        logger.debug("Updating Plotter %s descriptors based on input descriptor %s",
                     self.filter_name, self.sink.descriptor)
        self.stream = self.sink.input_streams[0]
        self.descriptor = self.sink.descriptor

    def final_init(self):
        # Determine the plot dimensions
        if not self.plot_dims.value:
            if len(self.descriptor.axes) > 1:
                self.plot_dims.value = 2
            else:
                self.plot_dims.value = 1

        # Check the descriptor axes
        num_axes = len(self.descriptor.axes)
        if self.plot_dims.value > num_axes:
            logger.info("Cannot plot in more dimensions than there are data axes.")
            self.plot_dims.value = num_axes

        if self.plot_dims.value == 1:
            self.points_before_clear = self.descriptor.axes[-1].num_points()
        else:
            self.points_before_clear = self.descriptor.axes[-1].num_points() * self.descriptor.axes[-2].num_points()
        logger.debug("Plot will clear after every %d points.", self.points_before_clear)

        self.x_values = self.descriptor.axes[-1].points
        if self.plot_dims.value == 2:
            self.y_values = self.descriptor.axes[-2].points

        # I'm so sorry everyone. Send Julia
        if 'complex' in np.dtype(self.descriptor.dtype).name:
            self.plot_buffer = (np.nan * np.ones(self.points_before_clear) +
                                1.0j * np.nan * np.ones(self.points_before_clear)).astype(self.descriptor.dtype)
        else:
            self.plot_buffer = np.nan * np.ones(self.points_before_clear)
        self.idx = 0

    def execute_on_run(self):
        # Connect to the plot server
        if self.do_plotting:
            try:
                self.context = zmq.Context()
                self.socket = self.context.socket(zmq.DEALER)
                self.socket.identity = f"Auspex_Experiment_{self.filter_name}_{hex(id(self))}".encode()
                self.socket.connect("tcp://localhost:7762")
            except:
                logger.warning("Exception occurred while contacting the plot server. Is it running?")

    def update(self):
        if self.plot_dims.value == 1:
            self.send({'name': self.filter_name, 'msg': 'data',
                       'data': [self.x_values, self.plot_buffer.copy()]})
        elif self.plot_dims.value == 2:
            self.send({'name': self.filter_name, 'msg': 'data',
                       'data': [self.x_values, self.y_values, self.plot_buffer.copy()]})

    def process_data(self, data):
        # If we get more than enough data, pause to update the plot if necessary
        if (self.idx + data.size) > self.points_before_clear:
            spill_over = (self.idx + data.size) % self.points_before_clear
            if spill_over == 0:
                spill_over = self.points_before_clear
            if (time.time() - self.last_full_update >= self.full_update_interval):
                # If we are getting data quickly, then we can afford to wait
                # for a full frame before pushing to plot.
                self.plot_buffer[self.idx:] = data[:(self.points_before_clear - self.idx)]
                self.update()
                self.last_full_update = time.time()
            self.plot_buffer[:] = np.nan
            self.plot_buffer[:spill_over] = data[-spill_over:]
            self.idx = spill_over
        else:  # just keep trucking
            self.plot_buffer[self.idx:self.idx + data.size] = data.flatten()
            self.idx += data.size
            if (time.time() - max(self.last_full_update, self.last_update) >= self.update_interval):
                self.update()
                self.last_update = time.time()

    def on_done(self):
        if self.plot_dims.value == 1:
            self.send({'name': self.filter_name, "msg": "data",
                       'data': [self.x_values, self.plot_buffer.copy()]})
        elif self.plot_dims.value == 2:
            self.send({'name': self.filter_name, "msg": "data",
                       'data': [self.x_values, self.y_values, self.plot_buffer.copy()]})
        self._final_buffer.put(self.plot_buffer)
        if self.do_plotting:
            self.set_done()
            self.socket.close()
            self.context.term()

    def axis_label(self, index):
        unit_str = " ({})".format(self.descriptor.axes[index].unit) if self.descriptor.axes[index].unit else ''
        return self.descriptor.axes[index].name + unit_str
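# Hedged notebook usage sketch for Plotter.get_final_plot: once the experiment
# has finished and the plotter's `done` event is set, the final buffer can be
# rendered as bqplot figures. `plotter` is assumed to come from a completed run.
def _demo_final_plot(plotter):
    import numpy as np
    # Two quadrature functions yield an HBox with amplitude and phase panels.
    return plotter.get_final_plot(quad_funcs=[np.abs, np.angle])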
class KernelIntegrator(Filter):

    sink = InputConnector()
    source = OutputConnector()
    kernel = Parameter()
    bias = FloatParameter(default=0.0)
    simple_kernel = BoolParameter(default=True)
    box_car_start = FloatParameter(default=0.0)
    box_car_stop = FloatParameter(default=100e-9)
    frequency = FloatParameter(default=0.0)

    """Integrate with a given kernel. Kernel will be padded/truncated to match record length"""

    def __init__(self, **kwargs):
        super(KernelIntegrator, self).__init__(**kwargs)
        self.pre_int_op = None
        self.post_int_op = None
        for k, v in kwargs.items():
            if hasattr(self, k) and isinstance(getattr(self, k), Parameter):
                getattr(self, k).value = v
        if "pre_integration_operation" in kwargs:
            self.pre_int_op = kwargs["pre_integration_operation"]
        if "post_integration_operation" in kwargs:
            self.post_int_op = kwargs["post_integration_operation"]
        self.quince_parameters = [self.simple_kernel, self.frequency, self.box_car_start, self.box_car_stop]

    def update_descriptors(self):
        if not self.simple_kernel and self.kernel.value is None:
            raise ValueError("Integrator was passed kernel None")

        logger.debug('Updating KernelIntegrator "%s" descriptors based on input descriptor: %s.',
                     self.name, self.sink.descriptor)

        record_length = self.sink.descriptor.axes[-1].num_points()
        if self.simple_kernel.value:
            time_pts = self.sink.descriptor.axes[-1].points
            time_step = time_pts[1] - time_pts[0]
            kernel = np.zeros(record_length, dtype=np.complex128)
            sample_start = int(self.box_car_start.value / time_step)
            sample_stop = int(self.box_car_stop.value / time_step) + 1
            kernel[sample_start:sample_stop] = 1.0
            # add modulation
            kernel *= np.exp(2j * np.pi * self.frequency.value * time_step * time_pts)
        else:
            kernel = eval(self.kernel.value.encode('unicode_escape'))

        # pad or truncate the kernel to match the record length
        if kernel.size < record_length:
            self.aligned_kernel = np.append(kernel, np.zeros(record_length - kernel.size, dtype=np.complex128))
        else:
            self.aligned_kernel = np.resize(kernel, record_length)

        # Integrator reduces and removes axis on output stream
        # update output descriptors
        output_descriptor = DataStreamDescriptor()
        # TODO: handle reduction to single point
        output_descriptor.axes = self.sink.descriptor.axes[:-1]
        output_descriptor._exp_src = self.sink.descriptor._exp_src
        output_descriptor.dtype = np.complex128
        for os in self.source.output_streams:
            os.set_descriptor(output_descriptor)
            os.end_connector.update_descriptors()

    async def process_data(self, data):
        # TODO: handle variable partial records
        if self.pre_int_op:
            data = self.pre_int_op(data)
        filtered = np.inner(np.reshape(data, (-1, len(self.aligned_kernel))), self.aligned_kernel)
        if self.post_int_op:
            filtered = self.post_int_op(filtered)
        # push to output connectors
        for os in self.source.output_streams:
            await os.push(filtered)