def test_get_on_parameter_marked_as_non_gettable_raises():
    """Reading a parameter whose ``_gettable`` flag is cleared must raise."""
    param = Parameter("param")
    # There is no public API for this; flip the private flag directly.
    param._gettable = False
    expected_message = "Trying to get a parameter that is not gettable."
    with pytest.raises(TypeError, match=expected_message):
        param.get()
def test_stepping_from_invalid_starting_point():
    """A stepped set must fail cleanly when the current value is out of range.

    The parameter's validator allows 0..100, but the backing "instrument"
    starts at -10. Stepping from -10 towards a valid target would pass
    through intermediate values (e.g. -5) that the validator rejects.
    """
    store = {"value": -10}

    def write_value(value):
        store["value"] = value

    def read_value():
        return store["value"]

    param = Parameter('test', set_cmd=write_value, get_cmd=read_value,
                      vals=Numbers(0, 100), step=5)
    # We start out by setting the parameter to an
    # invalid value. This is not possible using initial_value
    # as the validator will catch that but perhaps this may happen
    # if the instrument can return out of range values.
    assert param.get() == -10
    with pytest.raises(ValueError):
        # trying to set to 10 should raise even with 10 valid
        # as the steps demand that we first step to -5 which is not
        param.set(10)
    # afterwards the value should still be the same
    assert param.get() == -10
def test_set_on_parameter_marked_as_non_settable_raises():
    """Writing a parameter whose ``_settable`` flag is cleared must raise,
    and the cached value must remain untouched by the failed set."""
    param = Parameter("param", set_cmd=None)
    param.set(2)
    assert param.get() == 2

    # There is no public API for this; flip the private flag directly.
    param._settable = False
    expected_message = "Trying to set a parameter that is not settable."
    with pytest.raises(TypeError, match=expected_message):
        param.set(1)

    # The rejected set must not have changed the value.
    assert param.get() == 2
def test_scale_and_offset_raw_value_iterable_for_set_cache(
        values, offsets, scales):
    """Check interaction of ``scale``/``offset`` with ``cache.set``,
    ``get`` and ``get_latest`` for scalar and iterable values.

    NOTE(review): ``values``, ``offsets`` and ``scales`` are supplied by an
    outer parametrization/hypothesis strategy not visible here — presumably
    scalars or iterables of numbers; ``event`` records statistics about
    which combinations were generated.
    """
    p = Parameter(name='test_scale_and_offset_raw_value', set_cmd=None)

    # test that scale and offset does not change the default behaviour
    p.cache.set(values)
    assert p.raw_value == values

    # test setting scale and offset does not change anything
    p.scale = scales
    p.offset = offsets
    assert p.raw_value == values

    np_values = np.array(values)
    np_offsets = np.array(offsets)
    np_scales = np.array(scales)
    np_get_latest_values = np.array(p.get_latest())
    # Without a call to ``get``, ``get_latest`` will just return old
    # cached values without applying the set scale and offset
    np.testing.assert_allclose(np_get_latest_values, np_values)
    np_get_values = np.array(p.get())
    # Now that ``get`` is called, the returned values are the result of
    # application of the scale and offset. Obviously, calling
    # ``get_latest`` now will also return the values with the applied
    # scale and offset
    np.testing.assert_allclose(np_get_values,
                               (np_values - np_offsets) / np_scales)
    np_get_latest_values_after_get = np.array(p.get_latest())
    np.testing.assert_allclose(np_get_latest_values_after_get,
                               (np_values - np_offsets) / np_scales)

    # test ``cache.set`` for scalar values
    if not isinstance(values, Iterable):
        p.cache.set(values)
        # ``cache.set`` applies the inverse transformation to obtain the
        # raw value from the user-facing value.
        np.testing.assert_allclose(np.array(p.raw_value),
                                   np_values * np_scales + np_offsets)
        # No set/get cmd performed

        # testing conversion back and forth
        p.cache.set(values)
        np_get_latest_values = np.array(p.get_latest())
        # No set/get cmd performed
        np.testing.assert_allclose(np_get_latest_values, np_values)

    # adding statistics
    if isinstance(offsets, Iterable):
        event('Offset is array')
    if isinstance(scales, Iterable):
        event('Scale is array')
    if isinstance(values, Iterable):
        event('Value is array')
    if isinstance(scales, Iterable) and isinstance(offsets, Iterable):
        event('Scale is array and also offset')
    if isinstance(scales, Iterable) and not isinstance(offsets, Iterable):
        event('Scale is array but not offset')
def test_set_via_function():
    """A ``Function`` that wraps ``Parameter.set`` works, and its declared
    argument validator rejects out-of-range calls."""
    # not a use case we want to promote, but it's there...
    target = Parameter('test', get_cmd=None, set_cmd=None)

    def double_then_set(value):
        target.set(value * 2)

    func = Function('f', call_cmd=double_then_set,
                    args=[vals.Numbers(-10, 10)])

    func(4)
    assert target.get() == 8

    # 20 lies outside the declared Numbers(-10, 10) range.
    with pytest.raises(ValueError):
        func(20)
def test_get_cache_no_get():
    """
    Test that cache.get on a parameter that does not have get is handled
    correctly.
    """
    param_without_get = Parameter('test_param', set_cmd=None, get_cmd=False)

    # The parameter does not have a get method.
    with pytest.raises(AttributeError):
        param_without_get.get()
    # get_latest will fail as get cannot be called and no cache
    # is available
    with pytest.raises(RuntimeError):
        param_without_get.cache.get()

    # Once a value has been set, the cache can serve it.
    expected = 1
    param_without_get.set(expected)
    assert param_without_get.cache.get() == expected

    # An initial_value seeds the cache the same way a set does.
    seeded_param = Parameter('test_param2', set_cmd=None, get_cmd=False,
                             initial_value=expected)
    with pytest.raises(AttributeError):
        seeded_param.get()
    assert seeded_param.cache.get() == expected
def array_in_str_dataset(experiment, request):
    """Fixture: dataset with a text setpoint and an array parameter whose
    storage paramtype comes from the fixture's parametrization."""
    measurement = Measurement()
    text_setpoint = Parameter('textparam', set_cmd=None)
    array_param = ArraySetPointParam()
    measurement.register_parameter(text_setpoint, paramtype='text')
    measurement.register_parameter(array_param,
                                   setpoints=(text_setpoint,),
                                   paramtype=request.param)

    with measurement.run() as datasaver:
        for label in ('A', 'B', 'C'):
            text_setpoint.set(label)
            datasaver.add_result((text_setpoint, text_setpoint.get()),
                                 (array_param, array_param.get()))
    try:
        yield datasaver.dataset
    finally:
        # Always release the database connection, even if the test fails.
        datasaver.dataset.conn.close()
def array_in_scalar_dataset(experiment):
    """Fixture: dataset with a numeric scalar setpoint and an array-valued
    parameter stored with the 'array' paramtype."""
    measurement = Measurement()
    scalar_setpoint = Parameter('scalarparam', set_cmd=None)
    array_param = ArraySetPointParam()
    measurement.register_parameter(scalar_setpoint)
    measurement.register_parameter(array_param,
                                   setpoints=(scalar_setpoint,),
                                   paramtype='array')

    with measurement.run() as datasaver:
        for setpoint_value in range(1, 10):
            scalar_setpoint.set(setpoint_value)
            datasaver.add_result((scalar_setpoint, scalar_setpoint.get()),
                                 (array_param, array_param.get()))
    try:
        yield datasaver.dataset
    finally:
        # Always release the database connection, even if the test fails.
        datasaver.dataset.conn.close()
def test_gettable():
    """A get-only parameter reads from its source, refuses to be set, and
    ``cache.set`` does not leak back into the underlying instrument."""
    mem = ParameterMemory()
    param = Parameter('p', get_cmd=mem.get)
    mem.set(21)

    # Both the call syntax and ``.get()`` read through to the memory.
    assert param() == 21
    assert param.get() == 21

    # Calling with an argument is a set — not supported without set_cmd.
    with pytest.raises(NotImplementedError):
        param(10)

    assert hasattr(param, 'get')
    assert param.gettable
    assert not hasattr(param, 'set')
    assert not param.settable

    # ``cache.set`` only updates the cache, never the instrument.
    param.cache.set(7)
    assert param.get_latest() == 7
    # Nothing has been passed to the "instrument" at ``cache.set``
    # call, hence the following assertions should hold
    assert mem.get() == 21
    assert param() == 21
    assert param.get_latest() == 21
def array_in_scalar_dataset_unrolled(experiment):
    """
    This fixture yields a dataset where an array-valued parameter is
    registered as a 'numeric' type and has an additional single-valued
    setpoint. We expect data to be saved as individual scalars, with the
    scalar setpoint repeated.
    """
    measurement = Measurement()
    scalar_setpoint = Parameter('scalarparam', set_cmd=None)
    array_param = ArraySetPointParam()
    measurement.register_parameter(scalar_setpoint)
    measurement.register_parameter(array_param,
                                   setpoints=(scalar_setpoint,),
                                   paramtype='numeric')

    with measurement.run() as datasaver:
        for setpoint_value in range(1, 10):
            scalar_setpoint.set(setpoint_value)
            datasaver.add_result((scalar_setpoint, scalar_setpoint.get()),
                                 (array_param, array_param.get()))
    try:
        yield datasaver.dataset
    finally:
        # Always release the database connection, even if the test fails.
        datasaver.dataset.conn.close()
class AMI430_3D(Instrument):
    """
    Virtual driver combining three American Magnetics Model 430 power
    supplies (one per cartesian axis) into a single 3D magnetic-field
    instrument. See ``__init__`` for the argument description.
    """

    def __init__(
        self,
        name: str,
        instrument_x: Union[AMI430, str],
        instrument_y: Union[AMI430, str],
        instrument_z: Union[AMI430, str],
        field_limit: Union[numbers.Real, Iterable[CartesianFieldLimitFunction]],
        **kwargs: Any,
    ):
        """
        Driver for controlling three American Magnetics Model 430 magnet power
        supplies simultaneously for setting magnetic field vectors.

        The individual magnet power supplies can be passed in as either
        instances of AMI430 driver or as names of existing AMI430 instances.
        In the latter case, the instances will be found via the passed names.

        Args:
            name: a name for the instrument
            instrument_x: AMI430 instance or a names of existing AMI430
                instance for controlling the X axis of magnetic field
            instrument_y: AMI430 instance or a names of existing AMI430
                instance for controlling the Y axis of magnetic field
            instrument_z: AMI430 instance or a names of existing AMI430
                instance for controlling the Z axis of magnetic field
            field_limit: a number for maximum allows magnetic field or an
                iterable of callable field limit functions that define
                region(s) of allowed values in 3D magnetic field space
        """
        super().__init__(name, **kwargs)

        if not isinstance(name, str):
            raise ValueError("Name should be a string")

        # Validate the three axis arguments up front so a misconfiguration
        # fails with a clear message naming the offending argument.
        for instrument, arg_name in zip(
            (instrument_x, instrument_y, instrument_z),
            ("instrument_x", "instrument_y", "instrument_z"),
        ):
            if not isinstance(instrument, (AMI430, str)):
                raise ValueError(
                    f"Instruments need to be instances of the class AMI430 "
                    f"or be valid names of already instantiated instances "
                    f"of AMI430 class; {arg_name} argument is "
                    f"neither of those"
                )

        def find_ami430_with_name(ami430_name: str) -> AMI430:
            # Resolve an instrument name to an already-instantiated AMI430.
            found_ami430 = AMI430.find_instrument(
                name=ami430_name, instrument_class=AMI430
            )
            return found_ami430

        self._instrument_x = (
            instrument_x
            if isinstance(instrument_x, AMI430)
            else find_ami430_with_name(instrument_x)
        )
        self._instrument_y = (
            instrument_y
            if isinstance(instrument_y, AMI430)
            else find_ami430_with_name(instrument_y)
        )
        self._instrument_z = (
            instrument_z
            if isinstance(instrument_z, AMI430)
            else find_ami430_with_name(instrument_z)
        )

        self._field_limit: Union[float, Iterable[CartesianFieldLimitFunction]]
        if isinstance(field_limit, collections.abc.Iterable):
            self._field_limit = field_limit
        elif isinstance(field_limit, numbers.Real):
            # Conversion to float makes related driver logic simpler
            self._field_limit = float(field_limit)
        else:
            raise ValueError(
                "field limit should either be a number or "
                "an iterable of callable field limit functions."
            )

        # Seed the set point from the currently measured fields so the
        # driver starts out consistent with the hardware state.
        self._set_point = FieldVector(
            x=self._instrument_x.field(),
            y=self._instrument_y.field(),
            z=self._instrument_z.field(),
        )

        # Get-only parameters that return a measured value
        self.add_parameter(
            "cartesian_measured",
            get_cmd=partial(self._get_measured, "x", "y", "z"),
            unit="T",
        )

        self.add_parameter(
            "x_measured", get_cmd=partial(self._get_measured, "x"), unit="T"
        )

        self.add_parameter(
            "y_measured", get_cmd=partial(self._get_measured, "y"), unit="T"
        )

        self.add_parameter(
            "z_measured", get_cmd=partial(self._get_measured, "z"), unit="T"
        )

        self.add_parameter(
            "spherical_measured",
            get_cmd=partial(self._get_measured, "r", "theta", "phi"),
            unit="T",
        )

        self.add_parameter(
            "phi_measured", get_cmd=partial(self._get_measured, "phi"), unit="deg"
        )

        self.add_parameter(
            "theta_measured", get_cmd=partial(self._get_measured, "theta"), unit="deg"
        )

        self.add_parameter(
            "field_measured", get_cmd=partial(self._get_measured, "r"), unit="T"
        )

        self.add_parameter(
            "cylindrical_measured",
            get_cmd=partial(self._get_measured, "rho", "phi", "z"),
            unit="T",
        )

        self.add_parameter(
            "rho_measured", get_cmd=partial(self._get_measured, "rho"), unit="T"
        )

        # Get and set parameters for the set points of the coordinates
        self.add_parameter(
            "cartesian",
            get_cmd=partial(self._get_setpoints, ("x", "y", "z")),
            set_cmd=partial(self._set_setpoints, ("x", "y", "z")),
            unit="T",
            vals=Anything(),
        )

        self.add_parameter(
            "x",
            get_cmd=partial(self._get_setpoints, ("x",)),
            set_cmd=partial(self._set_setpoints, ("x",)),
            unit="T",
            vals=Numbers(),
        )

        self.add_parameter(
            "y",
            get_cmd=partial(self._get_setpoints, ("y",)),
            set_cmd=partial(self._set_setpoints, ("y",)),
            unit="T",
            vals=Numbers(),
        )

        self.add_parameter(
            "z",
            get_cmd=partial(self._get_setpoints, ("z",)),
            set_cmd=partial(self._set_setpoints, ("z",)),
            unit="T",
            vals=Numbers(),
        )

        self.add_parameter(
            "spherical",
            get_cmd=partial(self._get_setpoints, ("r", "theta", "phi")),
            set_cmd=partial(self._set_setpoints, ("r", "theta", "phi")),
            unit="tuple?",
            vals=Anything(),
        )

        self.add_parameter(
            "phi",
            get_cmd=partial(self._get_setpoints, ("phi",)),
            set_cmd=partial(self._set_setpoints, ("phi",)),
            unit="deg",
            vals=Numbers(),
        )

        self.add_parameter(
            "theta",
            get_cmd=partial(self._get_setpoints, ("theta",)),
            set_cmd=partial(self._set_setpoints, ("theta",)),
            unit="deg",
            vals=Numbers(),
        )

        self.add_parameter(
            "field",
            get_cmd=partial(self._get_setpoints, ("r",)),
            set_cmd=partial(self._set_setpoints, ("r",)),
            unit="T",
            vals=Numbers(),
        )

        self.add_parameter(
            "cylindrical",
            get_cmd=partial(self._get_setpoints, ("rho", "phi", "z")),
            set_cmd=partial(self._set_setpoints, ("rho", "phi", "z")),
            unit="tuple?",
            vals=Anything(),
        )

        self.add_parameter(
            "rho",
            get_cmd=partial(self._get_setpoints, ("rho",)),
            set_cmd=partial(self._set_setpoints, ("rho",)),
            unit="T",
            vals=Numbers(),
        )

        self.add_parameter(
            "block_during_ramp", set_cmd=None, initial_value=True, unit="", vals=Bool()
        )

        self.ramp_mode = Parameter(
            name="ramp_mode",
            instrument=self,
            get_cmd=None,
            set_cmd=None,
            vals=Enum("default", "simultaneous"),
            initial_value="default",
        )

        self.ramping_state_check_interval = Parameter(
            name="ramping_state_check_interval",
            instrument=self,
            initial_value=0.05,
            unit="s",
            vals=Numbers(0, 10),
            set_cmd=None,
            get_cmd=None,
        )

        self.vector_ramp_rate = Parameter(
            name="vector_ramp_rate",
            instrument=self,
            unit="T/s",
            vals=Numbers(min_value=0.0),
            set_cmd=None,
            get_cmd=None,
            # set_parser also refreshes the parameter's unit from the axis
            # instruments; see _set_vector_ramp_rate_units.
            set_parser=self._set_vector_ramp_rate_units,
            docstring="Ramp rate along a line (vector) in 3D space. Only active"
            " if `ramp_mode='simultaneous'`.",
        )
        """Ramp rate along a line (vector) in 3D field space"""

        # Collects restore_at_exit contexts for the per-axis ramp rates so
        # they can be restored after a simultaneous ramp finishes.
        self._exit_stack = ExitStack()

    def _set_vector_ramp_rate_units(self, val: float) -> float:
        """Set-parser for ``vector_ramp_rate``: validates that all axes use
        the same units and mirrors that common unit onto the parameter."""
        _, common_ramp_rate_units = self._raise_if_not_same_field_and_ramp_rate_units()
        self.vector_ramp_rate.unit = common_ramp_rate_units
        return val

    def ramp_simultaneously(self, setpoint: FieldVector, duration: float) -> None:
        """
        Ramp all axes simultaneously to the given setpoint and in the given time

        The method calculates and sets the required ramp rates per magnet
        axis, and then initiates a ramp simultaneously on all the axes. The
        trajectory of the tip of the magnetic field vector is thus linear in
        3D space, from the current field value to the setpoint.

        If ``block_during_ramp`` parameter is ``True``, the method will block
        until all axes finished ramping.

        If ``block_during_ramp`` parameter is ``True``, the ramp rates of
        individual magnet axes will be restored after the end of the ramp to
        their original values before the call of this method. If
        ``block_during_ramp`` parameter is ``False``, call the
        ``wait_while_all_axes_ramping`` method when needed to restore the
        ramp rates of the individual magnet axes.

        It is required for all axis instruments to have the same units for
        ramp rate and field, otherwise an exception is raised. The given
        setpoint and time are assumed to be in those common units.

        Args:
            setpoint: ``FieldVector`` setpoint
            duration: time in which the setpoint field has to be reached on all axes
        """
        (
            common_field_units,
            common_ramp_rate_units,
        ) = self._raise_if_not_same_field_and_ramp_rate_units()

        self.log.debug(
            f"Simultaneous ramp: setpoint {setpoint.repr_cartesian()} "
            f"{common_field_units} in {duration} {common_ramp_rate_units}"
        )

        # Get starting field value

        start_field = self._get_measured_field_vector()
        self.log.debug(
            f"Simultaneous ramp: start {start_field.repr_cartesian()} "
            f"{common_field_units}"
        )
        self.log.debug(
            f"Simultaneous ramp: delta {(setpoint - start_field).repr_cartesian()} "
            f"{common_field_units}"
        )

        # Calculate new vector ramp rate based on time and setpoint

        vector_ramp_rate = self.calculate_vector_ramp_rate_from_duration(
            start=start_field, setpoint=setpoint, duration=duration
        )
        self.vector_ramp_rate(vector_ramp_rate)
        self.log.debug(
            f"Simultaneous ramp: new vector ramp rate for {self.full_name} "
            f"is {vector_ramp_rate} {common_ramp_rate_units}"
        )

        # Launch the simultaneous ramp

        self.ramp_mode("simultaneous")
        self.cartesian(setpoint.get_components("x", "y", "z"))

    @staticmethod
    def calculate_axes_ramp_rates_for(
        start: FieldVector, setpoint: FieldVector, duration: float
    ) -> Tuple[float, float, float]:
        """
        Given starting and setpoint fields and expected ramp time calculates
        required ramp rates for x, y, z axes (in this order) where axes are
        ramped simultaneously.
        """
        vector_ramp_rate = AMI430_3D.calculate_vector_ramp_rate_from_duration(
            start, setpoint, duration
        )
        return AMI430_3D.calculate_axes_ramp_rates_from_vector_ramp_rate(
            start, setpoint, vector_ramp_rate
        )

    @staticmethod
    def calculate_vector_ramp_rate_from_duration(
        start: FieldVector, setpoint: FieldVector, duration: float
    ) -> float:
        """Return the straight-line speed needed to cover the distance from
        ``start`` to ``setpoint`` in ``duration``."""
        return setpoint.distance(start) / duration

    @staticmethod
    def calculate_axes_ramp_rates_from_vector_ramp_rate(
        start: FieldVector, setpoint: FieldVector, vector_ramp_rate: float
    ) -> Tuple[float, float, float]:
        """Project the vector ramp rate onto the x, y, z axes (in this
        order); components are returned as absolute values."""
        delta_field = setpoint - start
        ramp_rate_3d = delta_field / delta_field.norm() * vector_ramp_rate
        return abs(ramp_rate_3d["x"]), abs(ramp_rate_3d["y"]), abs(ramp_rate_3d["z"])

    def _raise_if_not_same_field_and_ramp_rate_units(self) -> Tuple[str, str]:
        """Return (field units, ramp-rate units) shared by all three axis
        instruments; raise ValueError if the axes disagree."""
        instruments = (self._instrument_x, self._instrument_y, self._instrument_z)

        # Map each distinct unit string to the set of instruments using it,
        # so the error message can name the offenders.
        field_units_of_instruments = defaultdict(set)
        ramp_rate_units_of_instruments = defaultdict(set)

        for instrument in instruments:
            ramp_rate_units_of_instruments[instrument.ramp_rate_units.cache.get()].add(
                instrument.full_name
            )
            field_units_of_instruments[instrument.field_units.cache.get()].add(
                instrument.full_name
            )

        if len(field_units_of_instruments) != 1:
            raise ValueError(
                f"Magnet axes instruments should have the same "
                f"`field_units`, instead they have: "
                f"{field_units_of_instruments}"
            )

        if len(ramp_rate_units_of_instruments) != 1:
            raise ValueError(
                f"Magnet axes instruments should have the same "
                f"`ramp_rate_units`, instead they have: "
                f"{ramp_rate_units_of_instruments}"
            )

        common_field_units = tuple(field_units_of_instruments.keys())[0]
        common_ramp_rate_units = tuple(ramp_rate_units_of_instruments.keys())[0]

        return common_field_units, common_ramp_rate_units

    def _verify_safe_setpoint(
        self, setpoint_values: Tuple[float, float, float]
    ) -> bool:
        """Return True if the cartesian setpoint is within the configured
        field limit (scalar norm bound, or any of the limit functions)."""
        if isinstance(self._field_limit, (int, float)):
            return bool(np.linalg.norm(setpoint_values) < self._field_limit)

        # Iterable case: the setpoint is allowed if at least one limit
        # function accepts it.
        answer = any(
            [limit_function(*setpoint_values) for limit_function in self._field_limit]
        )

        return answer

    def _adjust_child_instruments(self, values: Tuple[float, float, float]) -> None:
        """
        Set the fields of the x/y/z magnets. This function is called
        whenever the field is changed and performs several safety checks to
        make sure no limits are exceeded.

        Args:
            values: a tuple of cartesian coordinates (x, y, z).
        """
        self.log.debug("Checking whether fields can be set")

        # Check if exceeding the global field limit
        if not self._verify_safe_setpoint(values):
            raise ValueError("_set_fields aborted; field would exceed limit")

        # Check if the individual instruments are ready
        for name, value in zip(["x", "y", "z"], values):
            instrument = getattr(self, f"_instrument_{name}")
            if instrument.ramping_state() == "ramping":
                msg = "_set_fields aborted; magnet {} is already ramping"
                raise AMI430Exception(msg.format(instrument))

        # Now that we know we can proceed, call the individual instruments

        self.log.debug("Field values OK, proceeding")

        if self.ramp_mode() == "simultaneous":
            self._perform_simultaneous_ramp(values)
        else:
            self._perform_default_ramp(values)

    def _update_individual_axes_ramp_rates(
        self, values: Tuple[float, float, float]
    ) -> None:
        """Set each axis instrument's ramp rate so a simultaneous ramp to
        ``values`` follows a straight line at ``vector_ramp_rate``."""
        if self.vector_ramp_rate() is None or self.vector_ramp_rate() == 0:
            raise ValueError(
                "The value of the `vector_ramp_rate` Parameter is "
                "currently None or 0. Set it to an appropriate "
                "value to use the simultaneous ramping feature."
            )

        new_axes_ramp_rates = self.calculate_axes_ramp_rates_from_vector_ramp_rate(
            start=self._get_measured_field_vector(),
            setpoint=FieldVector(x=values[0], y=values[1], z=values[2]),
            vector_ramp_rate=self.vector_ramp_rate.get(),
        )
        instruments = (self._instrument_x, self._instrument_y, self._instrument_z)
        for instrument, new_axis_ramp_rate in zip(instruments, new_axes_ramp_rates):
            instrument.ramp_rate.set(new_axis_ramp_rate)
            self.log.debug(
                f"Simultaneous ramp: new rate for {instrument.full_name} "
                f"is {new_axis_ramp_rate} {instrument.ramp_rate.unit}"
            )

    def _perform_simultaneous_ramp(self, values: Tuple[float, float, float]) -> None:
        """Start a non-blocking ramp on all axes at once; optionally wait
        for completion depending on ``block_during_ramp``."""
        # Register ramp-rate restoration before changing the per-axis rates.
        self._prepare_to_restore_individual_axes_ramp_rates()

        self._update_individual_axes_ramp_rates(values)

        axes = (self._instrument_x, self._instrument_y, self._instrument_z)

        for axis_instrument, value in zip(axes, values):
            current_actual = axis_instrument.field()

            # If the new set point is practically equal to the
            # current one then do nothing
            if np.isclose(value, current_actual, rtol=0, atol=1e-8):
                self.log.debug(
                    f"Simultaneous ramp: {axis_instrument.short_name} is "
                    f"already at target field {value} "
                    f"{axis_instrument.field.unit} "
                    f"({current_actual} exactly)"
                )
                continue

            self.log.debug(
                f"Simultaneous ramp: setting {axis_instrument.short_name} "
                f"target field to {value} {axis_instrument.field.unit}"
            )

            axis_instrument.set_field(value, perform_safety_check=False, block=False)

        if self.block_during_ramp() is True:
            self.log.debug(f"Simultaneous ramp: blocking until ramp is finished")
            self.wait_while_all_axes_ramping()
        else:
            self.log.debug("Simultaneous ramp: not blocking until ramp is finished")

        self.log.debug(f"Simultaneous ramp: returning from the ramp call")

    def _perform_default_ramp(self, values: Tuple[float, float, float]) -> None:
        """Ramp axes sequentially: first all axes whose |field| decreases,
        then all axes whose |field| increases."""
        operators: Tuple[Callable[[Any, Any], bool], ...] = (np.less, np.greater)
        for operator in operators:
            # First ramp the coils that are decreasing in field strength.
            # This will ensure that we are always in a safe region as
            # far as the quenching of the magnets is concerned
            for name, value in zip(["x", "y", "z"], values):
                instrument = getattr(self, f"_instrument_{name}")
                current_actual = instrument.field()

                # If the new set point is practically equal to the
                # current one then do nothing
                if np.isclose(value, current_actual, rtol=0, atol=1e-8):
                    continue

                # evaluate if the new set point is smaller or larger
                # than the current value
                if not operator(abs(value), abs(current_actual)):
                    continue

                instrument.set_field(
                    value,
                    perform_safety_check=False,
                    block=self.block_during_ramp.get(),
                )

    def _prepare_to_restore_individual_axes_ramp_rates(self) -> None:
        """Arrange for each axis's ramp rate to be restored when
        ``self._exit_stack`` is closed (see wait_while_all_axes_ramping)."""
        for instrument in (self._instrument_x, self._instrument_y, self._instrument_z):
            self._exit_stack.enter_context(instrument.ramp_rate.restore_at_exit())
        self._exit_stack.callback(
            self.log.debug,
            "Restoring individual axes ramp rates",
        )

    def wait_while_all_axes_ramping(self) -> None:
        """
        Wait and blocks as long as any magnet axis is ramping. After the
        ramping is finished, also resets the individual ramp rates of the
        magnet axes if those were made to be restored, e.g. by using
        ``simultaneous`` ramp mode.
        """
        while self.any_axis_is_ramping():
            self._instrument_x._sleep(self.ramping_state_check_interval.get())

        # Closing the stack restores any ramp rates registered by
        # _prepare_to_restore_individual_axes_ramp_rates.
        self._exit_stack.close()

    def any_axis_is_ramping(self) -> bool:
        """
        Returns True if any of the magnet axes are currently ramping, or
        False if none of the axes are ramping.
        """
        return any(
            axis_instrument.ramping_state() == "ramping"
            for axis_instrument in (
                self._instrument_x,
                self._instrument_y,
                self._instrument_z,
            )
        )

    def pause(self) -> None:
        """Pause all magnet axes."""
        for axis_instrument in (
            self._instrument_x,
            self._instrument_y,
            self._instrument_z,
        ):
            axis_instrument.pause()

    def _request_field_change(self, instrument: AMI430, value: numbers.Real) -> None:
        """
        This method is called by the child x/y/z magnets if they are set
        individually. It results in additional safety checks being
        performed by this 3D driver.
        """
        # NOTE(review): _set_x/_set_y/_set_z are not defined in this chunk —
        # presumably defined elsewhere in the class/file; verify.
        if instrument is self._instrument_x:
            self._set_x(value)
        elif instrument is self._instrument_y:
            self._set_y(value)
        elif instrument is self._instrument_z:
            self._set_z(value)
        else:
            msg = "This magnet doesnt belong to its specified parent {}"
            raise NameError(msg.format(self))

    def _get_measured_field_vector(self) -> FieldVector:
        """Return the currently measured field as a ``FieldVector``."""
        return FieldVector(
            x=self._instrument_x.field(),
            y=self._instrument_y.field(),
            z=self._instrument_z.field(),
        )

    def _get_measured(self, *names: str) -> Union[numbers.Real, List[numbers.Real]]:
        """Return the measured components ``names`` (scalar if one name)."""
        measured_field_vector = self._get_measured_field_vector()

        measured_values = measured_field_vector.get_components(*names)

        # Convert angles from radians to degrees
        d = dict(zip(names, measured_values))

        # Do not do "return list(d.values())", because then there is
        # no guaranty that the order in which the values are returned
        # is the same as the original intention
        return_value = [d[name] for name in names]

        if len(names) == 1:
            return_value = return_value[0]

        return return_value

    def _get_setpoints(
        self, names: Sequence[str]
    ) -> Union[numbers.Real, List[numbers.Real]]:
        """Return the set-point components ``names`` (scalar if one name)."""
        measured_values = self._set_point.get_components(*names)

        # Convert angles from radians to degrees
        d = dict(zip(names, measured_values))
        return_value = [d[name] for name in names]
        # Do not do "return list(d.values())", because then there is
        # no guarantee that the order in which the values are returned
        # is the same as the original intention

        if len(names) == 1:
            return_value = return_value[0]

        return return_value

    def _set_setpoints(self, names: Sequence[str], values: Sequence[float]) -> None:
        """Update the set point in the coordinates given by ``names`` and
        drive the child instruments to the resulting cartesian field."""
        kwargs = dict(zip(names, np.atleast_1d(values)))

        # Work on a copy so the stored set point is only replaced after the
        # child instruments were adjusted successfully.
        set_point = FieldVector()
        set_point.copy(self._set_point)
        if len(kwargs) == 3:
            set_point.set_vector(**kwargs)
        else:
            set_point.set_component(**kwargs)

        self._adjust_child_instruments(set_point.get_components("x", "y", "z"))

        self._set_point = set_point