class PysmurfController:
    """
    Controller object for running pysmurf scripts and functions.

    Args:
        agent (ocs.ocs_agent.OCSAgent):
            OCSAgent object which is running
        args (Namespace):
            argparse namespace with site_config and agent specific arguments

    Attributes:
        agent (ocs.ocs_agent.OCSAgent):
            OCSAgent object which is running
        log (txaio.tx.Logger):
            txaio logger object created by agent
        prot (PysmurfScriptProtocol):
            protocol used to call and monitor external pysmurf scripts
        lock (ocs.ocs_twisted.TimeoutLock):
            lock to protect multiple pysmurf scripts from running
            simultaneously.
        slot (int):
            ATCA Slot of the smurf-card this agent is commanding.
    """

    def __init__(self, agent, args):
        self.agent: ocs_agent.OCSAgent = agent
        self.log = agent.log
        self.prot = None
        self.lock = TimeoutLock()
        # Session object of the operation currently running; used by
        # _on_session_data to route monitor-feed messages to the right session.
        self.current_session = None

        self.slot = args.slot
        if self.slot is None:
            # Fall back to the slot baked into the container environment.
            self.slot = os.environ['SLOT']

        if args.monitor_id is not None:
            self.agent.subscribe_on_start(
                self._on_session_data,
                'observatory.{}.feeds.pysmurf_session_data'.format(
                    args.monitor_id),
            )

    def _on_session_data(self, _data):
        """Feed callback routing pysmurf-publisher messages into the
        currently active session's data / message log."""
        data, feed = _data

        if self.current_session is not None:
            # Only accept messages originating from our own smurf publisher.
            if data['id'] == os.environ.get("SMURFPUB_ID"):
                if data['type'] == 'session_data':
                    if isinstance(data['payload'], dict):
                        self.current_session.data.update(data['payload'])
                    else:
                        self.log.warn(
                            "Session data not passed as a dict!! Skipping...")
                elif data['type'] == 'session_log':
                    if isinstance(data['payload'], str):
                        self.current_session.add_message(data['payload'])

    def _get_smurf_control(self, session=None, load_tune=True, **kwargs):
        """
        Gets pysmurf and det-config instances for sodetlib functions.

        Args:
            session (OpSession, optional):
                Session object to attach to the pysmurf instance so sodetlib
                functions can publish data back to it.
            load_tune (bool, optional):
                If True, load the tunefile from the device config.
            **kwargs:
                Passed through to ``cfg.get_smurf_control``.
        """
        cfg = DetConfig()
        cfg.load_config_files(slot=self.slot)
        S = cfg.get_smurf_control(**kwargs)
        if load_tune:
            S.load_tune(cfg.dev.exp['tunefile'])
        S._ocs_session = session
        return S, cfg

    @inlineCallbacks
    def _run_script(self, script, args, log, session):
        """
        Runs a pysmurf control script. Can only run from the reactor.

        Args:
            script (string):
                path to the script you wish to run
            args (list, optional):
                List of command line arguments to pass to the script.
                Defaults to [].
            log (string or bool, optional):
                Determines if and how the process's stdout should be logged.
                You can pass the path to a logfile, True to use the agent's
                log, or False to not log at all.
        """
        with self.lock.acquire_timeout(0, job=script) as acquired:
            if not acquired:
                return False, "The requested script cannot be run because " \
                              "script {} is already running".format(
                                  self.lock.job)

            self.current_session = session
            try:
                # IO is not really safe from the reactor thread, so we possibly
                # need to find another way to do this if people use it and it
                # causes problems...
                logger = None
                if isinstance(log, str):
                    self.log.info("Logging output to file {}".format(log))
                    log_file = yield threads.deferToThread(open, log, 'a')
                    logger = Logger(
                        observer=FileLogObserver(log_file, log_formatter))
                elif log:
                    # If log==True, use agent's logger
                    logger = self.log

                self.prot = PysmurfScriptProtocol(script, log=logger)
                self.prot.deferred = Deferred()
                python_exec = sys.executable

                cmd = [python_exec, '-u', script] + list(map(str, args))

                self.log.info("{exec}, {cmd}", exec=python_exec, cmd=cmd)

                reactor.spawnProcess(self.prot, python_exec, cmd,
                                     env=os.environ)

                rc = yield self.prot.deferred

                return (rc == 0), \
                    "Script has finished with exit code {}".format(rc)

            finally:
                # Sleep to allow any remaining messages to be put into the
                # session var
                yield dsleep(1.0)
                self.current_session = None

    @inlineCallbacks
    def run(self, session, params=None):
        """run(script, args=[], log=True)

        **Task** - Runs a pysmurf control script.

        Parameters:
            script (string):
                Path of the pysmurf script to run.
            args (list, optional):
                List of command line arguments to pass to the script.
                Defaults to [].
            log (string/bool, optional):
                Determines if and how the process's stdout should be logged.
                You can pass the path to a logfile, True to use the agent's
                log, or False to not log at all.

        Notes:
            Data and logs may be passed from the pysmurf control script to the
            session object by publishing it via the Pysmurf Publisher using
            the message types ``session_data`` and ``session_logs``
            respectively.

            For example, below is a simple script which starts the data
            stream and returns the datfile path and the list of active
            channels to the session::

                active_channels = S.which_on(0)
                datafile = S.stream_data_on()
                S.pub.publish({
                    'datafile': datafile, 'active_channels': active_channels
                }, msgtype='session_data')

            This would result in the following session.data object::

                >>> response.session['data']
                {
                    'datafile': '/data/smurf_data/20200316/1584401673/outputs/1584402020.dat',
                    'active_channels': [0,1,2,3,4]
                }
        """
        ok, msg = yield self._run_script(
            params['script'],
            params.get('args', []),
            params.get('log', True),
            session
        )
        return ok, msg

    def abort(self, session, params=None):
        """abort()

        **Task** - Aborts the actively running script.
        """
        # Bug fix: self.prot is None until the first script is spawned, and
        # its transport is None until spawnProcess runs; calling abort then
        # raised AttributeError instead of reporting "nothing to abort".
        if self.prot is None or self.prot.transport is None:
            return False, "No script is currently running"
        self.prot.transport.signalProcess('KILL')
        return True, "Aborting process"

    @ocs_agent.param('poll_interval', type=float, default=10)
    def check_state(self, session, params=None):
        """check_state(poll_interval=10)

        **Process** - Continuously checks the current state of the smurf.
        This will not modify the smurf state, so this task can be run in
        conjunction with other smurf operations. This will continuously poll
        smurf metadata and update the ``session.data`` object.

        Args
        -----
        poll_interval : float
            Time (sec) between updates.

        Notes
        -------
        The following data will be written to the session.data object::

            >> response.session['data']
            {
                'channel_mask': Array of channels that are streaming data,
                'downsample_factor': downsample_factor,
                'agg_time': Buffer time per G3Frame (sec),
                'open_g3stream': True if data is currently streaming to G3,
                'pysmurf_action': Current pysmurf action,
                'pysmurf_action_timestamp': Current pysmurf-action timestamp,
                'stream_tag': stream-tag for the current g3 stream,
                'last_update': Time that session-data was last updated,
                'stream_id': Stream-id of the controlled smurf instance
            }
        """
        # no_dir=True since this is a read-only poll that should not create
        # a new pysmurf output directory.
        S, cfg = self._get_smurf_control(load_tune=False, no_dir=True)
        reg = sdl.Registers(S)

        session.set_status('running')
        kw = {'retry_on_fail': False}
        while session.status in ['starting', 'running']:
            try:
                d = dict(
                    channel_mask=S.get_channel_mask(**kw).tolist(),
                    downsample_factor=S.get_downsample_factor(**kw),
                    agg_time=reg.agg_time.get(**kw),
                    open_g3stream=reg.open_g3stream.get(**kw),
                    pysmurf_action=reg.pysmurf_action.get(
                        **kw, as_string=True),
                    pysmurf_action_timestamp=reg.pysmurf_action_timestamp.get(
                        **kw),
                    stream_tag=reg.stream_tag.get(**kw, as_string=True),
                    last_update=time.time(),
                    stream_id=cfg.stream_id,
                )
                session.data.update(d)
            except RuntimeError:
                self.log.warn("Could not connect to epics server! Waiting and "
                              "then trying again")
            time.sleep(params['poll_interval'])

        return True, "Finished checking state"

    def _stop_check_state(self, session, params):
        """Stopper for check state process"""
        session.set_status('stopping')

    @ocs_agent.param("duration", default=None, type=float)
    @ocs_agent.param('kwargs', default=None)
    @ocs_agent.param('load_tune', default=True, type=bool)
    def stream(self, session, params):
        """stream(duration=None)

        **Process** - Stream smurf data. If a duration is specified, stream
        will end after that amount of time. If unspecified, the stream will
        run until the stop function is called.

        Args
        -----
        duration : float, optional
            If set, determines how many seconds to stream data. By default,
            will leave stream open until stop function is called.
        kwargs : dict
            A dictionary containing additional keyword arguments to pass
            to sodetlib's ``stream_g3_on`` function
        load_tune : bool
            If true, will load a tune-file to the pysmurf object on
            instantiation.

        Notes
        ------
        The following data will be written to the session.data object::

            >> response.session['data']
            {
                'stream_id': Stream-id for the slot,
                'sid': Session-id for the streaming session,
            }
        """
        if params['kwargs'] is None:
            params['kwargs'] = {}

        with self.lock.acquire_timeout(0, job='stream') as acquired:
            if not acquired:
                return False, f"Operation failed: {self.lock.job} is running."

            S, cfg = self._get_smurf_control(session=session,
                                             load_tune=params['load_tune'])

            stop_time = None
            if params['duration'] is not None:
                stop_time = time.time() + params['duration']

            session.data['stream_id'] = cfg.stream_id
            session.data['sid'] = sdl.stream_g3_on(S, **params['kwargs'])
            session.set_status('running')
            while session.status in ['starting', 'running']:
                if stop_time is not None:
                    if time.time() > stop_time:
                        break
                time.sleep(1)
            sdl.stream_g3_off(S)

        return True, 'Finished streaming data'

    def _stream_stop(self, session, params=None):
        """Stopper for the stream process."""
        session.set_status('stopping')
        return True, "Requesting to end stream"

    @ocs_agent.param('bands', default=None)
    @ocs_agent.param('kwargs', default=None)
    def uxm_setup(self, session, params):
        """uxm_setup(bands=None, kwargs=None)

        **Task** - Runs first-time setup procedure for a UXM. This will run
        the following operations:

            1. Setup Amps (~1 min)
            2. Estimate attens if attens are not already set in the device
               cfg (~1 min / band)
            3. Estimate phase delay (~1 min / band)
            4. Setup tune (~7 min / band)
            5. Setup tracking param (~30s / band)
            6. Measure noise (~45s)

        See the `sodetlib setup docs
        <https://simons1.princeton.edu/docs/sodetlib/operations/setup.html#first-time-setup>`_
        for more information on the sodetlib setup procedure and allowed
        keyword arguments.

        Args
        -----
        bands : list, int
            Bands to set up. Defaults to all.
        kwargs : dict
            Dict containing additional keyword args to pass to the uxm_setup
            function.

        Notes
        -------
        SODETLIB functions such as ``uxm_setup`` and any other functions
        called by ``uxm_setup`` will add relevant data to the
        ``session.data`` object to a unique key. For example, if all is
        successful ``session.data`` may look like::

            >> response.session['data']
            {
                'timestamps': [('setup_amps', 1651162263.0204525), ...],
                'setup_amps_summary': {
                   'success': True,
                   'amp_50k_Id': 15.0,
                   'amp_hemt_Id': 8.0,
                   'amp_50k_Vg': -0.52,
                   'amp_hemt_Vg': -0.829,
                },
                'setup_phase_delay': {
                    'bands': [0, 1, ...],
                    'band_delay_us': List of band delays
                },
                'noise': {
                   'band_medians': List of median white noise for each band
                }
            }
        """
        if params['kwargs'] is None:
            params['kwargs'] = {}

        with self.lock.acquire_timeout(0, job='uxm_setup') as acquired:
            if not acquired:
                return False, f"Operation failed: {self.lock.job} is running."

            S, cfg = self._get_smurf_control(session=session)
            session.set_status('running')
            self.log.info("Starting UXM setup")
            success, summary = uxm_setup.uxm_setup(
                S, cfg, bands=params['bands'], **params['kwargs'])

            if not success:
                # timestamps entries are (step_name, time) tuples; the last
                # one is the step that failed.
                final_step = session.data['timestamps'][-1][0]
                return False, f"Failed on step {final_step}"
            else:
                return True, "Completed full UXM-Setup procedure"

    @ocs_agent.param('bands', default=None)
    @ocs_agent.param('kwargs', default=None)
    def uxm_relock(self, session, params):
        """uxm_relock(bands=None, kwargs=None)

        **Task** - Relocks detectors to existing tune if setup has already
        been run. Runs the following operations:

            1. Setup Amps (~1 min)
            2. Relocks detectors, setup notches (if requested), and serial
               gradient descent / eta scan (~5 min / band)
            3. Tracking setup (~20s / band)
            4. Noise check (~45s)

        See the `sodetlib relock docs
        <https://simons1.princeton.edu/docs/sodetlib/operations/setup.html#relocking>`_
        for more information on the sodetlib relock procedure and allowed
        keyword arguments.

        Args
        -----
        bands : list, int
            Bands to set up. Defaults to all.
        kwargs : dict
            Dict containing additional keyword args to pass to the uxm_relock
            function.

        Notes
        -------
        SODETLIB functions such as ``uxm_relock`` and any other functions
        called will add relevant data to the ``session.data`` object to a
        unique key. For example, if all is successful ``session.data`` may
        look like::

            >> response.session['data']
            {
                'timestamps': [('setup_amps', 1651162263.0204525), ...],
                'setup_amps_summary': {
                   'success': True,
                   'amp_50k_Id': 15.0,
                   'amp_hemt_Id': 8.0,
                   'amp_50k_Vg': -0.52,
                   'amp_hemt_Vg': -0.829,
                },
                'noise': {
                   'band_medians': List of median white noise for each band
                }
            }
        """
        if params['kwargs'] is None:
            params['kwargs'] = {}

        with self.lock.acquire_timeout(0, job='uxm_relock') as acquired:
            if not acquired:
                return False, f"Operation failed: {self.lock.job} is running."

            session.set_status('running')
            S, cfg = self._get_smurf_control(session=session)
            success, summary = uxm_relock.uxm_relock(
                S, cfg, bands=params['bands'], **params['kwargs'])

        return success, "Finished UXM Relock"

    @ocs_agent.param('duration', default=30., type=float)
    @ocs_agent.param('kwargs', default=None)
    def take_noise(self, session, params):
        """take_noise(duration=30., kwargs=None)

        **Task** - Takes a short timestream and calculates noise statistics.
        Median white noise level for each band will be stored in the session
        data. See the `sodetlib noise docs
        <https://simons1.princeton.edu/docs/sodetlib/noise.html>`_ for more
        information on the noise function and possible keyword arguments.

        Args
        -----
        duration : float
            Duration of timestream to take for noise calculation.
        kwargs : dict
            Dict containing additional keyword args to pass to the take_noise
            function.

        Notes
        -------
        Median white noise levels for each band will be stored in the
        session.data object, for example::

            >> response.session['data']
            {
                'noise': {
                   'band_medians': List of median white noise for each band
                }
            }
        """
        if params['kwargs'] is None:
            params['kwargs'] = {}

        with self.lock.acquire_timeout(0, job='take_noise') as acquired:
            if not acquired:
                return False, f"Operation failed: {self.lock.job} is running."

            session.set_status('running')
            S, cfg = self._get_smurf_control(session=session)
            sdl.noise.take_noise(S, cfg, params['duration'],
                                 **params['kwargs'])

        return True, "Finished taking noise"

    @ocs_agent.param('kwargs', default=None)
    def take_bgmap(self, session, params):
        """take_bgmap(kwargs=None)

        **Task** - Takes a bias-group map. This will calculate the number of
        channels assigned to each bias group and put that into the session
        data object along with the filepath to the analyzed bias-step output.
        See the `bias steps docs page
        <https://simons1.princeton.edu/docs/sodetlib/operations/bias_steps.html>`_
        for more information on what additional keyword arguments can be
        passed.

        Args
        ----
        kwargs : dict
            Additional kwargs to pass to take_bgmap function.

        Notes
        ------
        The filepath of the BiasStepAnalysis object and the number of
        channels assigned to each bias group will be written to the
        session.data object::

            >> response.session['data']
            {
                'nchans_per_bg': [123, 183, 0, 87, ...],
                'filepath': /path/to/bias_step_file/on/smurf_server.npy,
            }
        """
        if params['kwargs'] is None:
            params['kwargs'] = {}

        with self.lock.acquire_timeout(0, job='take_bgmap') as acquired:
            if not acquired:
                return False, f"Operation failed: {self.lock.job} is running."

            session.set_status('running')
            S, cfg = self._get_smurf_control(session=session)
            bsa = bias_steps.take_bgmap(S, cfg, **params['kwargs'])

            # Count channels per bias group; the final slot counts
            # unassigned channels (bgmap == -1).
            nchans_per_bg = [0 for _ in range(NBIASLINES + 1)]
            for bg in range(NBIASLINES):
                nchans_per_bg[bg] = int(np.sum(bsa.bgmap == bg))
            nchans_per_bg[-1] = int(np.sum(bsa.bgmap == -1))

            session.data = {
                'nchans_per_bg': nchans_per_bg,
                'filepath': bsa.filepath,
            }

        return True, "Finished taking bgmap"

    @ocs_agent.param('kwargs', default=None)
    def take_iv(self, session, params):
        """take_iv(kwargs=None)

        **Task** - Takes an IV. This will add the normal resistance array
        and channel info to the session data object along with the analyzed
        IV filepath. See the `sodetlib IV docs page
        <https://simons1.princeton.edu/docs/sodetlib/operations/iv.html>`_
        for more information on what additional keyword arguments can be
        passed in.

        Args
        ----
        kwargs : dict
            Additional kwargs to pass to the ``take_iv`` function.

        Notes
        ------
        The following data will be written to the session.data object::

            >> response.session['data']
            {
                'bands': Bands number of each resonator
                'channels': Channel number of each resonator
                'bgmap': BGMap assignment for each resonator
                'R_n': Normal resistance for each resonator
                'filepath': Filepath of saved IVAnalysis object
            }
        """
        if params['kwargs'] is None:
            params['kwargs'] = {}

        with self.lock.acquire_timeout(0, job='take_iv') as acquired:
            if not acquired:
                return False, f"Operation failed: {self.lock.job} is running."

            session.set_status('starting')
            # Consistency fix: pass the session through (like every other
            # operation) so sodetlib can publish progress to session.data.
            S, cfg = self._get_smurf_control(session=session)
            iva = iv.take_iv(S, cfg, **params['kwargs'])
            session.data = {
                'bands': iva.bands.tolist(),
                'channels': iva.channels.tolist(),
                'bgmap': iva.bgmap.tolist(),
                'R_n': iva.R_n.tolist(),
                'filepath': iva.filepath,
            }

        return True, "Finished taking IV"

    @ocs_agent.param('kwargs', default=None)
    def take_bias_steps(self, session, params):
        """take_bias_steps(kwargs=None)

        **Task** - Takes bias_steps and saves the output filepath to the
        session data object. See the `sodetlib bias step docs page
        <https://simons1.princeton.edu/docs/sodetlib/operations/bias_steps.html>`_
        for more information on bias steps and what kwargs can be passed in.

        Args
        ----
        kwargs : dict
            Additional kwargs to pass to ``take_bias_steps`` function.

        Notes
        ------
        The following data will be written to the session.data object::

            >> response.session['data']
            {
                'filepath': Filepath of saved BiasStepAnalysis object
            }
        """
        if params['kwargs'] is None:
            params['kwargs'] = {}

        with self.lock.acquire_timeout(0, job='bias_steps') as acquired:
            if not acquired:
                return False, f"Operation failed: {self.lock.job} is running."

            session.set_status('starting')
            S, cfg = self._get_smurf_control(session=session)
            bsa = bias_steps.take_bias_steps(S, cfg, **params['kwargs'])
            session.data = {'filepath': bsa.filepath}

        return True, "Finished taking bias steps"

    @ocs_agent.param('rfrac', default=(0.3, 0.6))
    @ocs_agent.param('kwargs', default=None)
    def bias_dets(self, session, params):
        """bias_dets(rfrac=(0.3, 0.6), kwargs=None)

        **Task** - Biases detectors to a target Rfrac value or range. This
        function uses IV results to determine voltages for each bias-group.
        If rfrac is set to be a value, the bias voltage will be set such that
        the median rfrac across all channels is as close as possible to the
        set value. If a range is specified, the voltage will be chosen to
        maximize the number of channels in that range.

        See the sodetlib docs page for `biasing dets into transition
        <https://simons1.princeton.edu/docs/sodetlib/operations/iv.html#biasing-detectors-into-transition>`_
        for more information on the functions and additional keyword args
        that can be passed in.

        Args
        -------
        rfrac : float, tuple
            Target rfrac range to aim for. If this is a float, bias voltages
            will be chosen to get the median rfrac of each bias group as
            close as possible to that value. If this is a tuple, bias
            voltages will be chosen to maximize the number of channels with
            rfrac inside the range.
        kwargs : dict
            Additional kwargs to pass to the ``bias_dets`` function.

        Notes
        ------
        The following data will be written to the session.data object::

            >> response.session['data']
            {
                'biases': List of voltage bias values for each bias-group
            }
        """
        if params['kwargs'] is None:
            params['kwargs'] = {}

        # Bug fix: lock job was previously mislabeled 'bias_steps'
        # (copy-paste), which misreported the running operation in lock
        # diagnostics.
        with self.lock.acquire_timeout(0, job='bias_dets') as acquired:
            if not acquired:
                return False, f"Operation failed: {self.lock.job} is running."

            session.set_status('running')
            S, cfg = self._get_smurf_control(session=session)
            if isinstance(params['rfrac'], (int, float)):
                biases = bias_dets.bias_to_rfrac(
                    S, cfg, rfrac=params['rfrac'], **params['kwargs'])
            else:
                biases = bias_dets.bias_to_rfrac(
                    S, cfg, rfrac_range=params['rfrac'], **params['kwargs'])

            session.data['biases'] = biases.tolist()

        return True, "Finished biasing detectors"
class HWPSimulatorAgent:
    """Agent for reading amplitude data from an Arduino-based HWP simulator.

    Args:
        agent (ocs.ocs_agent.OCSAgent): OCSAgent object which is running.
        port (str): Serial port of the Arduino (default '/dev/ttyACM0').
    """

    def __init__(self, agent, port='/dev/ttyACM0'):
        self.active = True
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.port = port
        self.take_data = False
        self.arduino = HWPSimulator(port=self.port)

        self.initialized = False

        agg_params = {'frame_length': 60}
        self.agent.register_feed('amplitudes', record=True,
                                 agg_params=agg_params, buffer_time=1)

    def init_arduino(self):
        """
        Initializes the Arduino connection.
        """
        if self.initialized:
            return True, "Already initialized."

        with self.lock.acquire_timeout(timeout=0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."
            try:
                # A first read may fail while the serial buffer contains a
                # partial line; deliberately ignore it to flush the stream.
                self.arduino.read()
            except ValueError:
                pass
            print("Arduino HWP Simulator initialized.")

        self.initialized = True
        return True, 'Arduino HWP Simulator initialized.'

    def start_acq(self, session, params):
        """Starts acquiring data.

        Args:
            sampling_frequency (float):
                Sampling frequency for data collection. Defaults to 2.5 Hz.
        """
        # Bug fix: params is None when the operation is started without
        # arguments; .get() on None raised AttributeError.
        if params is None:
            params = {}

        f_sample = params.get('sampling_frequency', 2.5)
        # Subtract approximate read overhead; clamp at 0 so high sampling
        # frequencies (>10 Hz) don't produce a negative sleep, which
        # time.sleep() rejects with ValueError.
        sleep_time = max(1 / f_sample - 0.1, 0)

        if not self.initialized:
            self.init_arduino()

        with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
            if not acquired:
                self.log.warn("Could not start acq because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')
            self.take_data = True

            while self.take_data:
                data = {
                    'timestamp': time.time(),
                    'block_name': 'amps',
                    'data': {}
                }
                data['data']['amplitude'] = self.arduino.read()
                time.sleep(sleep_time)
                self.agent.publish_to_feed('amplitudes', data)

            self.agent.feeds['amplitudes'].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """
        Stops the data acquisition.
        """
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running.'
class LS240_Agent:
    """Agent for interfacing with a Lakeshore 240 temperature module.

    Args:
        agent (ocs.ocs_agent.OCSAgent): OCSAgent object which is running.
        num_channels (int): Number of channels on the module (default 2).
        fake_data (bool): If True, generate random data instead of talking
            to hardware.
        port (str): Serial port for the module (default '/dev/ttyUSB0').
    """

    def __init__(self, agent, num_channels=2, fake_data=False,
                 port="/dev/ttyUSB0"):
        self.active = True
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()

        self.fake_data = fake_data
        self.module = None
        self.port = port
        self.thermometers = [
            'Channel {}'.format(i + 1) for i in range(num_channels)
        ]

        self.initialized = False
        self.take_data = False

        # Registers Temperature and Voltage feeds
        agg_params = {
            'frame_length': 60,
        }
        self.agent.register_feed('temperatures',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    # Task functions.
    def init_lakeshore_task(self, session, params=None):
        """
        Task to initialize Lakeshore 240 Module.
        """
        if self.initialized:
            return True, "Already Initialized Module"

        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('starting')

            if self.fake_data:
                session.add_message("No initialization since faking data")
            else:
                self.module = Module(port=self.port)
                print("Initialized Lakeshore module: {!s}".format(self.module))
                session.add_message("Lakeshore initialized with ID: %s"
                                    % self.module.inst_sn)

        self.initialized = True
        return True, 'Lakeshore module initialized.'

    def set_values(self, session, params=None):
        """
        A task to set sensor parameters for a Lakeshore240 Channel.

        Args:
            channel (int, 1 -- 2 or 8): Channel number to set.

        Optional Args:
            sensor (int, 1, 2, or 3):
                1 = Diode, 2 = PlatRTC, 3 = NTC RTD
            auto_range (int, 0 or 1):
                Must be 0 or 1. Specifies if channel should use autorange.
            range (int 0-8):
                Specifies range if autorange is false. Only settable for
                NTC RTD.

                    0 = 10 Ohms (1 mA)
                    1 = 30 Ohms (300 uA)
                    2 = 100 Ohms (100 uA)
                    3 = 300 Ohms (30 uA)
                    4 = 1 kOhm (10 uA)
                    5 = 3 kOhms (3 uA)
                    6 = 10 kOhms (1 uA)
                    7 = 30 kOhms (300 nA)
                    8 = 100 kOhms (100 nA)

            current_reversal (int, 0 or 1):
                Specifies if input current reversal is on or off.
                Always 0 if input is a diode.
            unit (int, 1-4):
                Specifies preferred units parameter, and sets the units for
                alarm settings.

                    1 = Kelvin
                    2 = Celsius
                    3 = Sensor
                    4 = Fahrenheit

            enabled (int, 0 or 1):
                sets if channel is enabled
            name (str):
                sets name of channel
        """
        if params is None:
            params = {}

        with self.lock.acquire_timeout(0, job='set_values') as acquired:
            if not acquired:
                self.log.warn("Could not start set_values because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            if not self.fake_data:
                # Channels are 1-indexed in params, 0-indexed in the module.
                self.module.channels[params['channel'] - 1].set_values(
                    sensor=params.get('sensor'),
                    auto_range=params.get('auto_range'),
                    range=params.get('range'),
                    current_reversal=params.get('current_reversal'),
                    unit=params.get('unit'),
                    enabled=params.get('enabled'),
                    name=params.get('name'),
                )

        return True, 'Set values for channel {}'.format(params['channel'])

    def upload_cal_curve(self, session, params=None):
        """
        Task to upload a calibration curve to a channel.

        Args:
            channel (int, 1 -- 2 or 8): Channel number
            filename (str): filename for cal curve
        """
        channel_num = params['channel']
        filename = params['filename']

        with self.lock.acquire_timeout(0, job='upload_cal_curve') as acquired:
            if not acquired:
                # Bug fix: this message previously named 'set_values'
                # (copy-paste error).
                self.log.warn("Could not start upload_cal_curve because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            if not self.fake_data:
                channel = self.module.channels[channel_num - 1]
                self.log.info(
                    "Starting upload to channel {}...".format(channel_num))
                channel.load_curve(filename)
                self.log.info("Finished uploading.")

        return True, "Uploaded curve to channel {}".format(channel_num)

    def start_acq(self, session, params=None):
        """
        Task to start data acquisition.

        Args:
            sampling_frequency (float):
                Sampling frequency for data collection. Defaults to 2.5 Hz
        """
        if params is None:
            params = {}

        f_sample = params.get('sampling_frequency', 2.5)
        # Subtract approximate read overhead; clamp at 0 so a high sampling
        # frequency can't produce a negative sleep (time.sleep would raise).
        sleep_time = max(1 / f_sample - 0.01, 0)

        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn("Could not start acq because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')
            self.take_data = True

            while self.take_data:
                data = {
                    'timestamp': time.time(),
                    'block_name': 'temps',
                    'data': {}
                }

                if self.fake_data:
                    for therm in self.thermometers:
                        data['data'][therm + ' T'] = random.randrange(250, 350)
                        data['data'][therm + ' V'] = random.randrange(250, 350)
                    time.sleep(.2)
                else:
                    for i, therm in enumerate(self.thermometers):
                        data['data'][therm + ' T'] = \
                            self.module.channels[i].get_reading(unit='K')
                        data['data'][therm + ' V'] = \
                            self.module.channels[i].get_reading(unit='S')
                    time.sleep(sleep_time)

                self.agent.publish_to_feed('temperatures', data)

            self.agent.feeds['temperatures'].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """
        Stops acq process.
        """
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'
class LS372_Agent: """Agent to connect to a single Lakeshore 372 device. Args: name (ApplicationSession): ApplicationSession for the Agent. ip (str): IP Address for the 372 device. fake_data (bool, optional): generates random numbers without connecting to LS if True. dwell_time_delay (int, optional): Amount of time, in seconds, to delay data collection after switching channels. Note this time should not include the change pause time, which is automatically accounted for. Will automatically be reduced to dwell_time - 1 second if it is set longer than a channel's dwell time. This ensures at least one second of data collection at the end of a scan. """ def __init__(self, agent, name, ip, fake_data=False, dwell_time_delay=0): # self._acq_proc_lock is held for the duration of the acq Process. # Tasks that require acq to not be running, at all, should use # this lock. self._acq_proc_lock = TimeoutLock() # self._lock is held by the acq Process only when accessing # the hardware but released occasionally so that (short) Tasks # may run. Use a YieldingLock to guarantee that a waiting # Task gets activated preferentially, even if the acq thread # immediately tries to reacquire. self._lock = YieldingLock(default_timeout=5) self.name = name self.ip = ip self.fake_data = fake_data self.dwell_time_delay = dwell_time_delay self.module = None self.thermometers = [] self.log = agent.log self.initialized = False self.take_data = False self.agent = agent # Registers temperature feeds agg_params = { 'frame_length': 10 * 60 #[sec] } self.agent.register_feed('temperatures', record=True, agg_params=agg_params, buffer_time=1) def init_lakeshore_task(self, session, params=None): """init_lakeshore_task(params=None) Perform first time setup of the Lakeshore 372 communication. Args: params (dict): Parameters dictionary for passing parameters to task. Parameters: auto_acquire (bool, optional): Default is False. Starts data acquisition after initialization if True. 
""" if params is None: params = {} if self.initialized and not params.get('force', False): self.log.info("Lakeshore already initialized. Returning...") return True, "Already initialized" with self._lock.acquire_timeout(job='init') as acquired1, \ self._acq_proc_lock.acquire_timeout(timeout=0., job='init') \ as acquired2: if not acquired1: self.log.warn(f"Could not start init because " f"{self._lock.job} is already running") return False, "Could not acquire lock" if not acquired2: self.log.warn(f"Could not start init because " f"{self._acq_proc_lock.job} is already running") return False, "Could not acquire lock" session.set_status('running') if self.fake_data: self.res = random.randrange(1, 1000) session.add_message("No initialization since faking data") self.thermometers = ["thermA", "thermB"] else: self.module = LS372(self.ip) print("Initialized Lakeshore module: {!s}".format(self.module)) session.add_message("Lakeshore initilized with ID: %s" % self.module.id) self.thermometers = [ channel.name for channel in self.module.channels ] self.initialized = True # Start data acquisition if requested if params.get('auto_acquire', False): self.agent.start('acq') return True, 'Lakeshore module initialized.' def start_acq(self, session, params=None): """acq(params=None) Method to start data acquisition process. 
        The most recent data collected is stored in session.data in the
        structure::

            >>> session.data
            {"fields":
                {"Channel_05": {"T": 293.644, "R": 33.752, "timestamp": 1601924482.722671},
                 "Channel_06": {"T": 0, "R": 1022.44, "timestamp": 1601924499.5258765},
                 "Channel_08": {"T": 0, "R": 1026.98, "timestamp": 1601924494.8172355},
                 "Channel_01": {"T": 293.41, "R": 108.093, "timestamp": 1601924450.9315426},
                 "Channel_02": {"T": 293.701, "R": 30.7398, "timestamp": 1601924466.6130798}
                }
            }
        """
        # Two locks: _acq_proc_lock guarantees only one acq Process runs at a
        # time; _lock serializes device access against the short Tasks below.
        with self._acq_proc_lock.acquire_timeout(timeout=0, job='acq') \
                as acq_acquired, \
                self._lock.acquire_timeout(job='acq') as acquired:
            if not acq_acquired:
                self.log.warn(f"Could not start Process because "
                              f"{self._acq_proc_lock.job} is already running")
                return False, "Could not acquire lock"
            if not acquired:
                self.log.warn(f"Could not start Process because "
                              f"{self._lock.job} is holding the lock")
                return False, "Could not acquire lock"

            session.set_status('running')
            self.log.info("Starting data acquisition for {}".format(
                self.agent.agent_address))
            previous_channel = None
            last_release = time.time()

            session.data = {"fields": {}}

            self.take_data = True
            while self.take_data:
                # Relinquish sampling lock occasionally so queued Tasks
                # (heater changes, channel changes, ...) can run.
                if time.time() - last_release > 1.:
                    last_release = time.time()
                    if not self._lock.release_and_acquire(timeout=10):
                        self.log.warn(f"Failed to re-acquire sampling lock, "
                                      f"currently held by {self._lock.job}.")
                        continue

                if self.fake_data:
                    # Fake-data mode: publish random readings, no hardware I/O.
                    data = {
                        'timestamp': time.time(),
                        'block_name': 'fake-data',
                        'data': {}
                    }
                    for therm in self.thermometers:
                        reading = np.random.normal(self.res, 20)
                        data['data'][therm] = reading
                    time.sleep(.1)
                else:
                    active_channel = self.module.get_active_channel()

                    # The 372 reports the last updated measurement repeatedly
                    # during the "pause change time", this results in several
                    # stale datapoints being recorded. To get around this we
                    # query the pause time and skip data collection during it
                    # if the channel has changed (as it would if autoscan is
                    # enabled.)
                    if previous_channel != active_channel:
                        if previous_channel is not None:
                            pause_time = active_channel.get_pause()
                            self.log.debug("Pause time for {c}: {p}",
                                           c=active_channel.channel_num,
                                           p=pause_time)

                            dwell_time = active_channel.get_dwell()
                            self.log.debug("User set dwell_time_delay: {p}",
                                           p=self.dwell_time_delay)

                            # Check user set dwell time isn't too long
                            if self.dwell_time_delay > dwell_time:
                                self.log.warn("WARNING: User set dwell_time_delay of " + \
                                              "{delay} s is larger than channel " + \
                                              "dwell time of {chan_time} s. If " + \
                                              "you are autoscanning this will " + \
                                              "cause no data to be collected. " + \
                                              "Reducing dwell time delay to {s} s.",
                                              delay=self.dwell_time_delay,
                                              chan_time=dwell_time,
                                              s=dwell_time - 1)
                                total_time = pause_time + dwell_time - 1
                            else:
                                total_time = pause_time + self.dwell_time_delay

                            # NOTE(review): range() requires an int; this assumes
                            # get_pause()/get_dwell() return whole seconds as
                            # ints (or int-valued) — confirm against driver.
                            for i in range(total_time):
                                self.log.debug(
                                    "Sleeping for {t} more seconds...",
                                    t=total_time - i)
                                time.sleep(1)

                        # Track the last channel we measured
                        previous_channel = self.module.get_active_channel()

                    current_time = time.time()
                    data = {
                        'timestamp': current_time,
                        'block_name': active_channel.name,
                        'data': {}
                    }

                    # Collect both temperature and resistance values from each Channel
                    channel_str = active_channel.name.replace(' ', '_')
                    temp_reading = self.module.get_temp(
                        unit='kelvin', chan=active_channel.channel_num)
                    res_reading = self.module.get_temp(
                        unit='ohms', chan=active_channel.channel_num)

                    # For data feed
                    data['data'][channel_str + '_T'] = temp_reading
                    data['data'][channel_str + '_R'] = res_reading

                    # For session.data
                    field_dict = {
                        channel_str: {
                            "T": temp_reading,
                            "R": res_reading,
                            "timestamp": current_time
                        }
                    }
                    session.data['fields'].update(field_dict)

                session.app.publish_to_feed('temperatures', data)
                self.log.debug("{data}", data=session.data)

            return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """
        Stops acq process. Sets the flag polled by the acq loop; the loop
        exits after finishing its current iteration.
        """
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'

    def set_heater_range(self, session, params):
        """
        Adjust the heater range for servoing cryostat. Wait for a specified
        amount of time after the change.

        :param params: dict with 'range', 'wait' keys
        :type params: dict

        range - the heater range value to change to
        wait - time in seconds after changing the heater value to wait, allows
               the servo to adjust to the new heater range, typical value of
               ~600 seconds
        """
        with self._lock.acquire_timeout(job='set_heater_range') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            # NOTE(review): 'heater' values other than 'sample'/'still' leave
            # `heater` unbound and raise NameError below — confirm callers
            # always pass a valid value.
            heater_string = params.get('heater', 'sample')
            if heater_string.lower() == 'sample':
                heater = self.module.sample_heater
            elif heater_string.lower() == 'still':
                heater = self.module.still_heater

            current_range = heater.get_heater_range()

            # Skip the (long) settling wait if already at the requested range.
            if params['range'] == current_range:
                print(
                    "Current heater range matches commanded value. Proceeding unchanged."
                )
            else:
                heater.set_heater_range(params['range'])
                time.sleep(params['wait'])

            return True, f'Set {heater_string} heater range to {params["range"]}'

    def set_excitation_mode(self, session, params):
        """
        Set the excitation mode of a specified channel.

        :param params: dict with "channel" and "mode" keys for
                       Channel.set_excitation_mode()
        :type params: dict
        """
        with self._lock.acquire_timeout(job='set_excitation_mode') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            self.module.channels[params['channel']].set_excitation_mode(
                params['mode'])
            session.add_message(
                f'post message in agent for Set channel {params["channel"]} excitation mode to {params["mode"]}'
            )
            print(
                f'print statement in agent for Set channel {params["channel"]} excitation mode to {params["mode"]}'
            )

            return True, f'return text for Set channel {params["channel"]} excitation mode to {params["mode"]}'

    def set_excitation(self, session, params):
        """
        Set the excitation voltage/current value of a specified channel.

        :param params: dict with "channel" and "value" keys for
                       Channel.set_excitation()
        :type params: dict
        """
        with self._lock.acquire_timeout(job='set_excitation') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            # Avoid a redundant device write if the value is already set.
            current_excitation = self.module.channels[
                params['channel']].get_excitation()

            if params['value'] == current_excitation:
                print(
                    f'Channel {params["channel"]} excitation already set to {params["value"]}'
                )
            else:
                self.module.channels[params['channel']].set_excitation(
                    params['value'])
                session.add_message(
                    f'Set channel {params["channel"]} excitation to {params["value"]}'
                )
                print(
                    f'Set channel {params["channel"]} excitation to {params["value"]}'
                )

            return True, f'Set channel {params["channel"]} excitation to {params["value"]}'

    def set_pid(self, session, params):
        """
        Set the PID parameters for servo control of fridge.

        :param params: dict with "P", "I", and "D" keys for Heater.set_pid()
        :type params: dict
        """
        with self._lock.acquire_timeout(job='set_pid') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            self.module.sample_heater.set_pid(params["P"], params["I"],
                                              params["D"])
            session.add_message(
                f'post message text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}'
            )
            print(
                f'print text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}'
            )

            return True, f'return text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}'

    def set_active_channel(self, session, params):
        """
        Set the active channel on the LS372.

        :param params: dict with "channel" number
        :type params: dict
        """
        with self._lock.acquire_timeout(job='set_active_channel') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            self.module.set_active_channel(params["channel"])
            session.add_message(
                f'post message text for set channel to {params["channel"]}')
            print(f'print text for set channel to {params["channel"]}')

            return True, f'return text for set channel to {params["channel"]}'

    def set_autoscan(self, session, params):
        """
        Sets autoscan on the LS372.

        :param params: dict with "autoscan" value
        """
        with self._lock.acquire_timeout(job='set_autoscan') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            if params['autoscan']:
                self.module.enable_autoscan()
                self.log.info('enabled autoscan')
            else:
                self.module.disable_autoscan()
                self.log.info('disabled autoscan')

            return True, 'Set autoscan to {}'.format(params['autoscan'])

    def servo_to_temperature(self, session, params):
        """Servo to temperature passed into params.

        Puts the sample heater in Closed Loop mode on the heater's control
        input channel (in Kelvin, autoscan off) before setting the setpoint.

        :param params: dict with "temperature" Heater.set_setpoint() in
                       unites of K
        :type params: dict
        """
        with self._lock.acquire_timeout(
                job='servo_to_temperature') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            # Check we're in correct control mode for servo.
            if self.module.sample_heater.mode != 'Closed Loop':
                session.add_message(
                    f'Changing control to Closed Loop mode for servo.')
                self.module.sample_heater.set_mode("Closed Loop")

            # Check we aren't autoscanning.
            if self.module.get_autoscan() is True:
                session.add_message(
                    f'Autoscan is enabled, disabling for PID control on dedicated channel.'
                )
                self.module.disable_autoscan()

            # Check we're scanning same channel expected by heater for control.
            if self.module.get_active_channel().channel_num != int(
                    self.module.sample_heater.input):
                session.add_message(
                    f'Changing active channel to expected heater control input'
                )
                self.module.set_active_channel(
                    int(self.module.sample_heater.input))

            # Check we're setup to take correct units.
            if self.module.get_active_channel().units != 'kelvin':
                session.add_message(
                    f'Setting preferred units to Kelvin on heater control input.'
                )
                self.module.get_active_channel().set_units('kelvin')

            # Make sure we aren't servoing too high in temperature.
            if params["temperature"] > 1:
                return False, f'Servo temperature is set above 1K. Aborting.'

            self.module.sample_heater.set_setpoint(params["temperature"])

            return True, f'Setpoint now set to {params["temperature"]} K'

    def check_temperature_stability(self, session, params):
        """Check servo temperature stability is within threshold.

        :param params: dict with "measurements" and "threshold" parameters
        :type params: dict

        measurements - number of measurements to average for stability check
        threshold - amount within which the average needs to be to the setpoint
                    for stability
        """
        with self._lock.acquire_timeout(
                job='check_temp_stability') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            setpoint = float(self.module.sample_heater.get_setpoint())

            if params is None:
                params = {'measurements': 10, 'threshold': 0.5e-3}

            test_temps = []

            for i in range(params['measurements']):
                test_temps.append(self.module.get_temp())
                time.sleep(
                    .1
                )  # sampling rate is 10 readings/sec, so wait 0.1 s for a new reading

            mean = np.mean(test_temps)
            session.add_message(
                f'Average of {params["measurements"]} measurements is {mean} K.'
            )
            print(
                f'Average of {params["measurements"]} measurements is {mean} K.'
            )

            if np.abs(mean - setpoint) < params['threshold']:
                print("passed threshold")
                session.add_message(f'Setpoint Difference: '
                                    + str(mean - setpoint))
                session.add_message(
                    f'Average is within {params["threshold"]} K threshold. Proceeding with calibration.'
                )

                return True, f"Servo temperature is stable within {params['threshold']} K"

            else:
                print("we're in the else")
                #adjust_heater(t,rest)

                return False, f"Temperature not stable within {params['threshold']}."

    def set_output_mode(self, session, params=None):
        """
        Set output mode of the heater.

        :param params: dict with "heater" and "mode" parameters
        :type params: dict

        heater - Specifies which heater to control. Either 'sample' or 'still'
        mode - Specifies mode of heater. Can be "Off", "Monitor Out",
               "Open Loop", "Zone", "Still", "Closed Loop", or "Warm up"
        """
        with self._lock.acquire_timeout(job='set_output_mode') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            if params['heater'].lower() == 'still':
                self.module.still_heater.set_mode(params['mode'])
            if params['heater'].lower() == 'sample':
                self.module.sample_heater.set_mode(params['mode'])
            self.log.info("Set {} output mode to {}".format(
                params['heater'], params['mode']))

            return True, "Set {} output mode to {}".format(params['heater'],
                                                           params['mode'])

    def set_heater_output(self, session, params=None):
        """
        Set display type and output of the heater.

        :param params: dict with "heater", "display", and "output" parameters
        :type params: dict

        heater - Specifies which heater to control. Either 'sample' or 'still'
        output - Specifies heater output value.
                 If display is set to "Current" or heater is "still", can be
                 any number between 0 and 100.
                 If display is set to "Power", can be any number between 0 and
                 the maximum allowed power.

        Note that for the still heater this sets the still heater manual
        output, NOT the still heater still output. Use set_still_output()
        instead to set the still output.

        display (opt)- Specifies heater display type. Can be "Current" or
                       "Power". If None, heater display is not reset before
                       setting output.
        """
        with self._lock.acquire_timeout(job='set_heater_output') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            heater = params['heater'].lower()
            output = params['output']

            display = params.get('display', None)

            if heater == 'still':
                self.module.still_heater.set_heater_output(
                    output, display_type=display)
            if heater.lower() == 'sample':
                self.log.info("display: {}\toutput: {}".format(
                    display, output))
                self.module.sample_heater.set_heater_output(
                    output, display_type=display)

            self.log.info("Set {} heater display to {}, output to {}".format(
                heater, display, output))

            session.set_status('running')

            # Record the commanded output on the temperatures feed as well.
            data = {
                'timestamp': time.time(),
                'block_name': '{}_heater_out'.format(heater),
                'data': {
                    '{}_heater_out'.format(heater): output
                }
            }
            session.app.publish_to_feed('temperatures', data)

            return True, "Set {} display to {}, output to {}".format(
                heater, display, output)

    def set_still_output(self, session, params=None):
        """
        Set the still output on the still heater. This is different than the
        manual output on the still heater. Use set_heater_output() for that.

        :param params: dict with "output" parameter
        :type params: dict

        output - Specifies still heater output value.
                 Can be any number between 0 and 100.
        """
        with self._lock.acquire_timeout(job='set_still_output') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            output = params['output']

            self.module.still_heater.set_still_output(output)

            self.log.info("Set still output to {}".format(output))

            session.set_status('running')

            # Record the commanded still output on the temperatures feed.
            data = {
                'timestamp': time.time(),
                'block_name': 'still_heater_still_out',
                'data': {
                    'still_heater_still_out': output
                }
            }
            session.app.publish_to_feed('temperatures', data)

            return True, "Set still output to {}".format(output)

    def get_still_output(self, session, params=None):
        """
        Gets the current still output on the still heater.

        This task has no useful parameters.

        The still heater output is stored in the session.data object in the
        format::

          {"still_heater_still_out": 9.628}
        """
        with self._lock.acquire_timeout(job='get_still_output') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            still_output = self.module.still_heater.get_still_output()

            self.log.info("Current still output is {}".format(still_output))

            session.set_status('running')
            session.data = {"still_heater_still_out": still_output}

            return True, "Current still output is {}".format(still_output)
class PfeifferAgent:
    """OCS agent for a Pfeiffer pressure-gauge controller.

    Args:
        agent (ocs.ocs_agent.OCSAgent): OCSAgent object which is running
        ip_address (str): IP address of the gauge controller
        port (int): TCP port of the gauge controller
        f_sample (float): default sampling frequency in Hz for the acq process
    """

    def __init__(self, agent, ip_address, port, f_sample=2.5):
        self.active = True
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.f_sample = f_sample
        self.take_data = False
        self.gauge = Pfeiffer(ip_address, int(port))
        agg_params = {
            'frame_length': 60,
        }
        self.agent.register_feed('pressures',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    def start_acq(self, session, params=None):
        """Get pressures from the Pfeiffer gauges, publishes them to the feed.

        Args:
            sampling_frequency (float, optional): rate in Hz; defaults to the
                value passed at agent startup (2.5 Hz).
        """
        if params is None:
            params = {}

        f_sample = params.get('sampling_frequency')
        if f_sample is None:
            f_sample = self.f_sample
        # 0.01 s roughly compensates for the time spent reading the gauges.
        sleep_time = 1. / f_sample - 0.01

        # BUGFIX: the lock job and the warning previously said 'init' —
        # copy-paste from the init task; this is the acq process.
        with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
            if not acquired:
                self.log.warn(
                    "Could not start acq because {} is already running".
                    format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')
            self.take_data = True
            while self.take_data:
                data = {
                    'timestamp': time.time(),
                    'block_name': 'pressures',
                    'data': {}
                }
                pressure_array = self.gauge.read_pressure_all()
                # One field per gauge channel, 1-indexed to match the device.
                for channel, pressure in enumerate(pressure_array):
                    data['data']["pressure_ch" + str(channel + 1)] = pressure
                self.agent.publish_to_feed('pressures', data)
                time.sleep(sleep_time)

            # BUGFIX: was `self.agents` (AttributeError when the loop exits).
            self.agent.feeds['pressures'].flush_buffer()

        # BUGFIX: message previously misspelled 'Acquistion'.
        return True, 'Acquisition exited cleanly'

    def stop_acq(self, session, params=None):
        """End pressure data acquisition and close the gauge connection."""
        if self.take_data:
            self.take_data = False
            self.gauge.close()
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'
class dS378Agent:
    '''OCS agent class for dS378 ethernet relay

    Parameters
    ----------
    ip : string
        IP address
    port : int
        Port number
    '''

    def __init__(self, agent, ip=IP_DEFAULT, port=17123):
        '''
        Parameters
        ----------
        ip : string
            IP address
        port : int
            Port number
        '''
        self.active = True
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.take_data = False

        self._dev = dS378(ip=ip, port=port)

        self.initialized = False

        agg_params = {'frame_length': 60}
        self.agent.register_feed('relay',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    def start_acq(self, session, params):
        '''Starts acquiring data.

        Publishes supply voltage, internal temperature, and the state of all
        eight relays to the 'relay' feed; mirrors a summary into session.data.
        '''
        if params is None:
            params = {}

        f_sample = params.get('sampling_frequency', 0.5)
        sleep_time = 1 / f_sample - 0.1

        with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
            if not acquired:
                self.log.warn(
                    f'Could not start acq because {self.lock.job} is already running'
                )
                return False, 'Could not acquire lock.'

            session.set_status('running')

            self.take_data = True
            session.data = {"fields": {}}
            last_release = time.time()

            while self.take_data:
                # Release lock periodically so queued tasks can run.
                if time.time() - last_release > LOCK_RELEASE_SEC:
                    last_release = time.time()
                    if not self.lock.release_and_acquire(
                            timeout=LOCK_RELEASE_TIMEOUT):
                        print(f'Re-acquire failed: {self.lock.job}')
                        return False, 'Could not re-acquire lock.'

                # Data acquisition
                current_time = time.time()
                data = {
                    'timestamp': current_time,
                    'block_name': 'relay',
                    'data': {}
                }

                d_status = self._dev.get_status()
                relay_list = self._dev.get_relays()
                data['data']['V_sppl'] = d_status['V_sppl']
                data['data']['T_int'] = d_status['T_int']
                for i in range(8):
                    data['data'][f'Relay_{i+1}'] = relay_list[i]

                # Spurious f-prefix removed; 'relay' has no placeholders.
                field_dict = {
                    'relay': {
                        'V_sppl': d_status['V_sppl'],
                        'T_int': d_status['T_int']
                    }
                }
                session.data['fields'].update(field_dict)

                self.agent.publish_to_feed('relay', data)
                session.data.update({'timestamp': current_time})

                time.sleep(sleep_time)

            self.agent.feeds['relay'].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Stops the data acquisition."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'

        return False, 'acq is not currently running.'

    def set_relay(self, session, params=None):
        '''Turns the relay on/off or pulses it

        Parameters
        ----------
        relay_number : int
            relay_number, 1 -- 8
        on_off : int or RelayStatus
            1: on, 0: off
        pulse_time : int, 32 bit
            Pulse duration; 0 (the default) means no pulse. See device document.
        '''
        if params is None:
            params = {}

        with self.lock.acquire_timeout(3, job='set_values') as acquired:
            if not acquired:
                self.log.warn('Could not start set_values because '
                              f'{self.lock.job} is already running')
                return False, 'Could not acquire lock.'

            if params.get('pulse_time') is None:
                params['pulse_time'] = 0

            self._dev.set_relay(relay_number=params['relay_number'],
                                on_off=params['on_off'],
                                pulse_time=params['pulse_time'])

        # BUGFIX: message previously said "Set values for BLE2" — copy-paste
        # from a different (BLE2 motor controller) agent.
        return True, 'Set values for dS378'

    def get_relays(self, session, params=None):
        '''Get relay states and store them in session.data.'''
        if params is None:
            params = {}

        with self.lock.acquire_timeout(3, job='get_relays') as acquired:
            if not acquired:
                self.log.warn('Could not start get_relays because '
                              f'{self.lock.job} is already running')
                return False, 'Could not acquire lock.'

            d_status = self._dev.get_relays()
            session.data = {f'Relay_{i+1}': d_status[i] for i in range(8)}

        return True, 'Got relay status'
class LS425Agent:
    """Agent for interfacing with a single Lakeshore 425 device.

    Args:
        agent (ocs.ocs_agent.OCSAgent): Instantiated OCSAgent class for this Agent
        port (int): Path to USB device in `/dev/`
        f_sample (float): Default sampling rate for the acq Process
    """

    def __init__(self, agent, port, f_sample=1.):
        self.agent: ocs_agent.OCSAgent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.port = port
        # Device handle; created by init_lakeshore, not here.
        self.dev = None

        self.f_sample = f_sample

        self.initialized = False
        self.take_data = False

        # Registers Temperature and Voltage feeds
        agg_params = {'frame_length': 60}
        self.agent.register_feed('mag_field',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    # Task functions.
    @ocs_agent.param('auto_acquire', default=False, type=bool)
    def init_lakeshore(self, session, params):
        """init_lakeshore(auto_acquire=False)

        **Task** - Perform first time setup of the Lakeshore 425 Module.

        Parameters:
            auto_acquire (bool, optional): Default is False. Starts data
                acquisition after initialization if True.
        """
        if params is None:
            params = {}

        auto_acquire = params['auto_acquire']

        # Idempotent: a second call is a no-op.
        if self.initialized:
            return True, "Already Initialized Module"

        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('starting')

            self.dev = ls.LakeShore425(self.port)
            self.log.info(self.dev.get_id())
            print("Initialized Lakeshore module: {!s}".format(self.dev))

        self.initialized = True
        # Start data acquisition if requested
        if auto_acquire:
            self.agent.start('acq',
                             params={'sampling_frequency': self.f_sample})

        return True, 'Lakeshore module initialized.'

    @ocs_agent.param('sampling_frequency', default=None, type=float)
    def acq(self, session, params):
        """acq(sampling_frequency=None)

        **Process** - Acquire data from the Lakeshore 425.

        Parameters:
            sampling_frequency (float, optional):
                Sampling frequency for data collection. Defaults to the value
                passed to `--sampling_frequency` on Agent startup

        Notes:
            The most recent data collected is stored in session data in the
            structure::

                >>> response.session['data']
                {"fields":
                    {"mag_field": {"Bfield": 270.644},
                     "timestamp": 1601924466.6130798}
                }
        """
        if params is None:
            params = {}

        f_sample = params['sampling_frequency']
        # If f_sample is None, use value passed to Agent init
        if f_sample is None:
            f_sample = self.f_sample

        # 0.01 s roughly accounts for the time spent querying the device.
        sleep_time = 1 / f_sample - 0.01

        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn(
                    "Could not start acq because {} is already running".format(
                        self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')

            self.take_data = True

            session.data = {"fields": {}}

            while self.take_data:
                Bfield = self.dev.get_field()
                current_time = time.time()
                data = {
                    'timestamp': current_time,
                    'block_name': 'mag_field',
                    'data': {
                        'Bfield': Bfield
                    }
                }
                self.agent.publish_to_feed('mag_field', data)
                session.data.update({'timestamp': current_time})

                # NOTE(review): flushing inside the loop forces the feed
                # buffer out every sample — confirm this is intentional.
                self.agent.feeds['mag_field'].flush_buffer()

                time.sleep(sleep_time)

        return True, 'Acquisition exited cleanly.'

    def _stop_acq(self, session, params):
        """
        Stops acq process. Sets the flag polled by the acq loop.
        """
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'

    @ocs_agent.param('_')
    def operational_status(self, session, params):
        """operational_status()

        **Task** - Check operational status.
        """
        with self.lock.acquire_timeout(0,
                                       job='operational_status') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not start operational_status because {} is already running'
                    .format(self.lock.job))
                return False, 'Could not acquire lock'

            op_status = self.dev.get_op_status()
            self.log.info(op_status)
            return True, 'operational status: ' + op_status

    @ocs_agent.param('_')
    def zero_calibration(self, session, params):
        """zero_calibration()

        **Task** - Calibrate the zero point.
        """
        with self.lock.acquire_timeout(0, job='zero_calibration') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not start zero_calibration because {} is already running'
                    .format(self.lock.job))
                return False, 'Could not acquire lock'

            self.dev.set_zero()
            return True, 'Zero calibration is done'

    @ocs_agent.param('command', type=str)
    def any_command(self, session, params):
        """any_command(command)

        **Process** - Send serial command to Lakeshore 425

        Parameters:
            command (str): any serial command

        Examples:
            Example for calling in a client::

                >>> client.any_command(command='*IDN?')

        Notes:
            An example of the session data::

                >>> response.session['data']
                {'response': 'LSA1234'}
        """
        command = params['command']

        with self.lock.acquire_timeout(0, job='any_command') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not any_command because {} is already running'.
                    format(self.lock.job))
                return False, 'Could not acquire lock'

            print('Input command: ' + command)
            # Commands containing '?' are queries and produce a response;
            # anything else is a bare command with no reply.
            if '?' in command:
                out = self.dev.query(command)
                session.data = {'response': out}
                return True, 'any_command is finished cleanly. Results: {}'.format(
                    out)
            else:
                self.dev.command(command)
                session.data = {'response': None}
                return True, 'any_command is finished cleanly'
class LabJackAgent:
    """OCS agent that reads analog channels from a LabJack over Modbus TCP.

    Args:
        agent: OCSAgent instance running this agent.
        ip_address: network address of the LabJack.
        num_channels: number of analog channels to read out.
    """

    def __init__(self, agent, ip_address, num_channels):
        self.active = True
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.ip_address = ip_address
        self.module = None
        # Human-readable labels, one per channel: "Channel 1", "Channel 2", ...
        self.sensors = [f'Channel {idx + 1}' for idx in range(num_channels)]
        self.initialized = False
        self.take_data = False

        # Register feed
        self.agent.register_feed('Sensors',
                                 record=True,
                                 agg_params={'frame_length': 60},
                                 buffer_time=1)

    # Task functions
    def init_labjack_task(self, session, params=None):
        """Open the Modbus TCP connection to the LabJack (idempotent)."""
        if self.initialized:
            return True, "Already initialized module"

        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('starting')

            self.module = ModbusTcpClient(str(self.ip_address))

            print("Initialized labjack module")

        session.add_message("Labjack initialized")
        self.initialized = True
        return True, 'LabJack module initialized.'

    def start_acq(self, session, params=None):
        """Read every channel in a loop and publish to the 'Sensors' feed.

        Args:
            sampling_frequency (float): rate in Hz for data collection;
                defaults to 2.5 Hz.
        """
        params = params or {}
        rate = params.get('sampling_frequency', 2.5)
        # Subtract ~10 ms to compensate for time spent reading registers.
        pause = 1 / rate - 0.01

        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn("Could not start acq because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')
            self.take_data = True

            while self.take_data:
                block = {
                    'timestamp': time.time(),
                    'block_name': 'sens',
                    'data': {},
                }
                # Each channel occupies two consecutive input registers.
                for idx, label in enumerate(self.sensors):
                    response = self.module.read_input_registers(2 * idx, 2)
                    block['data'][label + 'V'] = data_to_float32(
                        response.registers)

                time.sleep(pause)
                self.agent.publish_to_feed('Sensors', block)

            self.agent.feeds['Sensors'].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Signal the acq loop to exit after its current iteration."""
        if not self.take_data:
            return False, 'acq is not currently running'
        self.take_data = False
        return True, 'requested to stop taking data.'
class HWPBBBAgent:
    """OCS agent for HWP encoder DAQ using Beaglebone Black

    Attributes
    ----------
    rising_edge_count : int
        clock count values for the rising edge of IRIG reference marker,
        saved for calculating the beaglebone clock frequency
    irig_time : int
        unix timestamp from IRIG
    """

    def __init__(self, agent_obj, port=8080):
        self.active = True
        self.agent = agent_obj
        self.log = agent_obj.log
        self.lock = TimeoutLock()
        self.port = port
        self.take_data = False
        self.initialized = False
        # For clock count to time conversion
        self.rising_edge_count = 0
        self.irig_time = 0
        # Two feeds: 'HWPEncoder' for normal-rate data, 'HWPEncoder_full'
        # for full-rate counter samples (excluded from InfluxDB).
        agg_params = {'frame_length': 60}
        self.agent.register_feed('HWPEncoder',
                                 record=True,
                                 agg_params=agg_params)
        agg_params = {'frame_length': 60, 'exclude_influx': True}
        self.agent.register_feed('HWPEncoder_full',
                                 record=True,
                                 agg_params=agg_params)
        self.parser = EncoderParser(beaglebone_port=self.port)

    def start_acq(self, session, params):
        """Starts acquiring data.

        Drains the parser's IRIG and counter queues in a loop, publishing
        IRIG timing blocks immediately and batching encoder counter data
        until NUM_ENCODER_TO_PUBLISH samples or SEC_ENCODER_TO_PUBLISH
        seconds have accumulated.
        """
        time_encoder_published = 0
        # Accumulators for batched encoder data between publishes.
        counter_list = []
        counter_index_list = []
        quad_list = []
        quad_counter_list = []
        received_time_list = []

        with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
            if not acquired:
                self.log.warn('Could not start acq because {} is already running'
                              .format(self.lock.job))
                return False, 'Could not acquire lock.'

            session.set_status('running')

            self.take_data = True

            while self.take_data:
                # This is blocking until data are available
                self.parser.grab_and_parse_data()

                # IRIG data; normally every sec
                while len(self.parser.irig_queue):
                    irig_data = self.parser.irig_queue.popleft()
                    rising_edge_count = irig_data[0]
                    irig_time = irig_data[1]
                    irig_info = irig_data[2]
                    synch_pulse_clock_counts = irig_data[3]
                    sys_time = irig_data[4]

                    data = {'timestamp': sys_time,
                            'block_name': 'HWPEncoder_irig',
                            'data': {}}
                    data['data']['irig_time'] = irig_time
                    data['data']['rising_edge_count'] = rising_edge_count
                    # Decode BCD fields from the IRIG frame.
                    data['data']['irig_sec'] = de_irig(irig_info[0], 1)
                    data['data']['irig_min'] = de_irig(irig_info[1], 0)
                    data['data']['irig_hour'] = de_irig(irig_info[2], 0)
                    data['data']['irig_day'] = de_irig(irig_info[3], 0) \
                        + de_irig(irig_info[4], 0) * 100
                    data['data']['irig_year'] = de_irig(irig_info[5], 0)

                    # Beagleboneblack clock frequency measured by IRIG:
                    # clock counts elapsed per second of IRIG time, using the
                    # previous marker stored on self; 0 until two markers seen.
                    if self.rising_edge_count > 0 and irig_time > 0:
                        bbb_clock_freq = float(rising_edge_count - self.rising_edge_count) \
                            / (irig_time - self.irig_time)
                    else:
                        bbb_clock_freq = 0.
                    data['data']['bbb_clock_freq'] = bbb_clock_freq

                    self.agent.publish_to_feed('HWPEncoder', data)
                    self.rising_edge_count = rising_edge_count
                    self.irig_time = irig_time

                    # saving clock counts for every refernce edge and every irig bit info
                    data = {'timestamps': [],
                            'block_name': 'HWPEncoder_irig_raw',
                            'data': {}}
                    # 0.09: time difference in seconds b/w reference marker and
                    # the first index marker
                    data['timestamps'] = sys_time + 0.09 + np.arange(10) * 0.1
                    data['data']['irig_synch_pulse_clock_time'] = list(irig_time + 0.09 + \
                                                                       np.arange(10) * 0.1)
                    data['data']['irig_synch_pulse_clock_counts'] = synch_pulse_clock_counts
                    data['data']['irig_info'] = list(irig_info)

                    self.agent.publish_to_feed('HWPEncoder', data)

                ## Reducing the packet size, less frequent publishing
                # Encoder data; packet coming rate = 570*2*2/150/4 ~ 4Hz packet
                # at 2 Hz rotation
                while len(self.parser.counter_queue):
                    counter_data = self.parser.counter_queue.popleft()

                    counter_list += counter_data[0].tolist()
                    counter_index_list += counter_data[1].tolist()
                    quad_data = counter_data[2]
                    sys_time = counter_data[3]

                    received_time_list.append(sys_time)
                    quad_list.append(quad_data)
                    quad_counter_list.append(counter_data[0][0])

                ct = time.time()
                # Publish batched encoder data once enough samples have
                # accumulated, or on timeout if any samples are pending.
                if len(counter_list) >= NUM_ENCODER_TO_PUBLISH \
                   or (len(counter_list) \
                       and (ct - time_encoder_published) > SEC_ENCODER_TO_PUBLISH):
                    # Publishing quadratic data first
                    data = {'timestamps': [],
                            'block_name': 'HWPEncoder_quad',
                            'data': {}}
                    data['timestamps'] = received_time_list
                    data['data']['quad'] = quad_list
                    self.agent.publish_to_feed('HWPEncoder', data)

                    # Publishing counter data
                    # (full sampled data will not be recorded in influxdb)
                    data = {'timestamps': [],
                            'block_name': 'HWPEncoder_counter',
                            'data': {}}
                    data['data']['counter'] = counter_list
                    data['data']['counter_index'] = counter_index_list
                    data['timestamps'] = count2time(counter_list, received_time_list[0])
                    self.agent.publish_to_feed('HWPEncoder_full', data)

                    ## Subsampled data for influxdb display
                    data_subsampled = {'block_name': 'HWPEncoder_counter_sub',
                                       'data': {}}
                    data_subsampled['timestamps'] = np.array(data['timestamps'])\
                        [::NUM_SUBSAMPLE].tolist()
                    data_subsampled['data']['counter_sub'] = np.array(counter_list)\
                        [::NUM_SUBSAMPLE].tolist()
                    data_subsampled['data']['counter_index_sub'] = np.array(counter_index_list)\
                        [::NUM_SUBSAMPLE].tolist()
                    self.agent.publish_to_feed('HWPEncoder', data_subsampled)

                    # For rough estimation of HWP rotation frequency
                    data = {'timestamp': received_time_list[0],
                            'block_name': 'HWPEncoder_freq',
                            'data': {}}
                    dclock_counter = counter_list[-1] - counter_list[0]
                    dindex_counter = counter_index_list[-1] - counter_index_list[0]
                    # Assuming Beagleboneblack clock is 200 MHz
                    pulse_rate = dindex_counter * 2.e8 / dclock_counter
                    # Two edges per slit: divide by 2 and by the slit count.
                    hwp_freq = pulse_rate / 2. / NUM_SLITS
                    diff_counter = np.diff(counter_list)
                    diff_index = np.diff(counter_index_list)
                    self.log.info(f'pulse_rate {pulse_rate} {hwp_freq}')
                    data['data']['approx_hwp_freq'] = hwp_freq
                    data['data']['diff_counter_mean'] = np.mean(diff_counter)
                    data['data']['diff_index_mean'] = np.mean(diff_index)
                    data['data']['diff_counter_std'] = np.std(diff_counter)
                    data['data']['diff_index_std'] = np.std(diff_index)
                    self.agent.publish_to_feed('HWPEncoder', data)

                    # Initialize lists
                    counter_list = []
                    counter_index_list = []
                    quad_list = []
                    quad_counter_list = []
                    received_time_list = []
                    time_encoder_published = ct

            self.agent.feeds['HWPEncoder'].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Stops the data acquisiton."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'

        return False, 'acq is not currently running.'
class ACUAgent:
    """Agent to acquire data from an ACU and control telescope pointing
    with the ACU.

    Args:
        acu_config (str): The configuration for the ACU, as referenced in
            aculib.configs. Default value is 'guess'.
    """

    def __init__(self, agent, acu_config='guess'):
        self.lock = TimeoutLock()
        # Per-operation run-state registry.  'control' is shared by all
        # motion tasks/processes so only one motion command runs at a time.
        self.jobs = {
            'monitor': 'idle',
            'broadcast': 'idle',
            'control': 'idle',  # shared by all motion tasks/processes
        }
        self.acu_config = aculib.guess_config(acu_config)
        self.base_url = self.acu_config['base_url']
        self.sleeptime = self.acu_config['motion_waittime']
        self.udp = self.acu_config['streams']['main']
        self.udp_ext = self.acu_config['streams']['ext']
        self.log = agent.log

        # self.data provides a place to reference data from the monitors.
        # 'status' is populated by the monitor operation
        # 'broadcast' is populated by the udp_monitor operation
        self.data = {
            'status': {
                'summary': {},
                'full_status': {}
            },
            'broadcast': {},
            'uploads': {},
        }
        # NOTE(review): this instance attribute shadows the stub
        # health_check() method defined below for all instances.
        self.health_check = {'broadcast': False, 'status': False}

        self.agent = agent
        self.take_data = False
        self.web_agent = tclient.Agent(reactor)
        tclient._HTTP11ClientFactory.noisy = False
        # NOTE(review): 'guess' is hard-coded here rather than forwarding
        # acu_config -- confirm this is intentional.
        self.acu = aculib.AcuControl(
            'guess', backend=TwistedHttpBackend(self.web_agent))

        agent.register_process('monitor',
                               self.start_monitor,
                               lambda: self.set_job_stop('monitor'),
                               blocking=False,
                               startup=True)
        agent.register_process('broadcast',
                               self.start_udp_monitor,
                               lambda: self.set_job_stop('broadcast'),
                               blocking=False,
                               startup=True)
        agent.register_process('generate_scan',
                               self.generate_scan,
                               lambda: self.set_job_stop('generate_scan'),
                               blocking=False,
                               startup=False)
        agg_params = {'frame_length': 60}
        self.agent.register_feed('acu_status_summary',
                                 record=True,
                                 agg_params={
                                     'frame_length': 60,
                                     'exclude_influx': True
                                 },
                                 buffer_time=1)
        self.agent.register_feed('acu_status_full',
                                 record=True,
                                 agg_params={
                                     'frame_length': 60,
                                     'exclude_influx': True
                                 },
                                 buffer_time=1)
        self.agent.register_feed('acu_status_influx',
                                 record=True,
                                 agg_params={
                                     'frame_length': 60,
                                     'exclude_aggregator': True
                                 },
                                 buffer_time=1)
        self.agent.register_feed('acu_udp_stream',
                                 record=True,
                                 agg_params={
                                     'frame_length': 60,
                                     'exclude_influx': True
                                 },
                                 buffer_time=1)
        self.agent.register_feed('acu_broadcast_influx',
                                 record=False,
                                 agg_params={
                                     'frame_length': 60,
                                     'exclude_aggregator': True
                                 },
                                 buffer_time=1)
        self.agent.register_feed('acu_health_check',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)
        self.agent.register_feed('acu_upload',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)
        self.agent.register_feed('acu_error',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)
        agent.register_task('go_to', self.go_to, blocking=False)
        # BUGFIX: was 'locking=False' (typo); register_task takes 'blocking'.
        agent.register_task('run_specified_scan',
                            self.run_specified_scan,
                            blocking=False)
        agent.register_task('set_boresight',
                            self.set_boresight,
                            blocking=False)
        agent.register_task('stop_and_clear',
                            self.stop_and_clear,
                            blocking=False)

    # Operation management.  This agent has several Processes that
    # must be able to run alone or simultaneously.  The state of each is
    # registered in self.jobs, protected by self.lock (though this is
    # probably not necessary as long as we don't thread).  Any logic
    # to assess conflicts should probably be in try_set_job.

    def try_set_job(self, job_name):
        """Set a job status to 'run'.

        Args:
            job_name (str): Name of the task/process you are trying to
                start.

        Returns:
            (ok, message): ok is True if the job was set to 'run'.
        """
        with self.lock.acquire_timeout(timeout=1.0, job=job_name) as acquired:
            if not acquired:
                self.log.warn("Lock could not be acquired because it is held"
                              f" by {self.lock.job}")
                # BUGFIX: callers unpack `ok, msg = ...`; a bare False here
                # raised TypeError on unpacking.
                return False, 'Could not acquire lock'
            # Set running.
            self.jobs[job_name] = 'run'
            return (True, 'ok')

    def set_job_stop(self, job_name):
        """Set a job status to 'stop'.

        Args:
            job_name (str): Name of the process you are trying to stop.
        """
        with self.lock.acquire_timeout(timeout=1.0, job=job_name) as acquired:
            if not acquired:
                self.log.warn("Lock could not be acquired because it is"
                              f" held by {self.lock.job}")
                # BUGFIX: return a tuple for consistency with the success
                # path (previously a bare False).
                return False, 'Could not acquire lock'
            self.jobs[job_name] = 'stop'
            return True, 'Requested Process stop.'

    def set_job_done(self, job_name):
        """Set a job status to 'idle'.

        Args:
            job_name (str): Name of the task/process you are trying to
                idle.
        """
        with self.lock.acquire_timeout(timeout=1.0, job=job_name) as acquired:
            if not acquired:
                self.log.warn("Lock could not be acquired because it is held"
                              f" by {self.lock.job}")
                return False, 'Could not acquire lock'
            self.jobs[job_name] = 'idle'
            return True, 'Job set to idle.'

    #
    # The Operations
    #

    @inlineCallbacks
    def health_check(self, session, params=None):
        # NOTE(review): dead stub -- shadowed on every instance by the
        # self.health_check dict assigned in __init__.
        pass

    @inlineCallbacks
    def start_monitor(self, session, params=None):
        """PROCESS "monitor".

        This process refreshes the cache of SATP ACU status information,
        and reports it on HK feeds 'acu_status_summary' and
        'acu_status_full'.

        Summary parameters are ACU-provided time code, Azimuth mode,
        Azimuth position, Azimuth velocity, Elevation mode, Elevation
        position, Elevation velocity, Boresight mode, and Boresight
        position.
        """
        ok, msg = self.try_set_job('monitor')
        if not ok:
            return ok, msg
        session.set_status('running')

        report_t = time.time()
        report_period = 10
        n_ok = 0
        min_query_period = 0.05  # Seconds
        query_t = 0

        summary_params = [
            'Time',
            'Azimuth mode',
            'Azimuth current position',
            'Azimuth current velocity',
            'Elevation mode',
            'Elevation current position',
            'Elevation current velocity',
            # 'Boresight mode',
            # 'Boresight current position',
            'Qty of free program track stack positions',
        ]
        mode_key = {
            'Stop': 0,
            'Preset': 1,
            'ProgramTrack': 2,
            'Stow': 3,
            'SurvivalMode': 4,
        }
        tfn_key = {'None': 0, 'False': 0, 'True': 1}
        # Characters replaced with '_' to make OCS-safe field names.
        char_replace = [' ', '-', ':', '(', ')', '+', ',', '/']

        while self.jobs['monitor'] == 'run':
            now = time.time()

            if now > report_t + report_period:
                self.log.info('Responses ok at %.3f Hz' %
                              (n_ok / (now - report_t)))
                self.health_check['status'] = True
                report_t = now
                n_ok = 0

            if now - query_t < min_query_period:
                # BUGFIX: sleep the *remaining* time until the next allowed
                # query; previously this slept the already-elapsed time.
                yield dsleep(min_query_period - (now - query_t))
            query_t = now
            try:
                # j = yield self.acu.http.Values('DataSets.StatusSATPDetailed8100')
                j = yield self.acu.http.Values(
                    'DataSets.StatusCCATDetailed8100')
                n_ok += 1
                session.data = j
            except Exception as e:
                # Need more error handling here...
                errormsg = {'aculib_error_message': str(e)}
                # BUGFIX: txaio loggers expect a string format, not a dict.
                self.log.error(str(errormsg))
                acu_error = {
                    'timestamp': time.time(),
                    'block_name': 'ACU_error',
                    'data': errormsg
                }
                self.agent.publish_to_feed('acu_error', acu_error)
                yield dsleep(1)

            for (key, value) in session.data.items():
                ocs_key = key
                for char in char_replace:
                    ocs_key = ocs_key.replace(char, '_')
                # Field names may not start with a digit.
                ocs_key = ocs_key.replace('24V', 'V24')
                if key in summary_params:
                    self.data['status']['summary'][ocs_key] = value
                    if key == 'Azimuth mode':
                        self.data['status']['summary']['Azimuth_mode_num'] =\
                            mode_key[value]
                    elif key == 'Elevation mode':
                        self.data['status']['summary']['Elevation_mode_num'] =\
                            mode_key[value]
                else:
                    self.data['status']['full_status'][ocs_key] = str(value)

            # Influx only takes numeric values; map booleans/None via tfn_key.
            influx_status = {}
            for name, val in self.data['status']['full_status'].items():
                try:
                    influx_status[str(name) + '_influx'] = float(val)
                except ValueError:
                    influx_status[str(name) + '_influx'] = tfn_key[val]

            self.data['status']['summary']['ctime'] =\
                timecode(self.data['status']['summary']['Time'])
            acustatus_summary = {
                'timestamp': self.data['status']['summary']['ctime'],
                'block_name': 'ACU_summary_output',
                'data': self.data['status']['summary']
            }
            acustatus_full = {
                'timestamp': self.data['status']['summary']['ctime'],
                'block_name': 'ACU_fullstatus_output',
                'data': self.data['status']['full_status']
            }
            acustatus_influx = {
                'timestamp': self.data['status']['summary']['ctime'],
                'block_name': 'ACU_fullstatus_ints',
                'data': influx_status
            }
            self.agent.publish_to_feed('acu_status_summary',
                                       acustatus_summary)
            self.agent.publish_to_feed('acu_status_full', acustatus_full)
            self.agent.publish_to_feed('acu_status_influx',
                                       acustatus_influx)

        self.set_job_done('monitor')
        return True, 'Acquisition exited cleanly.'

    @inlineCallbacks
    def start_udp_monitor(self, session, params=None):
        """PROCESS broadcast.

        This process reads UDP data from the port specified by
        self.acu_config, decodes it, and publishes to an HK feed.
        """
        ok, msg = self.try_set_job('broadcast')
        if not ok:
            return ok, msg
        session.set_status('running')
        FMT = '<iddddd'
        FMT_LEN = struct.calcsize(FMT)
        UDP_PORT = self.acu_config['PositionBroadcast_target'].split(':')[1]
        udp_data = []

        class MonitorUDP(protocol.DatagramProtocol):
            """Collects fixed-size position samples from the UDP stream."""

            def datagramReceived(self, data, src_addr):
                host, port = src_addr
                offset = 0
                while len(data) - offset >= FMT_LEN:
                    d = struct.unpack(FMT, data[offset:offset + FMT_LEN])
                    udp_data.append(d)
                    offset += FMT_LEN

        handler = reactor.listenUDP(int(UDP_PORT), MonitorUDP())
        while self.jobs['broadcast'] == 'run':
            if udp_data:
                self.health_check['broadcast'] = True
                process_data = udp_data[:200]
                udp_data = udp_data[200:]
                year = datetime.datetime.now().year
                gyear = calendar.timegm(time.strptime(str(year), '%Y'))
                # Sample tuples are (day-of-year, seconds, az, el,
                # az_raw, el_raw); rate from first/last timestamps.
                sample_rate = (len(process_data) /
                               ((process_data[-1][0] - process_data[0][0]) *
                                86400 + process_data[-1][1] -
                                process_data[0][1]))
                # BUGFIX: previously indexed process_data[2..5], which
                # yielded whole sample tuples instead of the fields of the
                # most recent sample.
                latest = process_data[-1]
                latest_az = latest[2]
                latest_el = latest[3]
                latest_az_raw = latest[4]
                latest_el_raw = latest[5]
                session.data = {
                    'sample_rate': sample_rate,
                    'latest_az': latest_az,
                    'latest_el': latest_el,
                    'latest_az_raw': latest_az_raw,
                    'latest_el_raw': latest_el_raw
                }
                # Publish the first sample of the batch to the influx feed.
                pd0 = process_data[0]
                pd0_gday = (pd0[0] - 1) * 86400
                pd0_sec = pd0[1]
                pd0_data_ctime = gyear + pd0_gday + pd0_sec
                pd0_azimuth_corrected = pd0[2]
                pd0_azimuth_raw = pd0[4]
                pd0_elevation_corrected = pd0[3]
                pd0_elevation_raw = pd0[5]
                bcast_first = {
                    'Time': pd0_data_ctime,
                    'Azimuth_Corrected': pd0_azimuth_corrected,
                    'Azimuth_Raw': pd0_azimuth_raw,
                    'Elevation_Corrected': pd0_elevation_corrected,
                    'Elevation_Raw': pd0_elevation_raw,
                }
                acu_broadcast_influx = {
                    'timestamp': bcast_first['Time'],
                    'block_name': 'ACU_position',
                    'data': bcast_first,
                }
                self.agent.publish_to_feed('acu_broadcast_influx',
                                           acu_broadcast_influx)
                # Publish every sample to the full-rate feed.
                for d in process_data:
                    gday = (d[0] - 1) * 86400
                    sec = d[1]
                    data_ctime = gyear + gday + sec
                    azimuth_corrected = d[2]
                    azimuth_raw = d[4]
                    elevation_corrected = d[3]
                    elevation_raw = d[5]
                    self.data['broadcast'] = {
                        'Time': data_ctime,
                        'Azimuth_Corrected': azimuth_corrected,
                        'Azimuth_Raw': azimuth_raw,
                        'Elevation_Corrected': elevation_corrected,
                        'Elevation_Raw': elevation_raw,
                    }
                    acu_udp_stream = {
                        'timestamp': self.data['broadcast']['Time'],
                        'block_name': 'ACU_position',
                        'data': self.data['broadcast']
                    }
                    self.agent.publish_to_feed('acu_udp_stream',
                                               acu_udp_stream)
            else:
                yield dsleep(1)
            yield dsleep(0.005)

        handler.stopListening()
        self.set_job_done('broadcast')
        return True, 'Acquisition exited cleanly.'

    @inlineCallbacks
    def go_to(self, session, params=None):
        """TASK "go_to".

        Moves the telescope to a particular point (azimuth, elevation)
        in Preset mode.  When motion has ended and the telescope reaches
        the preset point, it returns to Stop mode and ends.

        Params:
            az (float): destination angle for the azimuthal axis
            el (float): destination angle for the elevation axis
            wait (float): amount of time to wait for motion to end
        """
        ok, msg = self.try_set_job('control')
        if not ok:
            return ok, msg
        az = params.get('az')
        el = params.get('el')
        wait_for_motion = params.get('wait', 1)
        current_az = round(self.data['broadcast']['Azimuth_Corrected'], 4)
        current_el = round(self.data['broadcast']['Elevation_Corrected'], 4)
        publish_dict = {
            'Start_Azimuth': current_az,
            'Start_Elevation': current_el,
            'Start_Boresight': 0,
            'Upload_Type': 1,
            'Preset_Azimuth': az,
            'Preset_Elevation': el,
            'Upload_Lines': []
        }
        acu_upload = {
            'timestamp': self.data['broadcast']['Time'],
            'block_name': 'ACU_upload',
            'data': publish_dict
        }
        self.agent.publish_to_feed('acu_upload', acu_upload)

        # Check whether the telescope is already at the point
        self.log.info('Checking current position')
        if current_az == az and current_el == el:
            self.log.info('Already positioned at %.2f, %.2f' %
                          (current_az, current_el))
            self.set_job_done('control')
            return True, 'Pointing completed'

        yield self.acu.stop()
        self.log.info('Stopped')
        yield dsleep(0.1)
        yield self.acu.go_to(az, el)
        mdata = self.data['status']['summary']
        # Wait for telescope to start moving
        self.log.info('Moving to commanded position')
        while mdata['Azimuth_current_velocity'] == 0.0 and\
                mdata['Elevation_current_velocity'] == 0.0:
            yield dsleep(wait_for_motion)
            mdata = self.data['status']['summary']
        moving = True
        while moving:
            mdata = self.data['status']['summary']
            ve = round(mdata['Elevation_current_velocity'], 2)
            va = round(mdata['Azimuth_current_velocity'], 2)
            if (ve != 0.0) or (va != 0.0):
                moving = True
                yield dsleep(wait_for_motion)
            else:
                moving = False
                mdata = self.data['status']['summary']
                pe = round(mdata['Elevation_current_position'], 2)
                pa = round(mdata['Azimuth_current_position'], 2)
                if pe != el or pa != az:
                    yield self.acu.stop()
                    self.log.warn('Stopped before reaching commanded point!')
                    # BUGFIX: release the control job so later motion tasks
                    # can run (previously left stuck at 'run').
                    self.set_job_done('control')
                    return False, 'Something went wrong!'
                modes = (mdata['Azimuth_mode'], mdata['Elevation_mode'])
                if modes != ('Preset', 'Preset'):
                    # BUGFIX: release the control job on fault as well.
                    self.set_job_done('control')
                    return False, 'Fault triggered!'
        yield self.acu.stop()
        self.set_job_done('control')
        return True, 'Pointing completed'

    @inlineCallbacks
    def set_boresight(self, session, params=None):
        """TASK set_boresight.

        Moves the telescope to a particular third-axis angle.

        Params:
            b (float): destination angle for boresight rotation
        """
        ok, msg = self.try_set_job('control')
        if not ok:
            return ok, msg
        bs_destination = params.get('b')
        yield self.acu.stop()
        yield dsleep(5)
        yield self.acu.go_3rd_axis(bs_destination)
        current_position = self.data['status']['summary']\
            ['Boresight_current_position']
        while current_position != bs_destination:
            yield dsleep(1)
            current_position = self.data['status']['summary']\
                ['Boresight_current_position']
        yield self.acu.stop()
        self.set_job_done('control')
        return True, 'Moved to new 3rd axis position'

    @inlineCallbacks
    def stop_and_clear(self, session, params=None):
        """TASK stop_and_clear.

        Changes the azimuth and elevation modes to Stop and clears
        points uploaded to the stack.
        """
        ok, msg = self.try_set_job('control')
        if not ok:
            # If another control job holds the slot, idle it and retry once.
            self.set_job_done('control')
            yield dsleep(0.1)
            self.try_set_job('control')
        self.log.info('try_set_job ok')
        yield self.acu.stop()
        self.log.info('Stop called')
        yield dsleep(5)
        yield self.acu.http.Command('DataSets.CmdTimePositionTransfer',
                                    'Clear Stack')
        yield dsleep(0.1)
        self.log.info('Cleared stack.')
        self.set_job_done('control')
        return True, 'Job completed'

    @inlineCallbacks
    def run_specified_scan(self, session, params=None):
        """TASK run_specified_scan.

        Upload and execute a scan pattern.  The pattern may be specified
        by a numpy file, parameters for a linear scan in one direction,
        or a linear scan with a turnaround.

        Params:
            scantype (str): the type of scan information you are
                uploading.  Options are 'from_file', 'linear_1dir', or
                'linear_turnaround'.

        Optional params:
            filename (str): full path to desired numpy file.  File
                contains an array of three lists ([list(times),
                list(azimuths), list(elevations)]).  Times begin from
                0.0.  Applies to scantype 'from_file'.
            azpts (tuple): spatial endpoints of the azimuth scan.
                Applies to scantype 'linear_1dir' (2 values) and
                'linear_turnaround' (3 values).
            el (float): elevation for a linear velocity azimuth scan.
                Applies to scantype 'linear_1dir' and
                'linear_turnaround'.
            azvel (float): velocity of the azimuth axis in a linear
                velocity azimuth scan.  Applies to scantype
                'linear_1dir' and 'linear_turnaround'.
            acc (float): acceleration of the turnaround for a linear
                velocity scan with a turnaround.  Applies to scantype
                'linear_turnaround'.
            ntimes (int): number of times the platform traverses
                between azimuth endpoints for a 'linear_turnaround'
                scan.
        """
        ok, msg = self.try_set_job('control')
        if not ok:
            return ok, msg
        self.log.info('try_set_job ok')
        scantype = params.get('scantype')
        if scantype == 'from_file':
            filename = params.get('filename')
            times, azs, els, vas, ves, azflags, elflags =\
                sh.from_file(filename)
        elif scantype == 'linear_1dir':
            azpts = params.get('azpts')
            el = params.get('el')
            azvel = params.get('azvel')
            total_time = (azpts[1] - azpts[0]) / azvel
            # BUGFIX: np.linspace requires an integer sample count.
            npts = int(total_time * 10)
            azs = np.linspace(azpts[0], azpts[1], npts)
            els = np.linspace(el, el, npts)
            times = np.linspace(0.0, total_time, npts)
        elif scantype == 'linear_turnaround_sameends':
            # from parameters, generate the full set of scan points
            self.log.info('scantype is' + str(scantype))
            azpts = params.get('azpts')
            el = params.get('el')
            azvel = params.get('azvel')
            acc = params.get('acc')
            ntimes = params.get('ntimes')
            times, azs, els, vas, ves, azflags, elflags =\
                sh.linear_turnaround_scanpoints(azpts, el, azvel, acc,
                                                ntimes)
        else:
            # BUGFIX: an unrecognized scantype previously fell through and
            # raised NameError at azs[0]; fail cleanly instead.
            self.set_job_done('control')
            return False, 'Unrecognized scantype'

        # Switch to Stop mode and clear the stack
        yield self.acu.stop()
        self.log.info('Stop called')
        yield dsleep(5)
        yield self.acu.http.Command('DataSets.CmdTimePositionTransfer',
                                    'Clear Stack')
        yield dsleep(0.1)
        self.log.info('Cleared stack.')

        # Move to the starting position for the scan and then switch to Stop
        # mode
        start_az = azs[0]
        start_el = els[0]
        upload_publish_dict = {
            'Start_Azimuth': start_az,
            'Start_Elevation': start_el,
            'Start_Boresight': 0,
            'Upload_Type': 2,
            'Preset_Azimuth': 0,
            'Preset_Elevation': 0,
            'Upload_Lines': []
        }

        # Follow the scan in ProgramTrack mode, then switch to Stop mode
        if scantype == 'linear_turnaround_sameends':
            all_lines = sh.write_lines(times, azs, els, vas, ves, azflags,
                                       elflags)
        elif scantype == 'from_file':
            all_lines = sh.write_lines(times, azs, els, vas, ves, azflags,
                                       elflags)
        # Other scan types not yet implemented, so break
        else:
            # BUGFIX: release the control job before bailing out.
            self.set_job_done('control')
            return False, 'Not enough information to scan'
        self.log.info('all_lines generated')
        yield self.acu.mode('ProgramTrack')
        self.log.info('mode is now ProgramTrack')
        group_size = 120
        while len(all_lines):
            upload_lines = all_lines[:group_size]
            text = ''.join(upload_lines)
            all_lines = all_lines[group_size:]
            # Wait for room on the ACU's program track stack.
            free_positions = self.data['status']['summary']\
                ['Qty_of_free_program_track_stack_positions']
            while free_positions < 9899:
                free_positions = self.data['status']['summary']\
                    ['Qty_of_free_program_track_stack_positions']
                yield dsleep(0.1)
            yield self.acu.http.UploadPtStack(text)
            upload_publish_dict['Upload_Lines'] = upload_lines
            acu_upload = {
                'timestamp': self.data['broadcast']['Time'],
                'block_name': 'ACU_upload',
                'data': upload_publish_dict
            }
            self.agent.publish_to_feed('acu_upload', acu_upload)
            self.log.info('Uploaded a group')
        self.log.info('No more lines to upload')
        current_az = round(self.data['broadcast']['Azimuth_Corrected'], 4)
        current_el = round(self.data['broadcast']['Elevation_Corrected'], 4)
        while current_az != azs[-1] or current_el != els[-1]:
            yield dsleep(0.1)
            modes = (self.data['status']['summary']['Azimuth_mode'],
                     self.data['status']['summary']['Elevation_mode'])
            if modes != ('ProgramTrack', 'ProgramTrack'):
                # BUGFIX: release the control job on fault.
                self.set_job_done('control')
                return False, 'Fault triggered (not ProgramTrack)!'
            current_az = round(self.data['broadcast']['Azimuth_Corrected'],
                               4)
            current_el = round(self.data['broadcast']['Elevation_Corrected'],
                               4)
        yield dsleep(self.sleeptime)
        yield self.acu.stop()
        self.set_job_done('control')
        return True, 'Track completed.'

    @inlineCallbacks
    def generate_scan(self, session, params=None):
        """Scan generator, currently only works for constant-velocity az
        scans with fixed elevation.

        Args:
            scantype (str): type of scan you are generating.  For dev,
                preset to 'linear'.
            stop_iter (float): how many times the generator should
                generate a new set of points before forced to stop
            az_endpoint1 (float): first endpoint of a linear azimuth
                scan
            az_endpoint2 (float): second endpoint of a linear azimuth
                scan
            az_speed (float): azimuth speed for constant-velocity scan
            acc (float): turnaround acceleration for a
                constant-velocity scan
            el_endpoint1 (float): first endpoint of elevation motion
            el_endpoint2 (float): second endpoint of elevation motion.
                For dev, currently both el endpoints should be equal
            el_speed (float): speed of motion for a scan with changing
                elevation.  For dev, currently set to 0.0
        """
        ok, msg = self.try_set_job('control')
        if not ok:
            return ok, msg
        self.log.info('try_set_job ok')
        # scantype = params.get('scantype')
        scantype = 'linear'
        stop_iter = params.get('stop_iter')
        az_endpoint1 = params.get('az_endpoint1')
        az_endpoint2 = params.get('az_endpoint2')
        az_speed = params.get('az_speed')
        acc = params.get('acc')
        el_endpoint1 = params.get('el_endpoint1')
        el_endpoint2 = params.get('el_endpoint2')
        el_speed = params.get('el_speed')
        self.log.info('scantype is ' + str(scantype))
        yield self.acu.stop()
        if scantype != 'linear':
            self.log.warn('Scan type not supported')
            # BUGFIX: return a (bool, str) tuple and release the control
            # job (previously returned bare False with the job stuck).
            self.set_job_done('control')
            return False, 'Scan type not supported'
        g = sh.generate(stop_iter, az_endpoint1, az_endpoint2, az_speed,
                        acc, el_endpoint1, el_endpoint2, el_speed)
        # BUGFIX: yield the mode-change Deferred (was fired without
        # waiting, unlike everywhere else in this class).
        yield self.acu.mode('ProgramTrack')
        while True:
            # BUGFIX: a raw next(g) raised StopIteration inside this
            # @inlineCallbacks generator, which PEP 479 converts to
            # RuntimeError; exit the loop cleanly when exhausted.
            try:
                lines = next(g)
            except StopIteration:
                break
            current_lines = lines
            group_size = 250
            while len(current_lines):
                upload_lines = current_lines[:group_size]
                text = ''.join(upload_lines)
                current_lines = current_lines[group_size:]
                # Wait for room on the ACU's program track stack.
                free_positions = self.data['status']['summary']\
                    ['Qty_of_free_program_track_stack_positions']
                while free_positions < 5099:
                    yield dsleep(0.1)
                    free_positions = self.data['status']['summary']\
                        ['Qty_of_free_program_track_stack_positions']
                yield self.acu.http.UploadPtStack(text)
        yield self.acu.stop()
        self.set_job_done('control')
        return True, 'Track generation ended cleanly'
class CapSensor_Agent:
    """Agent to read capacitance measurements and derived distances from
    a capacitive sensor chip, publishing them to HK feeds.

    Args:
        agent (ocs.ocs_agent.OCSAgent): OCSAgent object which is running.
        config_file (str): Path to the sensor configuration file, parsed
            by meas_cap_builder.
    """

    def __init__(self, agent, config_file):
        self.agent = agent
        # BUGFIX: self.log was never set, but init_task()/acq() call
        # self.log.warn -- that raised AttributeError on the error paths.
        self.log = agent.log
        self.lock = TimeoutLock()
        # Mapping of measurement channel -> list of distance calibrations.
        self.meas_cap = meas_cap_builder(config_file)
        agg_params = {
            'frame_length': 60,
        }
        # One feed per measurement, plus one per calibration/interval pair.
        for meas, capdists in self.meas_cap.items():
            self.agent.register_feed("Cap{}".format(meas.num),
                                     record=True,
                                     agg_params=agg_params)
            for n, capdist in enumerate(capdists):
                for i in range(len(capdist.avgs) + 1):
                    self.agent.register_feed(
                        "Dist{}_Cal{}_Intvl{}".format(meas.num, n, i),
                        record=True,
                        agg_params=agg_params)
        self.initialized = False
        self.take_data = False
        self.f_poll = POLL_FREQUENCY
        # Number of poll cycles between feed publications.
        self.send_interval = self.f_poll / SEND_FREQUENCY

    def init_task(self, session, params=None):
        """init_task(auto_acquire=False)

        **Task** - Initialize the sensor chip.

        Parameters:
            auto_acquire (bool, optional): Start data acquisition after
                initialization if True.  Defaults to False.
        """
        if params is None:
            params = {}
        auto_acquire = params.get('auto_acquire', False)
        if self.initialized:
            return True, "Already Initialized Chip"
        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."
            session.set_status('starting')
            self.chip = Chip(self.meas_cap.keys())
            self.chip.trigger()
            self.initialized = True
            # Start data acquisition if requested
            if auto_acquire:
                self.agent.start('acq')
            return True, 'Cap chip initialized.'

    def acq(self, session, params=None):
        """acq(params=None)

        **Process** - Poll the chip at self.f_poll Hz and publish
        capacitance and distance data every self.send_interval polls.
        """
        if params is None:
            params = {}
        sleep_time = 1 / self.f_poll
        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn("Could not start acq because {} is already "
                              "running".format(self.lock.job))
                return False, "Could not acquire lock."
            session.set_status('running')
            self.take_data = True
            i = 0  # poll cycles since last publish
            while self.take_data:
                self.chip.poll(time.time())
                # BUGFIX: use >= so a non-integer send_interval (f_poll not
                # divisible by SEND_FREQUENCY) still triggers publishing.
                if i >= self.send_interval:
                    for meas, capdists in self.meas_cap.items():
                        cap_data = {}
                        cap_name = "Cap{}".format(meas.num)
                        caps, timeline = meas.get_data()
                        cap_data['block_name'] = cap_name
                        cap_data['timestamps'] = timeline
                        cap_data['data'] = {meas.name: caps}
                        self.agent.publish_to_feed(cap_name, cap_data)
                        for n, capdist in enumerate(capdists):
                            # Skip calibrations that have no offset yet.
                            if not capdist.init:
                                continue
                            capdist.fill_caps(caps, timeline)
                            dists = capdist.poll_dists()
                            # BUGFIX: the inner loop previously reused `i`,
                            # clobbering the poll counter.
                            for k, intvl in enumerate(dists):
                                dist_data = {}
                                dist_name = "Dist{}_Cal{}_Intvl{}".format(
                                    meas.num, n, k)
                                dist_data['block_name'] = dist_name
                                dist_data['timestamps'] = intvl[1]
                                dist_data['data'] = {capdist.name: intvl[0]}
                                self.agent.publish_to_feed(dist_name,
                                                           dist_data)
                    i = 0
                time.sleep(sleep_time)
                i += 1
        for feed in self.agent.feeds.values():
            feed.flush_buffer()
        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Stop the acq process."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        return False, 'acq is not currently running'

    def offset(self, session, params):
        """offset(meas_num, dist, wait_time=10, min_sample=100,
                  set_origin=False, logfile=None)

        **Task** - Calibrate one measurement channel: average its readings
        over wait_time and set the capacitance offset (and optionally the
        origin) for all of its distance calibrations.

        Parameters:
            meas_num (int): index of the measurement channel to calibrate
            dist (float): known distance at which the readings were taken
            wait_time (float, optional): seconds of data to accumulate
            min_sample (int, optional): minimum number of samples required
            set_origin (bool, optional): use `dist` as origin if True,
                otherwise 0
            logfile (str, optional): CSV file to append a calibration
                record to
        """
        if not self.take_data:
            return False, 'acq should be running while calibrating'
        meas_num = params['meas_num']
        meas = self.chip.meas[meas_num]
        dist = params['dist']
        wait_time = params.get('wait_time', 10)
        min_sample = params.get('min_sample', 100)
        set_origin = params.get('set_origin', False)
        logfile = params.get('logfile', None)
        session.set_status('running')
        # NOTE(review): popping the entry hides this channel from the acq
        # loop while calibrating; acq iterates self.meas_cap concurrently,
        # so this mutation is potentially racy -- confirm threading model.
        capdists = self.meas_cap.pop(meas)
        start_time = time.time()
        time.sleep(wait_time)
        if len(meas.data) < min_sample:
            self.meas_cap[meas] = capdists
            return False, 'too few samples'
        # meas.data may be mutated by the polling thread mid-mean; retry a
        # few times before giving up.
        done = False
        for _ in range(10):
            try:
                avg_cap = mean(meas.data)
                done = True
                break
            except Exception:
                continue
        if not done:
            self.meas_cap[meas] = capdists
            return False, 'try again'
        for capdist in capdists:
            capdist.set_offset(avg_cap, dist)
            if set_origin:
                capdist.set_origin(dist)
            else:
                capdist.set_origin(0)
        self.meas_cap[meas] = capdists
        if logfile is not None:
            with open(logfile, 'a') as f:
                writer = csv.writer(f)
                writer.writerow([start_time, wait_time, meas_num, dist,
                                 avg_cap, set_origin])
        return True, "Meas {} calibrated at time {}".format(meas_num,
                                                            start_time)
class RotationAgent:
    """Agent to control the rotation speed of the CHWP

    Args:
        kikusui_ip (str): IP address for the Kikusui power supply
        kikusui_port (str): Port for the Kikusui power supply
        pid_ip (str): IP address for the PID controller
        pid_port (str): Port for the PID controller
        pid_verbosity (str): Verbosity of PID controller output
    """

    def __init__(self, agent, kikusui_ip, kikusui_port, pid_ip, pid_port,
                 pid_verbosity):
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self._initialized = False
        self.take_data = False
        self.kikusui_ip = kikusui_ip
        self.kikusui_port = int(kikusui_port)
        self.pid_ip = pid_ip
        self.pid_port = pid_port
        self._pid_verbosity = pid_verbosity > 0
        self.cmd = None  # Command object for PSU commanding
        self.pid = None  # PID object for pid controller commanding

        agg_params = {'frame_length': 60}
        self.agent.register_feed('hwprotation',
                                 record=True,
                                 agg_params=agg_params)

    @ocs_agent.param('auto_acquire', default=False, type=bool)
    @ocs_agent.param('force', default=False, type=bool)
    def init_connection(self, session, params):
        """init_connection(auto_acquire=False, force=False)

        **Task** - Initialize connection to Kikusui Power Supply and PID
        Controller.

        Parameters:
            auto_acquire (bool, optional): Default is False. Starts data
                acquisition after initialization if True.
            force (bool, optional): Force initialization, even if already
                initialized. Defaults to False.
        """
        if self._initialized and not params['force']:
            self.log.info("Connection already initialized. Returning...")
            return True, "Connection already initialized"

        with self.lock.acquire_timeout(0, job='init_connection') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not run init_connection because {} is already running'
                    .format(self.lock.job))
                return False, 'Could not acquire lock'

            try:
                pmx = PMX(tcp_ip=self.kikusui_ip,
                          tcp_port=self.kikusui_port,
                          timeout=0.5)
                self.cmd = Command(pmx)
                self.log.info('Connected to Kikusui power supply')
            except ConnectionRefusedError:
                self.log.error(
                    'Could not establish connection to Kikusui power supply')
                # Stop the reactor: the agent cannot function without a PSU.
                reactor.callFromThread(reactor.stop)
                return False, 'Unable to connect to Kikusui PSU'

            try:
                self.pid = pd.PID(pid_ip=self.pid_ip,
                                  pid_port=self.pid_port,
                                  verb=self._pid_verbosity)
                self.log.info('Connected to PID controller')
            except BrokenPipeError:
                self.log.error(
                    'Could not establish connection to PID controller')
                reactor.callFromThread(reactor.stop)
                return False, 'Unable to connect to PID controller'

        self._initialized = True

        # Start 'iv_acq' Process if requested
        if params['auto_acquire']:
            self.agent.start('iv_acq')

        return True, 'Connection to PSU and PID controller established'

    def tune_stop(self, session, params):
        """tune_stop()

        **Task** - Reverse the drive direction of the PID controller and
        optimize the PID parameters for deceleration.
        """
        with self.lock.acquire_timeout(3, job='tune_stop') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not tune stop because {} is already running'.format(
                        self.lock.job))
                return False, 'Could not acquire lock'

            self.pid.tune_stop()

        return True, 'Reversing Direction'

    def tune_freq(self, session, params):
        """tune_freq()

        **Task** - Tune the PID controller setpoint to the rotation
        frequency and optimize the PID parameters for rotation.
        """
        with self.lock.acquire_timeout(3, job='tune_freq') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not tune freq because {} is already running'.format(
                        self.lock.job))
                return False, 'Could not acquire lock'

            self.pid.tune_freq()

        return True, 'Tuning to setpoint'

    @ocs_agent.param('freq', default=0., check=lambda x: 0. <= x <= 3.0)
    def declare_freq(self, session, params):
        """declare_freq(freq=0)

        **Task** - Store the entered frequency as the PID setpoint when
        ``tune_freq()`` is next called.

        Parameters:
            freq (float): Desired HWP rotation frequency
        """
        with self.lock.acquire_timeout(3, job='declare_freq') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not declare freq because {} is already running'.
                    format(self.lock.job))
                return False, 'Could not acquire lock'

            self.pid.declare_freq(params['freq'])

        return True, 'Setpoint at {} Hz'.format(params['freq'])

    @ocs_agent.param('p', default=0.2, type=float,
                     check=lambda x: 0. < x <= 8.)
    @ocs_agent.param('i', default=63, type=int,
                     check=lambda x: 0 <= x <= 200)
    @ocs_agent.param('d', default=0., type=float,
                     check=lambda x: 0. <= x < 10.)
    def set_pid(self, session, params):
        """set_pid(p=0.2, i=63, d=0.)

        **Task** - Set the PID parameters. Note these changes are for the
        current session only and will change whenever the agent container is
        reloaded.

        Parameters:
            p (float): Proportional PID value
            i (int): Integral PID value
            d (float): Derivative PID value
        """
        with self.lock.acquire_timeout(3, job='set_pid') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not set pid because {} is already running'.format(
                        self.lock.job))
                return False, 'Could not acquire lock'

            self.pid.set_pid([params['p'], params['i'], params['d']])

        return True, f"Set PID params to p: {params['p']}, i: {params['i']}, d: {params['d']}"

    def get_freq(self, session, params):
        """get_freq()

        **Task** - Return the current HWP frequency as seen by the PID
        controller.
        """
        with self.lock.acquire_timeout(3, job='get_freq') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not get freq because {} is already running'.format(
                        self.lock.job))
                return False, 'Could not acquire lock'

            freq = self.pid.get_freq()

        return True, 'Current frequency = {}'.format(freq)

    def get_direction(self, session, params):
        """get_direction()

        **Task** - Return the current HWP tune direction as seen by the PID
        controller.
        """
        with self.lock.acquire_timeout(3, job='get_direction') as acquired:
            if not acquired:
                # BUGFIX: log message previously said 'get freq'
                # (copy-paste from get_freq).
                self.log.warn(
                    'Could not get direction because {} is already running'.
                    format(self.lock.job))
                return False, 'Could not acquire lock'

            direction = self.pid.get_direction()

        return True, 'Current Direction = {}'.format(
            ['Forward', 'Reverse'][direction])

    @ocs_agent.param('direction', type=str, default='0', choices=['0', '1'])
    def set_direction(self, session, params):
        """set_direction(direction='0')

        **Task** - Set the HWP rotation direction.

        Parameters:
            direction (str): '0' for forward and '1' for reverse.
        """
        with self.lock.acquire_timeout(3, job='set_direction') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not set direction because {} is already running'.
                    format(self.lock.job))
                return False, 'Could not acquire lock'

            self.pid.set_direction(params['direction'])

        return True, 'Set direction'

    @ocs_agent.param('slope', default=1., type=float,
                     check=lambda x: -10. < x < 10.)
    @ocs_agent.param('offset', default=0.1, type=float,
                     check=lambda x: -10. < x < 10.)
    def set_scale(self, session, params):
        """set_scale(slope=1, offset=0.1)

        **Task** - Set the PID's internal conversion from input voltage to
        rotation frequency.

        Parameters:
            slope (float): Slope of the "rotation frequency vs input
                voltage" relationship
            offset (float): y-intercept of the "rotation frequency vs input
                voltage" relationship
        """
        with self.lock.acquire_timeout(3, job='set_scale') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not set scale because {} is already running'.format(
                        self.lock.job))
                return False, 'Could not acquire lock'

            self.pid.set_scale(params['slope'], params['offset'])

        return True, 'Set scale'

    def set_on(self, session, params):
        """set_on()

        **Task** - Turn on the Kikusui drive voltage.
        """
        with self.lock.acquire_timeout(3, job='set_on') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not set on because {} is already running'.format(
                        self.lock.job))
                return False, 'Could not acquire lock'

            time.sleep(1)
            self.cmd.user_input('on')

        return True, 'Set Kikusui on'

    def set_off(self, session, params):
        """set_off()

        **Task** - Turn off the Kikusui drive voltage.
        """
        with self.lock.acquire_timeout(3, job='set_off') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not set off because {} is already running'.format(
                        self.lock.job))
                return False, 'Could not acquire lock'

            time.sleep(1)
            self.cmd.user_input('off')

        return True, 'Set Kikusui off'

    @ocs_agent.param('volt', default=0, type=float,
                     check=lambda x: 0 <= x <= 35)
    def set_v(self, session, params):
        """set_v(volt=0)

        **Task** - Set the Kikusui drive voltage.

        Parameters:
            volt (float): Kikusui set voltage
        """
        with self.lock.acquire_timeout(3, job='set_v') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not set v because {} is already running'.format(
                        self.lock.job))
                return False, 'Could not acquire lock'

            time.sleep(1)
            self.cmd.user_input('V {}'.format(params['volt']))

        return True, 'Set Kikusui voltage to {} V'.format(params['volt'])

    @ocs_agent.param('volt', default=32., type=float,
                     check=lambda x: 0. <= x <= 35.)
    def set_v_lim(self, session, params):
        """set_v_lim(volt=32)

        **Task** - Set the Kikusui drive voltage limit.

        Parameters:
            volt (float): Kikusui limit voltage
        """
        with self.lock.acquire_timeout(3, job='set_v_lim') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not set v lim because {} is already running'.format(
                        self.lock.job))
                return False, 'Could not acquire lock'

            time.sleep(1)
            print(params['volt'])
            self.cmd.user_input('VL {}'.format(params['volt']))

        return True, 'Set Kikusui voltage limit to {} V'.format(
            params['volt'])

    def use_ext(self, session, params):
        """use_ext()

        **Task** - Set the Kikusui to use an external voltage control. Doing
        so enables PID control.
        """
        with self.lock.acquire_timeout(3, job='use_ext') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not use external voltage because {} is already running'
                    .format(self.lock.job))
                return False, 'Could not acquire lock'

            time.sleep(1)
            self.cmd.user_input('U')

        return True, 'Set Kikusui voltage to PID control'

    def ign_ext(self, session, params):
        """ign_ext()

        **Task** - Set the Kiksui to ignore external voltage control. Doing
        so disables the PID and switches to direct control.
        """
        with self.lock.acquire_timeout(3, job='ign_ext') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not ignore external voltage because {} is already running'
                    .format(self.lock.job))
                return False, 'Could not acquire lock'

            time.sleep(1)
            self.cmd.user_input('I')

        return True, 'Set Kikusui voltage to direct control'

    @ocs_agent.param('test_mode', default=False, type=bool)
    def iv_acq(self, session, params):
        """iv_acq(test_mode=False)

        **Process** - Start Kikusui data acquisition.

        Parameters:
            test_mode (bool, optional): Run the Process loop only once.
                This is meant only for testing. Default is False.

        Notes:
            The most recent data collected is stored in the session data in
            the structure::

                >>> response.session['data']
                {'kikusui_volt': 0,
                 'kikusui_curr': 0,
                 'last_updated': 1649085992.719602}
        """
        with self.lock.acquire_timeout(timeout=0, job='iv_acq') as acquired:
            if not acquired:
                self.log.warn(
                    'Could not start iv acq because {} is already running'.
                    format(self.lock.job))
                return False, 'Could not acquire lock'

            session.set_status('running')
            last_release = time.time()
            self.take_data = True

            while self.take_data:
                # Relinquish sampling lock occasionally.
                if time.time() - last_release > 1.:
                    last_release = time.time()
                    if not self.lock.release_and_acquire(timeout=10):
                        self.log.warn(f"Failed to re-acquire sampling lock, "
                                      f"currently held by {self.lock.job}.")
                        continue

                data = {
                    'timestamp': time.time(),
                    'block_name': 'HWPKikusui_IV',
                    'data': {}
                }

                v_msg, v_val = self.cmd.user_input('V?')
                i_msg, i_val = self.cmd.user_input('C?')

                data['data']['kikusui_volt'] = v_val
                data['data']['kikusui_curr'] = i_val

                self.agent.publish_to_feed('hwprotation', data)

                session.data = {
                    'kikusui_volt': v_val,
                    'kikusui_curr': i_val,
                    'last_updated': time.time()
                }

                time.sleep(1)

                if params['test_mode']:
                    break

        self.agent.feeds['hwprotation'].flush_buffer()
        # BUGFIX: fixed 'Acqusition' typo in returned message.
        return True, 'Acquisition exited cleanly'

    def _stop_iv_acq(self, session, params):
        """Stop iv_acq process."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data'

        return False, 'acq is not currently running'
class SynaccessAgent:
    """OCS agent for controlling a Synaccess networked power strip.

    All operations are performed as HTTP GET requests against the strip's
    ``cmd.cgi`` endpoint, with basic-auth credentials embedded in the URL.
    """

    def __init__(self, agent, ip_address, username, password):
        """
        Initializes the class variables

        Args:
            ip_address(str): IP Address for the agent.
            username(str): username credential to login to strip
            password(str): password credential to login to strip
        """
        self.agent = agent
        self.lock = TimeoutLock()
        self.ip_address = ip_address
        self.user = username
        self.passw = password

    def get_status(self, session, params=None):
        # Query the on/off state of all five outlets ($A5 command).
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                # NOTE(review): credentials are sent in the URL and the
                # request has no timeout= argument, so a hung strip blocks
                # this Task indefinitely -- confirm acceptable for the
                # deployment network.
                req = "http://" + self.user + ":" + self.passw + "@" +\
                    self.ip_address + "/cmd.cgi?$A5"
                r = requests.get(req)
                # The $A5 reply encodes outlet states as a bit string;
                # slice out the five status characters and reverse so that
                # index 0 corresponds to outlet 1.
                resp = str(r.content)[6:11][::-1]
                ret_str = []
                for x in resp:
                    if x == '1':
                        ret_str.append('on')
                    else:
                        ret_str.append('off')
                return True, 'status outlet [1,2,3,4,5] is {}'.format(ret_str)
            else:
                return False, "Could not acquire lock"

    def reboot(self, session, params=None):
        # Power-cycle a single outlet ($A4 command); params['outlet'] is the
        # 1-based outlet number.
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                req = "http://" + self.user + ":" + \
                    self.passw + "@" + self.ip_address + \
                    "/cmd.cgi?$A4" + " " + str(params['outlet'])
                requests.get(req)
                return True, 'Rebooted outlet {}'.format(params['outlet'])
            else:
                return False, "Could not acquire lock"

    def set_outlet(self, session, params=None):
        """
        Sets a particular outlet to on/off

        Args:
            outlet (int): the outlet that we are changing the state of
            on (bool): the new state
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                # $A3 sets one outlet: "1" = on, "0" = off.
                if params['on']:
                    on = "1"
                else:
                    on = "0"
                req = "http://" + self.user + ":" + self.passw + "@" + \
                    self.ip_address + "/cmd.cgi?$A3" + " " + \
                    str(params['outlet']) + " " + on
                requests.get(req)
                return True, 'Set outlet {} to {}'.\
                    format(params['outlet'], params['on'])
            else:
                return False, "Could not acquire lock"

    def set_all(self, session, params=None):
        """
        Sets all outlets to on/off

        Args:
            on (bool): the new state
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                # $A7 sets every outlet at once: "1" = on, "0" = off.
                on = "0"
                if params['on']:
                    on = "1"
                req = "http://" + self.user + ":" + self.passw + "@" +\
                    self.ip_address + "/cmd.cgi?$A7" + " " + on
                requests.get(req)
                return True, 'Set all outlets to {}'.format(params['on'])
            else:
                return False, "Could not acquire lock"
class TektronixAWGAgent:
    """Tektronix3021c Agent.

    Args:
        ip_address (string): the IP address of the gpib to ethernet
            controller connected to the function generator.

        gpib_slot (int): the gpib address currently set
            on the function generator.
    """

    def __init__(self, agent, ip_address, gpib_slot):
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.job = None

        self.ip_address = ip_address
        self.gpib_slot = gpib_slot
        self.monitor = False

        # Interface handle; created in init_awg.
        self.awg = None

        # Registers data feeds
        agg_params = {
            'frame_length': 60,
        }
        self.agent.register_feed('awg',
                                 record=True,
                                 agg_params=agg_params)

    def init_awg(self, session, params=None):
        """ Task to connect to Tektronix AWG """

        with self.lock.acquire_timeout(0) as acquired:
            if not acquired:
                return False, "Could not acquire lock"

            try:
                self.awg = TektronixInterface(self.ip_address, self.gpib_slot)
                self.idn = self.awg.identify()

            except socket.timeout as e:
                self.log.error("""Tektronix AWG
                               timed out during connect -> {}""".format(e))
                return False, "Timeout"

            self.log.info("Connected to AWG: {}".format(self.idn))

        return True, 'Initialized AWG.'

    def set_frequency(self, session, params=None):
        """
        Sets frequency of function generator:

        Args:
            frequency (float): Frequency to set in Hz.
                Must be between 0 and 25,000,000 (exclusive).
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                freq = params.get("frequency")

                try:
                    # BUGFIX: convert to float before the range comparison --
                    # a numeric string previously raised TypeError at the
                    # 0 < freq < 25E6 check.
                    freq = float(freq)
                except ValueError as e:
                    return False, """Frequency must be a float or int -> {}""".format(e)
                except TypeError as e:
                    return False, """Frequency must not be of NoneType -> {}""".format(e)

                if 0 < freq < 25E6:
                    self.awg.set_freq(freq)

                    data = {'timestamp': time.time(),
                            'block_name': "AWG_frequency_cmd",
                            'data': {'AWG_frequency_cmd': freq}
                            }
                    self.agent.publish_to_feed('awg', data)
                else:
                    return False, """Invalid input: Frequency must be between 0 and 25,000,000 Hz"""
            else:
                return False, "Could not acquire lock"

        # BUGFIX: previously formatted the whole params dict instead of the
        # commanded frequency.
        return True, 'Set frequency {} Hz'.format(freq)

    def set_amplitude(self, session, params=None):
        """
        Sets current of power supply:

        Args:
            amplitude (float): Peak to Peak voltage to set.
                Must be between 0 and 10 (exclusive).
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                amp = params.get('amplitude')

                try:
                    # BUGFIX: convert to float before the range comparison
                    # (see set_frequency).
                    amp = float(amp)
                except ValueError as e:
                    return False, """Amplitude must be a float or int -> {}""".format(e)
                except TypeError as e:
                    return False, """Amplitude must not be of NoneType -> {}""".format(e)

                if 0 < amp < 10:
                    self.awg.set_amp(amp)

                    data = {'timestamp': time.time(),
                            'block_name': "AWG_amplitude_cmd",
                            'data': {'AWG_amplitude_cmd': amp}
                            }
                    self.agent.publish_to_feed('awg', data)
                else:
                    return False, """Amplitude must be between 0 and 10 Volts peak to peak"""
            else:
                return False, "Could not acquire lock"

        # BUGFIX: previously formatted the whole params dict.
        return True, 'Set amplitude to {} Vpp'.format(amp)

    def set_output(self, session, params=None):
        """
        Task to turn channel on or off.

        Args:
            state (bool): True for on, False for off.
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                state = params.get("state")

                # BUGFIX: bool() never raises ValueError/TypeError, so the
                # old try/except validation was dead code and state=None
                # crashed later at int(state). Guard explicitly instead.
                if state is None:
                    return False, "State must not be of NoneType"

                self.awg.set_output(state)

                data = {'timestamp': time.time(),
                        'block_name': "AWG_output_cmd",
                        'data': {'AWG_output_cmd': int(state)}
                        }
                self.agent.publish_to_feed('awg', data)
            else:
                return False, "Could not acquire lock"

        # BUGFIX: previously formatted the whole params dict.
        return True, 'Set Output to {}.'.format(state)
class PysmurfController:
    """
    Controller object for running pysmurf scripts and functions.

    Arguments
    ---------
    agent: ocs.ocs_agent.OCSAgent
        OCSAgent object which is running
    args: Namespace
        argparse namespace with site_config and agent specific arguments

    Attributes
    ----------
    agent: ocs.ocs_agent.OCSAgent
        OCSAgent object which is running
    log: txaio.tx.Logger
        txaio logger object created by agent
    prot: PysmurfScriptProtocol
        protocol used to call and monitor external pysmurf scripts
    protocol_lock: ocs.ocs_twisted.TimeoutLock
        lock to protect multiple pysmurf scripts from running simultaneously.
    """

    def __init__(self, agent, args):
        self.agent: ocs_agent.OCSAgent = agent
        self.log = agent.log

        self.prot = None
        self.protocol_lock = TimeoutLock()

        self.current_session = None

        if args.monitor_id is not None:
            self.agent.subscribe_on_start(
                self._on_session_data,
                'observatory.{}.feeds.pysmurf_session_data'.format(
                    args.monitor_id),
            )

    def _on_session_data(self, _data):
        # Feed callback: forward session data/log messages published by the
        # matching pysmurf-monitor into the currently running OCS session.
        data, feed = _data

        if self.current_session is not None:
            # Only accept messages from our own smurf publisher.
            if data['id'] == os.environ.get("SMURFPUB_ID"):
                if data['type'] == 'session_data':
                    if isinstance(data['payload'], dict):
                        self.current_session.data.update(data['payload'])
                    else:
                        self.log.warn("Session data not passed as a dict!! "
                                      "Skipping...")
                elif data['type'] == 'session_log':
                    if isinstance(data['payload'], str):
                        self.current_session.add_message(data['payload'])

    @inlineCallbacks
    def _run_script(self, script, args, log, session):
        """
        Runs a pysmurf control script. Can only run from the reactor.

        Arguments
        ----------
        script: string
            path to the script you wish to run
        args: list, optional
            List of command line arguments to pass to the script.
            Defaults to [].
        log: string/bool, optional
            Determines if and how the process's stdout should be logged.
            You can pass the path to a logfile, True to use the agent's log,
            or False to not log at all.
        """

        with self.protocol_lock.acquire_timeout(0, job=script) as acquired:
            if not acquired:
                return False, "The requested script cannot be run because " \
                              "script {} is already running".format(
                                  self.protocol_lock.job)

            self.current_session = session
            try:
                # IO is not really safe from the reactor thread, so we possibly
                # need to find another way to do this if people use it and it
                # causes problems...
                logger = None
                if isinstance(log, str):
                    self.log.info("Logging output to file {}".format(log))
                    log_file = yield threads.deferToThread(open, log, 'a')
                    logger = Logger(
                        observer=FileLogObserver(log_file, log_formatter))
                elif log:
                    # If log==True, use agent's logger
                    logger = self.log

                self.prot = PysmurfScriptProtocol(script, log=logger)
                self.prot.deferred = Deferred()
                python_exec = sys.executable

                cmd = [python_exec, '-u', script] + list(map(str, args))

                self.log.info("{exec}, {cmd}", exec=python_exec, cmd=cmd)

                reactor.spawnProcess(self.prot, python_exec, cmd,
                                     env=os.environ)

                rc = yield self.prot.deferred

                return (rc == 0
                        ), "Script has finished with exit code {}".format(rc)

            finally:
                # Sleep to allow any remaining messages to be put into the
                # session var
                yield dsleep(1.0)
                self.current_session = None

    @inlineCallbacks
    def run_script(self, session, params=None):
        """run_script(script, args=[], log=True)

        Run task. Runs a pysmurf control script.

        Arguments
        ----------
        script: string
            path to the script you wish to run
        args: list, optional
            List of command line arguments to pass to the script.
            Defaults to [].
        log: string/bool, optional
            Determines if and how the process's stdout should be logged.
            You can pass the path to a logfile, True to use the agent's log,
            or False to not log at all.
        """
        ok, msg = yield self._run_script(params['script'],
                                         params.get('args', []),
                                         params.get('log', True),
                                         session)

        return ok, msg

    def abort_script(self, session, params=None):
        """
        Aborts the currently running script.
        """
        # BUGFIX: guard against no script ever having been started --
        # previously this raised AttributeError on self.prot == None.
        if self.prot is None or self.prot.transport is None:
            return False, "No script is currently running"
        self.prot.transport.signalProcess('KILL')
        return True, "Aborting process"

    @inlineCallbacks
    def tune_squids(self, session, params=None):
        """
        Task to run /config/scripts/pysmurf/tune_squids.py

        Arguments
        ---------
        args: list, optional
            List of command line arguments to pass to the script.
            Defaults to [].
        log: string/bool, optional
            Determines if and how the process's stdout should be logged.
            You can pass the path to a logfile, True to use the agent's log,
            or False to not log at all.
        """
        if params is None:
            params = {}

        ok, msg = yield self._run_script(
            '/config/scripts/pysmurf/tune_squids.py',
            params.get('args', []),
            params.get('log', True),
            session)

        return ok, msg
class LS370_Agent:
    """Agent to connect to a single Lakeshore 370 device.

    Args:
        name (ApplicationSession): ApplicationSession for the Agent.
        port (str): Serial port for the 370 device, e.g. '/dev/ttyUSB2'
        fake_data (bool, optional): generates random numbers without connecting
            to the LS if True.
        dwell_time_delay (int, optional): Amount of time, in seconds, to
            delay data collection after switching channels. Note this time
            should not include the change pause time, which is automatically
            accounted for. Will automatically be reduced to dwell_time - 1
            second if it is set longer than a channel's dwell time. This
            ensures at least one second of data collection at the end of a
            scan.
    """

    def __init__(self, agent, name, port, fake_data=False, dwell_time_delay=0):

        # self._acq_proc_lock is held for the duration of the acq Process.
        # Tasks that require acq to not be running, at all, should use
        # this lock.
        self._acq_proc_lock = TimeoutLock()

        # self._lock is held by the acq Process only when accessing
        # the hardware but released occasionally so that (short) Tasks
        # may run.  Use a YieldingLock to guarantee that a waiting
        # Task gets activated preferentially, even if the acq thread
        # immediately tries to reacquire.
        self._lock = YieldingLock(default_timeout=5)

        self.name = name
        self.port = port
        self.fake_data = fake_data
        self.dwell_time_delay = dwell_time_delay
        self.module = None
        self.thermometers = []

        self.log = agent.log
        self.initialized = False
        self.take_data = False

        self.agent = agent

        # Registers temperature feeds
        agg_params = {
            'frame_length': 10 * 60  # [sec]
        }
        self.agent.register_feed('temperatures',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    def init_lakeshore_task(self, session, params=None):
        """init_lakeshore_task(params=None)

        Perform first time setup of the Lakeshore 370 communication.

        Args:
            params (dict): Parameters dictionary for passing parameters to
                task.

        Parameters:
            auto_acquire (bool, optional): Default is False. Starts data
                acquisition after initialization if True.
            force (bool, optional): Force re-initialize the lakeshore if True.
        """
        if params is None:
            params = {}

        if self.initialized and not params.get('force', False):
            self.log.info("Lakeshore already initialized. Returning...")
            return True, "Already initialized"

        with self._lock.acquire_timeout(job='init') as acquired1, \
                self._acq_proc_lock.acquire_timeout(timeout=0., job='init') \
                as acquired2:
            if not acquired1:
                self.log.warn(f"Could not start init because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"
            if not acquired2:
                self.log.warn(f"Could not start init because "
                              f"{self._acq_proc_lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            if self.fake_data:
                self.res = random.randrange(1, 1000)
                session.add_message("No initialization since faking data")
                self.thermometers = ["thermA", "thermB"]
            else:
                self.module = LS370(self.port)
                print("Initialized Lakeshore module: {!s}".format(self.module))
                # BUGFIX: message previously read "initilized".
                session.add_message("Lakeshore initialized with ID: %s"
                                    % self.module.id)
                self.thermometers = [channel.name
                                     for channel in self.module.channels]

            self.initialized = True

        # Start data acquisition if requested
        if params.get('auto_acquire', False):
            self.agent.start('acq')

        return True, 'Lakeshore module initialized.'

    def start_acq(self, session, params=None):
        """acq(params=None)

        Method to start data acquisition process.
        """
        with self._acq_proc_lock.acquire_timeout(timeout=0, job='acq') \
                as acq_acquired, \
                self._lock.acquire_timeout(job='acq') as acquired:
            if not acq_acquired:
                self.log.warn(f"Could not start Process because "
                              f"{self._acq_proc_lock.job} is already running")
                return False, "Could not acquire lock"
            if not acquired:
                self.log.warn(f"Could not start Process because "
                              f"{self._lock.job} is holding the lock")
                return False, "Could not acquire lock"

            session.set_status('running')
            self.log.info("Starting data acquisition for {}".format(
                self.agent.agent_address))
            previous_channel = None
            last_release = time.time()

            self.take_data = True
            while self.take_data:
                # Relinquish sampling lock occasionally.
                if time.time() - last_release > 1.:
                    last_release = time.time()
                    if not self._lock.release_and_acquire(timeout=10):
                        self.log.warn(f"Failed to re-acquire sampling lock, "
                                      f"currently held by {self._lock.job}.")
                        continue

                if self.fake_data:
                    data = {
                        'timestamp': time.time(),
                        'block_name': 'fake-data',
                        'data': {}
                    }
                    for therm in self.thermometers:
                        reading = np.random.normal(self.res, 20)
                        data['data'][therm] = reading
                    time.sleep(.1)
                else:
                    active_channel = self.module.get_active_channel()

                    # The 370 reports the last updated measurement repeatedly
                    # during the "pause change time", this results in several
                    # stale datapoints being recorded. To get around this we
                    # query the pause time and skip data collection during it
                    # if the channel has changed (as it would if autoscan is
                    # enabled.)
                    if previous_channel != active_channel:
                        if previous_channel is not None:
                            pause_time = active_channel.get_pause()
                            self.log.debug("Pause time for {c}: {p}",
                                           c=active_channel.channel_num,
                                           p=pause_time)

                            dwell_time = active_channel.get_dwell()
                            self.log.debug("User set dwell_time_delay: {p}",
                                           p=self.dwell_time_delay)

                            # Check user set dwell time isn't too long
                            if self.dwell_time_delay > dwell_time:
                                self.log.warn("WARNING: User set dwell_time_delay of "
                                              + "{delay} s is larger than channel "
                                              + "dwell time of {chan_time} s. If "
                                              + "you are autoscanning this will "
                                              + "cause no data to be collected. "
                                              + "Reducing dwell time delay to {s} s.",
                                              delay=self.dwell_time_delay,
                                              chan_time=dwell_time,
                                              s=dwell_time - 1)
                                total_time = pause_time + dwell_time - 1
                            else:
                                total_time = pause_time + self.dwell_time_delay

                            for i in range(total_time):
                                self.log.debug("Sleeping for {t} more seconds...",
                                               t=total_time - i)
                                time.sleep(1)

                        # Track the last channel we measured
                        previous_channel = self.module.get_active_channel()

                    # Setup feed dictionary
                    channel_str = active_channel.name.replace(' ', '_')
                    data = {
                        'timestamp': time.time(),
                        'block_name': channel_str,
                        'data': {}
                    }

                    # Collect both temperature and resistance values from each Channel
                    data['data'][channel_str + '_T'] = \
                        self.module.get_temp(unit='kelvin',
                                             chan=active_channel.channel_num)
                    data['data'][channel_str + '_R'] = \
                        self.module.get_temp(unit='ohms',
                                             chan=active_channel.channel_num)

                    # Courtesy in case active channel has not changed
                    time.sleep(0.1)

                # NOTE(review): publishes via session.app while other agents
                # here use self.agent.publish_to_feed -- confirm intended.
                session.app.publish_to_feed('temperatures', data)

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """
        Stops acq process.
        """
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'

    def set_heater_range(self, session, params):
        """
        Adjust the heater range for servoing cryostat. Wait for a specified
        amount of time after the change.

        :param params: dict with 'heater', 'range', 'wait' keys
        :type params: dict

        heater - which heater to set range for, 'sample' by default (and the
                 only implemented one)
        range - the heater range value to change to
        wait - time in seconds after changing the heater value to wait, allows
               the servo to adjust to the new heater range, typical value of
               ~600 seconds
        """
        with self._lock.acquire_timeout(job='set_heater_range') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            heater_string = params.get('heater', 'sample')
            if heater_string.lower() == 'sample':
                heater = self.module.sample_heater
            elif heater_string.lower() == 'still':
                # TODO: add still heater class to driver
                # heater = self.module.still_heater
                self.log.warn(f"{heater_string} heater not yet implemented in this agent, please modify client")
                # BUGFIX: previously fell through to heater.get_heater_range()
                # with ``heater`` unbound, raising NameError.
                return False, f"{heater_string} heater not implemented"
            else:
                return False, f"Unknown heater '{heater_string}'"

            current_range = heater.get_heater_range()

            if params['range'] == current_range:
                print("Current heater range matches commanded value. "
                      "Proceeding unchanged.")
            else:
                heater.set_heater_range(params['range'])
                # Allow the servo to adjust to the new heater range.
                time.sleep(params['wait'])

        return True, f'Set {heater_string} heater range to {params["range"]}'

    def set_excitation_mode(self, session, params):
        """
        Set the excitation mode of a specified channel.

        :param params: dict with "channel" and "mode" keys for
                       Channel.set_excitation_mode()
        :type params: dict
        """
        with self._lock.acquire_timeout(job='set_excitation_mode') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            self.module.chan_num2channel(
                params['channel']).set_excitation_mode(params['mode'])
            session.add_message(
                f'post message in agent for Set channel {params["channel"]} excitation mode to {params["mode"]}')
            print(
                f'print statement in agent for Set channel {params["channel"]} excitation mode to {params["mode"]}')

        return True, f'return text for Set channel {params["channel"]} excitation mode to {params["mode"]}'

    def set_excitation(self, session, params):
        """
        Set the excitation voltage/current value of a specified channel.

        :param params: dict with "channel" and "value" keys for
                       Channel.set_excitation()
        :type params: dict
        """
        with self._lock.acquire_timeout(job='set_excitation') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            current_excitation = self.module.chan_num2channel(
                params['channel']).get_excitation()
            if params['value'] == current_excitation:
                print(
                    f'Channel {params["channel"]} excitation already set to {params["value"]}')
            else:
                self.module.chan_num2channel(params['channel']).set_excitation(
                    params['value'])
                session.add_message(
                    f'Set channel {params["channel"]} excitation to {params["value"]}')
                print(
                    f'Set channel {params["channel"]} excitation to {params["value"]}')

        return True, f'Set channel {params["channel"]} excitation to {params["value"]}'

    def set_pid(self, session, params):
        """
        Set the PID parameters for servo control of fridge.

        :param params: dict with "P", "I", and "D" keys for Heater.set_pid()
        :type params: dict
        """
        with self._lock.acquire_timeout(job='set_pid') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            self.module.sample_heater.set_pid(params["P"], params["I"],
                                              params["D"])
            session.add_message(
                f'post message text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}')
            print(
                f'print text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}')

        return True, f'return text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}'

    def set_active_channel(self, session, params):
        """
        Set the active channel on the LS370.

        :param params: dict with "channel" number
        :type params: dict
        """
        with self._lock.acquire_timeout(job='set_active_channel') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            self.module.set_active_channel(params["channel"])
            session.add_message(
                f'post message text for set channel to {params["channel"]}')
            print(f'print text for set channel to {params["channel"]}')

        return True, f'return text for set channel to {params["channel"]}'

    def set_autoscan(self, session, params):
        """
        Sets autoscan on the LS370.

        :param params: dict with "autoscan" value
        """
        with self._lock.acquire_timeout(job='set_autoscan') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            if params['autoscan']:
                self.module.enable_autoscan()
                self.log.info('enabled autoscan')
            else:
                self.module.disable_autoscan()
                self.log.info('disabled autoscan')

        return True, 'Set autoscan to {}'.format(params['autoscan'])

    def servo_to_temperature(self, session, params):
        """Servo to temperature passed into params.

        :param params: dict with "temperature" Heater.set_setpoint() in units
                       of K, and "channel" as an integer (optional)
        :type params: dict
        """
        with self._lock.acquire_timeout(
                job='servo_to_temperature') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            # Check we're in correct control mode for servo.
            if self.module.sample_heater.mode != 'Closed Loop':
                session.add_message(
                    'Changing control to Closed Loop mode for servo.')
                self.module.sample_heater.set_mode("Closed Loop")

            # Check we aren't autoscanning.
            if self.module.get_autoscan() is True:
                session.add_message(
                    'Autoscan is enabled, disabling for PID control on '
                    'dedicated channel.')
                self.module.disable_autoscan()

            # Check to see if we passed an input channel, and if so change to it
            if params.get("channel", False) is not False:
                session.add_message(
                    f'Changing heater input channel to {params.get("channel")}')
                self.module.sample_heater.set_input_channel(
                    params.get("channel"))

            # Check we're scanning same channel expected by heater for control.
            if self.module.get_active_channel().channel_num != int(
                    self.module.sample_heater.input):
                session.add_message(
                    'Changing active channel to expected heater control input')
                self.module.set_active_channel(
                    int(self.module.sample_heater.input))

            # Check we're setup to take correct units.
            if self.module.sample_heater.units != 'kelvin':
                session.add_message(
                    'Setting preferred units to Kelvin on heater control.')
                self.module.sample_heater.set_units('kelvin')

            # Make sure we aren't servoing too high in temperature.
            if params["temperature"] > 1:
                return False, 'Servo temperature is set above 1K. Aborting.'

            self.module.sample_heater.set_setpoint(params["temperature"])

        return True, f'Setpoint now set to {params["temperature"]} K'

    def check_temperature_stability(self, session, params):
        """Check servo temperature stability is within threshold.

        :param params: dict with "measurements" and "threshold" parameters
        :type params: dict

        measurements - number of measurements to average for stability check
        threshold - amount within which the average needs to be to the setpoint
                    for stability
        """
        with self._lock.acquire_timeout(
                job='check_temp_stability') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            setpoint = float(self.module.sample_heater.get_setpoint())

            if params is None:
                params = {'measurements': 10, 'threshold': 0.5e-3}

            test_temps = []

            for i in range(params['measurements']):
                test_temps.append(self.module.get_temp())
                # sampling rate is 10 readings/sec, so wait 0.1 s for a new
                # reading
                time.sleep(.1)

            mean = np.mean(test_temps)
            session.add_message(
                f'Average of {params["measurements"]} measurements is {mean} K.')
            print(
                f'Average of {params["measurements"]} measurements is {mean} K.')

            if np.abs(mean - setpoint) < params['threshold']:
                print("passed threshold")
                session.add_message('Setpoint Difference: '
                                    + str(mean - setpoint))
                session.add_message(
                    f'Average is within {params["threshold"]} K threshold. Proceeding with calibration.')

                return True, f"Servo temperature is stable within {params['threshold']} K"

            else:
                print("we're in the else")
                # adjust_heater(t,rest)

        return False, f"Temperature not stable within {params['threshold']}."

    def set_output_mode(self, session, params=None):
        """
        Set output mode of the heater.

        :param params: dict with "heater" and "mode" parameters
        :type params: dict

        heater - Specifies which heater to control. Either 'sample' or 'still'
        mode - Specifies mode of heater. Can be "Off", "Monitor Out",
               "Open Loop", "Zone", "Still", "Closed Loop", or "Warm up"
        """
        with self._lock.acquire_timeout(job='set_output_mode') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            if params['heater'].lower() == 'still':
                # self.module.still_heater.set_mode(params['mode']) #TODO: add still heater to driver
                self.log.warn(f"{params['heater']} heater not yet implemented in this agent, please modify client")
            if params['heater'].lower() == 'sample':
                self.module.sample_heater.set_mode(params['mode'])
            self.log.info("Set {} output mode to {}".format(
                params['heater'], params['mode']))

        return True, "Set {} output mode to {}".format(params['heater'],
                                                       params['mode'])

    def set_heater_output(self, session, params=None):
        """
        Set display type and output of the heater.

        :param params: dict with "heater", "display", and "output" parameters
        :type params: dict

        heater - Specifies which heater to control. Either 'sample' or 'still'
        output - Specifies heater output value. If display is set to "Current"
                 or heater is "still", can be any number between 0 and 100.
                 If display is set to "Power", can be any number between 0 and
                 the maximum allowed power.
        display (opt) - Specifies heater display type. Can be "Current" or
                        "Power". If None, heater display is not reset before
                        setting output.
        """
        with self._lock.acquire_timeout(job='set_heater_output') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            heater = params['heater'].lower()
            output = params['output']

            display = params.get('display', None)

            if heater == 'still':
                # TODO: add still heater to driver
                # self.module.still_heater.set_heater_output(output, display_type=display)
                self.log.warn(f"{heater} heater not yet implemented in this agent, please modify client")
            if heater.lower() == 'sample':
                self.log.info("display: {}\toutput: {}".format(display,
                                                               output))
                self.module.sample_heater.set_heater_output(
                    output, display_type=display)

            self.log.info("Set {} heater display to {}, output to {}".format(
                heater, display, output))

            session.set_status('running')

            data = {'timestamp': time.time(),
                    'block_name': '{}_heater_out'.format(heater),
                    'data': {'{}_heater_out'.format(heater): output}
                    }
            # NOTE(review): publishes via session.app like start_acq --
            # confirm intended OCS API.
            session.app.publish_to_feed('temperatures', data)

        return True, "Set {} display to {}, output to {}".format(
            heater, display, output)

    def get_channel_attribute(self, session, params):
        """Gets an arbitrary channel attribute, stored in the session.data dict

        Parameters
        ----------
        params : dict
            Contains parameters 'attribute' (not optional), 'channel'
            (optional, default '1').

        Channel attributes stored in the session.data object are in the
        structure::

            >>> session.data
            {"calibration_curve": 21,
             "dwell": 3,
             "excitation": 6.32e-6,
             "excitation_mode": "voltage",
             "excitation_power": 2.0e-15,
             "kelvin_reading": 100.0e-3,
             "pause": 3,
             "reading_status": ["T.UNDER"],
             "resistance_range": 2.0e-3,
             "resistance_reading": 10.0e3,
             "temperature_coefficient": "negative",
            }

        Note: Only attribute called with this method will be populated for the
        given channel. This example shows all available attributes.
        """
        with self._lock.acquire_timeout(job=f"get_{params['attribute']}",
                                        timeout=3) as acquired:
            if not acquired:
                print(f"Lock could not be acquired because it is held by {self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get channel
            channel_key = int(params.get('channel', 1))
            channel = self.module.chan_num2channel(channel_key)

            # check that attribute is a valid channel method
            if getattr(channel, f"get_{params['attribute']}",
                       False) is not False:
                query = getattr(channel, f"get_{params['attribute']}")
            else:
                # BUGFIX: previously fell through with ``query`` unbound,
                # raising NameError for an invalid attribute.
                return False, f"Invalid channel attribute {params['attribute']}"

            # get attribute
            resp = query()
            session.data[params['attribute']] = resp

            time.sleep(.1)

        return True, f"Retrieved {channel.name} {params['attribute']}"

    def get_heater_attribute(self, session, params):
        """Gets an arbitrary heater attribute, stored in the session.data dict

        Parameters
        ----------
        params : dict
            Contains parameters 'attribute'.

        Heater attributes stored in the session.data object are in the
        structure::

            >>> session.data
            {"heater_range": 1e-3,
             "heater_setup": ["current", 1e-3, 120],
             "input_channel": 6,
             "manual_out": 0.0,
             "mode": "Closed Loop",
             "pid": (80, 10, 0),
             "setpoint": 100e-3,
             "still_output": 10.607,
             "units": "kelvin",
            }

        Note: Only the attribute called with this method will be populated,
        this example just shows all available attributes.
        """
        with self._lock.acquire_timeout(job=f"get_{params['attribute']}",
                                        timeout=3) as acquired:
            if not acquired:
                print(f"Lock could not be acquired because it is held by {self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater = self.module.sample_heater

            # check that attribute is a valid heater method
            if getattr(heater, f"get_{params['attribute']}",
                       False) is not False:
                query = getattr(heater, f"get_{params['attribute']}")
            else:
                # BUGFIX: same unbound-``query`` NameError as
                # get_channel_attribute.
                return False, f"Invalid heater attribute {params['attribute']}"

            # get attribute
            resp = query()
            session.data[params['attribute']] = resp

            time.sleep(.1)

        return True, f"Retrieved sample heater {params['attribute']}"
class LS372_Agent:
    """Agent to connect to a single Lakeshore 372 device.

    Args:
        name (ApplicationSession): ApplicationSession for the Agent.
        ip (str): IP Address for the 372 device.
        fake_data (bool, optional): generates random numbers without connecting
            to LS if True.
        dwell_time_delay (int, optional): Amount of time, in seconds, to
            delay data collection after switching channels. Note this time
            should not include the change pause time, which is automatically
            accounted for. Will automatically be reduced to dwell_time - 1
            second if it is set longer than a channel's dwell time. This
            ensures at least one second of data collection at the end of a
            scan.
        enable_control_chan (bool, optional):
            If True, will read data from the control channel each iteration of
            the acq loop. Defaults to False.
    """

    def __init__(self, agent, name, ip, fake_data=False, dwell_time_delay=0,
                 enable_control_chan=False):
        # self._acq_proc_lock is held for the duration of the acq Process.
        # Tasks that require acq to not be running, at all, should use
        # this lock.
        self._acq_proc_lock = TimeoutLock()

        # self._lock is held by the acq Process only when accessing
        # the hardware but released occasionally so that (short) Tasks
        # may run.  Use a YieldingLock to guarantee that a waiting
        # Task gets activated preferentially, even if the acq thread
        # immediately tries to reacquire.
        self._lock = YieldingLock(default_timeout=5)

        self.name = name
        self.ip = ip
        self.fake_data = fake_data
        self.dwell_time_delay = dwell_time_delay
        self.module = None          # LS372 driver instance, set in init_lakeshore
        self.thermometers = []

        self.log = agent.log
        self.initialized = False
        self.take_data = False      # acq loop run flag, cleared by _stop_acq
        self.control_chan_enabled = enable_control_chan

        self.agent = agent
        # Registers temperature feeds
        agg_params = {
            'frame_length': 10 * 60  # [sec]
        }
        self.agent.register_feed('temperatures',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    @ocs_agent.param('_')
    def enable_control_chan(self, session, params=None):
        """enable_control_chan()

        **Task** - Enables readout on the control channel (Channel A).
        """
        self.control_chan_enabled = True
        return True, 'Enabled control channel'

    @ocs_agent.param('_')
    def disable_control_chan(self, session, params=None):
        """disable_control_chan()

        **Task** - Disables readout on the control channel (Channel A).
        """
        self.control_chan_enabled = False
        return True, 'Disabled control channel'

    @ocs_agent.param('auto_acquire', default=False, type=bool)
    @ocs_agent.param('acq_params', type=dict, default=None)
    @ocs_agent.param('force', default=False, type=bool)
    @ocs_agent.param('configfile', type=str, default=None)
    def init_lakeshore(self, session, params=None):
        """init_lakeshore(auto_acquire=False, acq_params=None, force=False, configfile=None)

        **Task** - Perform first time setup of the Lakeshore 372
        communication.

        Parameters:
            auto_acquire (bool, optional): Default is False. Starts data
                acquisition after initialization if True.
            acq_params (dict, optional): Params to pass to acq process if
                auto_acquire is True.
            force (bool, optional): Force initialization, even if already
                initialized. Defaults to False.
            configfile (str, optional): .yaml file for initializing 372
                channel settings
        """
        if params is None:
            params = {}

        if self.initialized and not params.get('force', False):
            self.log.info("Lakeshore already initialized. Returning...")
            return True, "Already initialized"

        with self._lock.acquire_timeout(job='init') as acquired1, \
                self._acq_proc_lock.acquire_timeout(timeout=0., job='init') \
                as acquired2:
            if not acquired1:
                self.log.warn(f"Could not start init because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"
            if not acquired2:
                self.log.warn(f"Could not start init because "
                              f"{self._acq_proc_lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            if self.fake_data:
                self.res = random.randrange(1, 1000)
                session.add_message("No initialization since faking data")
                self.thermometers = ["thermA", "thermB"]
            else:
                try:
                    self.module = LS372(self.ip)
                except ConnectionError:
                    self.log.error("Could not connect to the LS372. Exiting.")
                    reactor.callFromThread(reactor.stop)
                    return False, 'Lakeshore initialization failed'
                except Exception as e:
                    self.log.error(f"Unhandled exception encountered: {e}")
                    reactor.callFromThread(reactor.stop)
                    return False, 'Lakeshore initialization failed'

                print("Initialized Lakeshore module: {!s}".format(self.module))
                session.add_message("Lakeshore initialized with ID: %s" % self.module.id)
                self.thermometers = [channel.name for channel in self.module.channels]

        self.initialized = True

        # Uploading configuration happens outside the `with` above because
        # input_configfile acquires self._lock itself.
        if params.get('configfile') is not None:
            self.input_configfile(session, params)
            session.add_message("Lakeshore initial configurations uploaded using: %s" % params['configfile'])

        # Start data acquisition if requested
        if params.get('auto_acquire', False):
            self.agent.start('acq', params.get('acq_params', None))

        return True, 'Lakeshore module initialized.'

    @ocs_agent.param('sample_heater', default=False, type=bool)
    @ocs_agent.param('run_once', default=False, type=bool)
    def acq(self, session, params=None):
        """acq(sample_heater=False)

        **Process** - Acquire data from the Lakeshore 372.

        Parameters:
            sample_heater (bool, optional): Default is False. Will record
                values from the sample heater, typically used to servo a DR if
                True.

        Notes:
            The most recent data collected is stored in session data in the
            structure::

                >>> response.session['data']
                {"fields":
                    {"Channel_05": {"T": 293.644, "R": 33.752, "timestamp": 1601924482.722671},
                     "Channel_06": {"T": 0, "R": 1022.44, "timestamp": 1601924499.5258765},
                     "Channel_08": {"T": 0, "R": 1026.98, "timestamp": 1601924494.8172355},
                     "Channel_01": {"T": 293.41, "R": 108.093, "timestamp": 1601924450.9315426},
                     "Channel_02": {"T": 293.701, "R": 30.7398, "timestamp": 1601924466.6130798},
                     "control": {"T": 293.701, "R": 30.7398, "timestamp": 1601924466.6130798}
                    }
                }

        """
        pm = Pacemaker(10, quantize=True)

        with self._acq_proc_lock.acquire_timeout(timeout=0, job='acq') \
                as acq_acquired, \
                self._lock.acquire_timeout(job='acq') as acquired:
            if not acq_acquired:
                self.log.warn(f"Could not start Process because "
                              f"{self._acq_proc_lock.job} is already running")
                return False, "Could not acquire lock"
            if not acquired:
                self.log.warn(f"Could not start Process because "
                              f"{self._lock.job} is holding the lock")
                return False, "Could not acquire lock"

            session.set_status('running')
            self.log.info("Starting data acquisition for {}".format(self.agent.agent_address))
            previous_channel = None
            last_release = time.time()

            session.data = {"fields": {}}

            self.take_data = True
            while self.take_data:
                pm.sleep()

                # Relinquish sampling lock occasionally.
                if time.time() - last_release > 1.:
                    last_release = time.time()
                    if not self._lock.release_and_acquire(timeout=10):
                        self.log.warn(f"Failed to re-acquire sampling lock, "
                                      f"currently held by {self._lock.job}.")
                        continue

                if self.fake_data:
                    data = {
                        'timestamp': time.time(),
                        'block_name': 'fake-data',
                        'data': {}
                    }
                    for therm in self.thermometers:
                        reading = np.random.normal(self.res, 20)
                        data['data'][therm] = reading
                    time.sleep(.1)
                else:
                    active_channel = self.module.get_active_channel()

                    # The 372 reports the last updated measurement repeatedly
                    # during the "pause change time", this results in several
                    # stale datapoints being recorded. To get around this we
                    # query the pause time and skip data collection during it
                    # if the channel has changed (as it would if autoscan is
                    # enabled.)
                    if previous_channel != active_channel:
                        if previous_channel is not None:
                            pause_time = active_channel.get_pause()
                            self.log.debug("Pause time for {c}: {p}",
                                           c=active_channel.channel_num,
                                           p=pause_time)

                            dwell_time = active_channel.get_dwell()
                            self.log.debug("User set dwell_time_delay: {p}",
                                           p=self.dwell_time_delay)

                            # Check user set dwell time isn't too long
                            if self.dwell_time_delay > dwell_time:
                                self.log.warn("WARNING: User set dwell_time_delay of " +
                                              "{delay} s is larger than channel " +
                                              "dwell time of {chan_time} s. If " +
                                              "you are autoscanning this will " +
                                              "cause no data to be collected. " +
                                              "Reducing dwell time delay to {s} s.",
                                              delay=self.dwell_time_delay,
                                              chan_time=dwell_time,
                                              s=dwell_time - 1)
                                total_time = pause_time + dwell_time - 1
                            else:
                                total_time = pause_time + self.dwell_time_delay

                            for i in range(total_time):
                                self.log.debug("Sleeping for {t} more seconds...",
                                               t=total_time - i)
                                time.sleep(1)

                        # Track the last channel we measured
                        previous_channel = self.module.get_active_channel()

                    current_time = time.time()
                    data = {
                        'timestamp': current_time,
                        'block_name': active_channel.name,
                        'data': {}
                    }

                    # Collect both temperature and resistance values from each Channel
                    channel_str = active_channel.name.replace(' ', '_')
                    temp_reading = self.module.get_temp(
                        unit='kelvin', chan=active_channel.channel_num)
                    res_reading = self.module.get_temp(
                        unit='ohms', chan=active_channel.channel_num)

                    # For data feed
                    data['data'][channel_str + '_T'] = temp_reading
                    data['data'][channel_str + '_R'] = res_reading

                session.app.publish_to_feed('temperatures', data)
                self.log.debug("{data}", data=session.data)

                # For session.data; guarded so fake-data mode does not
                # reference channel readings it never takes.
                if not self.fake_data:
                    field_dict = {channel_str: {"T": temp_reading,
                                                "R": res_reading,
                                                "timestamp": current_time}}
                    session.data['fields'].update(field_dict)

                # Also queries control channel if enabled
                if self.control_chan_enabled:
                    temp = self.module.get_temp(unit='kelvin', chan=0)
                    res = self.module.get_temp(unit='ohms', chan=0)
                    cur_time = time.time()
                    data = {
                        'timestamp': time.time(),
                        'block_name': 'control_chan',
                        'data': {
                            'control_T': temp,
                            'control_R': res
                        }
                    }
                    session.app.publish_to_feed('temperatures', data)
                    self.log.debug("{data}", data=session.data)

                    # Updates session data w/ control field
                    session.data['fields'].update({
                        'control': {
                            'T': temp,
                            'R': res,
                            'timestamp': cur_time
                        }
                    })

                if params.get("sample_heater", False):
                    # Sample Heater
                    heater = self.module.sample_heater
                    hout = heater.get_sample_heater_output()

                    current_time = time.time()
                    htr_data = {
                        'timestamp': current_time,
                        'block_name': "heaters",
                        'data': {}
                    }
                    htr_data['data']['sample_heater_output'] = hout

                    session.app.publish_to_feed('temperatures', htr_data)

                if params['run_once']:
                    break

        return True, 'Acquisition exited cleanly.'

    def _stop_acq(self, session, params=None):
        """Stops acq process."""
        if self.take_data:
            session.set_status('stopping')
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'

    @ocs_agent.param('heater', type=str)
    @ocs_agent.param('range')
    @ocs_agent.param('wait', type=float)
    def set_heater_range(self, session, params):
        """set_heater_range(heater=None, range=None, wait=0)

        **Task** - Adjust the heater range for servoing cryostat. Wait for a
        specified amount of time after the change.

        Parameters:
            heater (str): Name of heater to set range for, either 'sample' or
                'still'.
            range (str, float): see arguments in
                :func:`socs.Lakeshore.Lakeshore372.Heater.set_heater_range`
            wait (float, optional): Amount of time to wait after setting the
                heater range. This allows the servo time to adjust to the new
                range.
        """
        with self._lock.acquire_timeout(job='set_heater_range') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            heater_string = params.get('heater', 'sample')
            if heater_string.lower() == 'sample':
                heater = self.module.sample_heater
            elif heater_string.lower() == 'still':
                heater = self.module.still_heater
            else:
                # Previously an unknown heater name left `heater` unbound and
                # raised NameError below; fail explicitly instead.
                return False, f"Unknown heater '{heater_string}'"

            current_range = heater.get_heater_range()
            self.log.debug(f"Current heater range: {current_range}")

            if params['range'] == current_range:
                print("Current heater range matches commanded value. Proceeding unchanged.")
            else:
                heater.set_heater_range(params['range'])
                time.sleep(params.get('wait', 0))

        return True, f'Set {heater_string} heater range to {params["range"]}'

    @ocs_agent.param('channel', type=int, check=lambda x: 1 <= x <= 16)
    @ocs_agent.param('mode', type=str, choices=['current', 'voltage'])
    def set_excitation_mode(self, session, params):
        """set_excitation_mode(channel=None, mode=None)

        **Task** - Set the excitation mode of a specified channel.

        Parameters:
            channel (int): Channel to set the excitation mode for. Valid
                values are 1-16.
            mode (str): Excitation mode. Possible modes are 'current' or
                'voltage'.
        """
        with self._lock.acquire_timeout(job='set_excitation_mode') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            self.module.channels[params['channel']].set_excitation_mode(params['mode'])
            session.add_message(f'post message in agent for Set channel {params["channel"]} excitation mode to {params["mode"]}')
            print(f'print statement in agent for Set channel {params["channel"]} excitation mode to {params["mode"]}')

        return True, f'return text for Set channel {params["channel"]} excitation mode to {params["mode"]}'

    @ocs_agent.param('channel', type=int, check=lambda x: 1 <= x <= 16)
    @ocs_agent.param('value', type=float)
    def set_excitation(self, session, params):
        """set_excitation(channel=None, value=None)

        **Task** - Set the excitation voltage/current value of a specified
        channel.

        Parameters:
            channel (int): Channel to set the excitation for. Valid values
                are 1-16.
            value (float): Excitation value in volts or amps depending on set
                excitation mode. See
                :func:`socs.Lakeshore.Lakeshore372.Channel.set_excitation`
        """
        with self._lock.acquire_timeout(job='set_excitation') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            current_excitation = self.module.channels[params['channel']].get_excitation()
            mode = self.module.channels[params["channel"]].get_excitation_mode()
            units = 'amps' if mode == 'current' else 'volts'

            if params['value'] == current_excitation:
                session.add_message(f'Channel {params["channel"]} excitation {mode} already set to {params["value"]} {units}')
            else:
                self.module.channels[params['channel']].set_excitation(params['value'])
                session.add_message(f'Set channel {params["channel"]} excitation {mode} to {params["value"]} {units}')

        return True, f'Set channel {params["channel"]} excitation to {params["value"]} {units}'

    @ocs_agent.param('channel', type=int, check=lambda x: 1 <= x <= 16)
    def get_excitation(self, session, params):
        """get_excitation(channel=None)

        **Task** - Get the excitation voltage/current value of a specified
        channel.

        Parameters:
            channel (int): Channel to get the excitation for. Valid values
                are 1-16.
        """
        with self._lock.acquire_timeout(job='get_excitation') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            current_excitation = self.module.channels[params["channel"]].get_excitation()
            mode = self.module.channels[params["channel"]].get_excitation_mode()
            units = 'amps' if mode == 'current' else 'volts'

            session.add_message(f'Channel {params["channel"]} excitation {mode} is {current_excitation} {units}')
            session.data = {"excitation": current_excitation}

        return True, f'Channel {params["channel"]} excitation {mode} is {current_excitation} {units}'

    @ocs_agent.param('channel', type=int, check=lambda x: 1 <= x <= 16)
    @ocs_agent.param('resistance_range', type=float)
    def set_resistance_range(self, session, params):
        """set_resistance_range(channel=None, resistance_range=None)

        **Task** - Set the resistance range for a specified channel.

        Parameters:
            channel (int): Channel to set the resistance range for. Valid
                values are 1-16.
            resistance_range (float): range in ohms we want to measure.
                Doesn't need to be exactly one of the options on the
                lakeshore, will select closest valid range, though note these
                are in increments of 2, 6.32, 20, 63.2, etc.

        Notes:
            If autorange is on when you change the resistance range, it may
            try to change it to another value.
        """
        with self._lock.acquire_timeout(job='set_resistance_range') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            current_resistance_range = self.module.channels[params['channel']].get_resistance_range()

            if params['resistance_range'] == current_resistance_range:
                session.add_message(f'Channel {params["channel"]} resistance_range already set to {params["resistance_range"]}')
            else:
                self.module.channels[params['channel']].set_resistance_range(params['resistance_range'])
                session.add_message(f'Set channel {params["channel"]} resistance range to {params["resistance_range"]}')

        return True, f'Set channel {params["channel"]} resistance range to {params["resistance_range"]}'

    @ocs_agent.param('channel', type=int, check=lambda x: 1 <= x <= 16)
    def get_resistance_range(self, session, params):
        """get_resistance_range(channel=None)

        **Task** - Get the resistance range for a specified channel.

        Parameters:
            channel (int): Channel to get the resistance range for. Valid
                values are 1-16.
        """
        with self._lock.acquire_timeout(job='get_resistance_range') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            current_resistance_range = self.module.channels[params['channel']].get_resistance_range()
            session.add_message(f'Channel {params["channel"]} resistance range is {current_resistance_range}')
            session.data = {"resistance_range": current_resistance_range}

        return True, f'Channel {params["channel"]} resistance range is {current_resistance_range}'

    @ocs_agent.param('channel', type=int, check=lambda x: 1 <= x <= 16)
    @ocs_agent.param('dwell', type=int, check=lambda x: 1 <= x <= 200)
    def set_dwell(self, session, params):
        """set_dwell(channel=None, dwell=None)

        **Task** - Set the autoscanning dwell time for a particular channel.

        Parameters:
            channel (int): Channel to set the dwell time for. Valid values
                are 1-16.
            dwell (int): Dwell time in seconds, type is int and must be in the
                range 1-200 inclusive.
        """
        with self._lock.acquire_timeout(job='set_dwell') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            # Dead binding of the return value removed.
            self.module.channels[params["channel"]].set_dwell(params["dwell"])
            session.add_message(f'Set dwell to {params["dwell"]}')

        return True, f'Set channel {params["channel"]} dwell time to {params["dwell"]}'

    @ocs_agent.param('channel', type=int, check=lambda x: 1 <= x <= 16)
    def get_dwell(self, session, params):
        """get_dwell(channel=None, dwell=None)

        **Task** - Get the autoscanning dwell time for a particular channel.

        Parameters:
            channel (int): Channel to get the dwell time for. Valid values
                are 1-16.
        """
        # job label was 'set_dwell' (copy-paste), which misreported the lock
        # holder in contention messages; corrected to 'get_dwell'.
        with self._lock.acquire_timeout(job='get_dwell') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            current_dwell = self.module.channels[params["channel"]].get_dwell()
            session.add_message(f'Dwell time for channel {params["channel"]} is {current_dwell}')
            session.data = {"dwell_time": current_dwell}

        return True, f'Channel {params["channel"]} dwell time is {current_dwell}'

    @ocs_agent.param('P', type=int)
    @ocs_agent.param('I', type=int)
    @ocs_agent.param('D', type=int)
    def set_pid(self, session, params):
        """set_pid(P=None, I=None, D=None)

        **Task** - Set the PID parameters for servo control of fridge.

        Parameters:
            P (int): Proportional term for PID loop
            I (int): Integral term for the PID loop
            D (int): Derivative term for the PID loop

        Notes:
            Makes a call to
            :func:`socs.Lakeshore.Lakeshore372.Heater.set_pid`.
        """
        with self._lock.acquire_timeout(job='set_pid') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            self.module.sample_heater.set_pid(params["P"], params["I"], params["D"])
            session.add_message(f'post message text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}')
            print(f'print text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}')

        return True, f'return text for Set PID to {params["P"]}, {params["I"]}, {params["D"]}'

    @ocs_agent.param('channel', type=int)
    def set_active_channel(self, session, params):
        """set_active_channel(channel=None)

        **Task** - Set the active channel on the LS372.

        Parameters:
            channel (int): Channel to switch readout to. Valid values are
                1-16.
        """
        with self._lock.acquire_timeout(job='set_active_channel') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            self.module.set_active_channel(params["channel"])
            session.add_message(f'post message text for set channel to {params["channel"]}')
            print(f'print text for set channel to {params["channel"]}')

        return True, f'return text for set channel to {params["channel"]}'

    @ocs_agent.param('autoscan', type=bool)
    def set_autoscan(self, session, params):
        """set_autoscan(autoscan=None)

        **Task** - Sets autoscan on the LS372.

        Parameters:
            autoscan (bool): True to enable autoscan, False to disable.
        """
        with self._lock.acquire_timeout(job='set_autoscan') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            if params['autoscan']:
                self.module.enable_autoscan()
                self.log.info('enabled autoscan')
            else:
                self.module.disable_autoscan()
                self.log.info('disabled autoscan')

        return True, 'Set autoscan to {}'.format(params['autoscan'])

    @ocs_agent.param('temperature', type=float, check=lambda x: x < 1)
    def servo_to_temperature(self, session, params):
        """servo_to_temperature(temperature=None)

        **Task** - Servo to a given temperature using a closed loop PID on a
        fixed channel. This will automatically disable autoscan if enabled.

        Parameters:
            temperature (float): Temperature to servo to in units of Kelvin.
        """
        with self._lock.acquire_timeout(job='servo_to_temperature') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            # Check we're in correct control mode for servo.
            if self.module.sample_heater.mode != 'Closed Loop':
                session.add_message('Changing control to Closed Loop mode for servo.')
                self.module.sample_heater.set_mode("Closed Loop")

            # Check we aren't autoscanning.
            if self.module.get_autoscan() is True:
                session.add_message('Autoscan is enabled, disabling for PID control on dedicated channel.')
                self.module.disable_autoscan()

            # Check we're scanning same channel expected by heater for control.
            if self.module.get_active_channel().channel_num != int(self.module.sample_heater.input):
                session.add_message('Changing active channel to expected heater control input')
                self.module.set_active_channel(int(self.module.sample_heater.input))

            # Check we're setup to take correct units.
            if self.module.get_active_channel().units != 'kelvin':
                session.add_message('Setting preferred units to Kelvin on heater control input.')
                self.module.get_active_channel().set_units('kelvin')

            # Make sure we aren't servoing too high in temperature.
            if params["temperature"] > 1:
                return False, 'Servo temperature is set above 1K. Aborting.'

            self.module.sample_heater.set_setpoint(params["temperature"])

        return True, f'Setpoint now set to {params["temperature"]} K'

    @ocs_agent.param('measurements', type=int)
    @ocs_agent.param('threshold', type=float)
    def check_temperature_stability(self, session, params):
        """check_temperature_stability(measurements=None, threshold=None)

        Check servo temperature stability is within threshold.

        Parameters:
            measurements (int): number of measurements to average for
                stability check
            threshold (float): amount within which the average needs to be to
                the setpoint for stability
        """
        with self._lock.acquire_timeout(job='check_temp_stability') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            setpoint = float(self.module.sample_heater.get_setpoint())

            if params is None:
                params = {'measurements': 10, 'threshold': 0.5e-3}

            test_temps = []

            for i in range(params['measurements']):
                test_temps.append(self.module.get_temp())
                time.sleep(.1)  # sampling rate is 10 readings/sec, so wait 0.1 s for a new reading

            mean = np.mean(test_temps)
            session.add_message(f'Average of {params["measurements"]} measurements is {mean} K.')
            print(f'Average of {params["measurements"]} measurements is {mean} K.')

            if np.abs(mean - setpoint) < params['threshold']:
                print("passed threshold")
                session.add_message('Setpoint Difference: ' + str(mean - setpoint))
                session.add_message(f'Average is within {params["threshold"]} K threshold. Proceeding with calibration.')

                return True, f"Servo temperature is stable within {params['threshold']} K"
            else:
                print("we're in the else")
                # adjust_heater(t,rest)
                return False, f"Temperature not stable within {params['threshold']}."

    @ocs_agent.param('heater', type=str, choices=['sample', 'still'])
    @ocs_agent.param('mode', type=str, choices=['Off', 'Monitor Out', 'Open Loop', 'Zone', 'Still', 'Closed Loop', 'Warm up'])
    def set_output_mode(self, session, params=None):
        """set_output_mode(heater=None, mode=None)

        **Task** - Set output mode of the heater.

        Parameters:
            heater (str): Name of heater to set range for, either 'sample' or
                'still'.
            mode (str): Specifies mode of heater. Can be "Off", "Monitor Out",
                "Open Loop", "Zone", "Still", "Closed Loop", or "Warm up"
        """
        with self._lock.acquire_timeout(job='set_output_mode') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            session.set_status('running')

            if params['heater'].lower() == 'still':
                self.module.still_heater.set_mode(params['mode'])
            if params['heater'].lower() == 'sample':
                self.module.sample_heater.set_mode(params['mode'])
            self.log.info("Set {} output mode to {}".format(params['heater'], params['mode']))

        return True, "Set {} output mode to {}".format(params['heater'], params['mode'])

    @ocs_agent.param('heater', type=str, choices=['sample', 'still'])
    @ocs_agent.param('output', type=float)
    @ocs_agent.param('display', type=str, choices=['current', 'power'], default=None)
    def set_heater_output(self, session, params=None):
        """set_heater_output(heater=None, output=None, display=None)

        **Task** - Set display type and output of the heater.

        Parameters:
            heater (str): Name of heater to set range for, either 'sample' or
                'still'.
            output (float): Specifies heater output value. For possible values
                see
                :func:`socs.Lakeshore.Lakeshore372.Heater.set_heater_output`
            display (str, optional): Specifies heater display type. Can be
                "current" or "power". If None, heater display is not reset
                before setting output.

        Notes:
            For the still heater this sets the still heater manual output,
            *not* the still heater still output. Use
            :func:`LS372_Agent.set_still_output()` instead to set the still
            output.
        """
        with self._lock.acquire_timeout(job='set_heater_output') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            heater = params['heater'].lower()
            output = params['output']
            display = params.get('display', None)

            if heater == 'still':
                self.module.still_heater.set_heater_output(output, display_type=display)
            if heater.lower() == 'sample':
                self.log.info("display: {}\toutput: {}".format(display, output))
                self.module.sample_heater.set_heater_output(output, display_type=display)

            self.log.info("Set {} heater display to {}, output to {}".format(heater, display, output))

            session.set_status('running')

            data = {'timestamp': time.time(),
                    'block_name': '{}_heater_out'.format(heater),
                    'data': {'{}_heater_out'.format(heater): output}
                    }
            session.app.publish_to_feed('temperatures', data)

        return True, "Set {} display to {}, output to {}".format(heater, display, output)

    @ocs_agent.param('output', type=float, check=lambda x: 0 <= x <= 100)
    def set_still_output(self, session, params=None):
        """set_still_output(output=None)

        **Task** - Set the still output on the still heater. This is different
        than the manual output on the still heater. Use
        :func:`LS372_Agent.set_heater_output()` for that.

        Parameters:
            output (float): Specifies still heater output value as a
                percentage. Can be any number between 0 and 100.
        """
        with self._lock.acquire_timeout(job='set_still_output') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            output = params['output']

            self.module.still_heater.set_still_output(output)

            self.log.info("Set still output to {}".format(output))

            session.set_status('running')

            data = {'timestamp': time.time(),
                    'block_name': 'still_heater_still_out',
                    'data': {'still_heater_still_out': output}
                    }
            session.app.publish_to_feed('temperatures', data)

        return True, "Set still output to {}".format(output)

    @ocs_agent.param('_')
    def get_still_output(self, session, params=None):
        """get_still_output()

        **Task** - Gets the current still output on the still heater.

        Notes:
            The still heater output is stored in the session data object in
            the format::

                >>> response.session['data']
                {"still_heater_still_out": 9.628}

        """
        with self._lock.acquire_timeout(job='get_still_output') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            still_output = self.module.still_heater.get_still_output()

            self.log.info("Current still output is {}".format(still_output))

            session.set_status('running')
            session.data = {"still_heater_still_out": still_output}

        return True, "Current still output is {}".format(still_output)

    @ocs_agent.param('configfile', type=str)
    def input_configfile(self, session, params=None):
        """input_configfile(configfile=None)

        **Task** - Upload 372 configuration file to initialize channel/device
        settings.

        Parameters:
            configfile (str): name of .yaml config file
        """
        with self._lock.acquire_timeout(job='input_configfile') as acquired:
            if not acquired:
                self.log.warn(f"Could not start Task because "
                              f"{self._lock.job} is already running")
                return False, "Could not acquire lock"

            # path to configfile in docker container
            configpath = os.environ.get("OCS_CONFIG_DIR", "/config/")
            configfile = params['configfile']
            ls372configs = os.path.join(configpath, configfile)
            with open(ls372configs) as f:
                config = yaml.safe_load(f)

            ls = self.module
            ls_serial = ls.id.split(',')[2]

            device_config = config[ls_serial]['device_settings']
            ls_chann_settings = config[ls_serial]['channel']

            session.set_status('running')

            # enable/disable autoscan
            if device_config['autoscan'] == 'on':
                ls.enable_autoscan()
                self.log.info("autoscan enabled")
            elif device_config['autoscan'] == 'off':
                ls.disable_autoscan()
                self.log.info("autoscan disabled")

            for i in ls_chann_settings:
                # enable/disable channel
                if ls_chann_settings[i]['enable'] == 'on':
                    ls.channels[i].enable_channel()
                    self.log.info("CH.{channel} enabled".format(channel=i))
                elif ls_chann_settings[i]['enable'] == 'off':
                    ls.channels[i].disable_channel()
                    self.log.info("CH.{channel} disabled".format(channel=i))

                # autorange
                if ls_chann_settings[i]['autorange'] == 'on':
                    ls.channels[i].enable_autorange()
                    self.log.info("autorange on")
                elif ls_chann_settings[i]['autorange'] == 'off':
                    ls.channels[i].disable_autorange()
                    self.log.info("autorange off")

                excitation_mode = ls_chann_settings[i]['excitation_mode']
                ls.channels[i].set_excitation_mode(excitation_mode)
                self.log.info("excitation mode for CH.{channel} set to {exc_mode}".format(channel=i, exc_mode=excitation_mode))

                excitation_value = ls_chann_settings[i]['excitation_value']
                ls.channels[i].set_excitation(excitation_value)
                self.log.info("excitation for CH.{channel} set to {exc}".format(channel=i, exc=excitation_value))

                dwell = ls_chann_settings[i]['dwell']
                ls.channels[i].set_dwell(dwell)
                self.log.info("dwell for CH.{channel} is set to {dwell}".format(channel=i, dwell=dwell))

                pause = ls_chann_settings[i]['pause']
                ls.channels[i].set_pause(pause)
                self.log.info("pause for CH.{channel} is set to {pause}".format(channel=i, pause=pause))

                calibration_curvenum = ls_chann_settings[i]['calibration_curve_num']
                ls.channels[i].set_calibration_curve(calibration_curvenum)
                self.log.info("calibration curve for CH.{channel} set to {cal_curve}".format(channel=i, cal_curve=calibration_curvenum))

                tempco = ls_chann_settings[i]['temperature_coeff']
                ls.channels[i].set_temperature_coefficient(tempco)
                self.log.info("temperature coeff. for CH.{channel} set to {tempco}".format(channel=i, tempco=tempco))

        return True, "Uploaded {}".format(configfile)
class LabJackAgent:
    """Agent to collect data from LabJack device.

    Parameters:
        agent (OCSAgent): OCSAgent object for the Agent.
        ip_address (str): IP Address for the LabJack device.
        active_channels (str or list): Active channel description, i.e.
            'T7-all', 'T4-all', or list of channels in form ['AIN0', 'AIN1'].
        function_file (str): Path to file for unit conversion.
        sampling_frequency (float): Sampling rate in Hz.
    """

    def __init__(self, agent, ip_address, active_channels, function_file,
                 sampling_frequency):
        self.active = True
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.ip_address = ip_address
        self.module = None
        self.ljf = LabJackFunctions()
        self.sampling_frequency = sampling_frequency

        # Labjack channels to read. Note only the first element of
        # active_channels is inspected for the 'T7-all'/'T4-all' shorthands;
        # otherwise the list is used verbatim as channel names.
        if active_channels[0] == 'T7-all':
            self.chs = ['AIN{}'.format(i) for i in range(14)]
        elif active_channels[0] == 'T4-all':
            self.chs = ['AIN{}'.format(i) for i in range(12)]
        else:
            self.chs = active_channels

        # Load dictionary of unit conversion functions from yaml file. Assumes
        # the file is in the $OCS_CONFIG_DIR directory
        if function_file == 'None':
            self.functions = {}
        else:
            function_file_path = os.path.join(os.environ['OCS_CONFIG_DIR'],
                                              function_file)
            with open(function_file_path, 'r') as stream:
                self.functions = yaml.safe_load(stream)
                # yaml.safe_load returns None for an empty file; normalize
                # to an empty dict so later `ch in self.functions` checks work.
                if self.functions is None:
                    self.functions = {}
            self.log.info(
                f"Applying conversion functions: {self.functions}")

        self.initialized = False
        self.take_data = False

        # Register main feed. Exclude influx due to potentially high scan rate
        agg_params = {'frame_length': 60, 'exclude_influx': True}
        self.agent.register_feed('sensors',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

        # Register downsampled feed for influx.
        agg_params_downsampled = {'frame_length': 60}
        self.agent.register_feed('sensors_downsampled',
                                 record=True,
                                 agg_params=agg_params_downsampled,
                                 buffer_time=1)

        # Feed used by acq_reg for slow, register-based readout.
        self.agent.register_feed('registers',
                                 record=True,
                                 agg_params={'frame_length': 10 * 60},
                                 buffer_time=1.)

    # Task functions
    def init_labjack(self, session, params=None):
        """init_labjack(auto_acquire=False)

        **Task** - Initialize LabJack module.

        Parameters:
            auto_acquire (bool): Automatically start acq process after
                initialization. Defaults to False.
            auto_acquire_reg (bool): Automatically start the acq_reg process
                after initialization. Defaults to False.
        """
        # NOTE(review): params is used via params.get() below without a
        # `params is None` guard — confirm the caller always supplies a dict.
        if self.initialized:
            return True, "Already initialized module"

        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('starting')

            # Connect with the labjack
            self.handle = ljm.openS("ANY", "ANY", self.ip_address)
            info = ljm.getHandleInfo(self.handle)
            self.log.info(
                "\nOpened LabJack of type: %i, Connection type: %i,\n"
                "Serial number: %i, IP address: %s, Port: %i" %
                (info[0], info[1], info[2], ljm.numberToIP(info[3]), info[4]))
            session.add_message("Labjack initialized")

        self.initialized = True

        # Start data acquisition if requested in site-config
        auto_acquire = params.get('auto_acquire', False)
        auto_acquire_reg = params.get('auto_acquire_reg', False)
        if auto_acquire:
            self.agent.start('acq')
        if auto_acquire_reg:
            self.agent.start('acq_reg')

        return True, 'LabJack module initialized.'

    def acq(self, session, params=None):
        """acq(sampling_freq=2.5)

        **Process** - Acquire data from the Labjack.

        Parameters:
            sampling_frequency (float): Sampling frequency for data
                collection. Defaults to 2.5 Hz.
        """
        if params is None:
            params = {}

        # Setup streaming parameters. Data is collected and published in
        # blocks at 1 Hz or the scan rate, whichever is less.
        scan_rate_input = params.get('sampling_frequency',
                                     self.sampling_frequency)
        scans_per_read = max(1, int(scan_rate_input))
        num_chs = len(self.chs)
        ch_addrs = ljm.namesToAddresses(num_chs, self.chs)[0]

        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn("Could not start acq because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')
            self.take_data = True

            # Start the data stream. Use the scan rate returned by the stream,
            # which should be the same as the input scan rate.
            try:
                scan_rate = ljm.eStreamStart(self.handle, scans_per_read,
                                             num_chs, ch_addrs,
                                             scan_rate_input)
            except LJMError as e:  # in case the stream is running
                self.log.error(e)
                self.log.error("Stopping previous stream and starting new one")
                ljm.eStreamStop(self.handle)
                scan_rate = ljm.eStreamStart(self.handle, scans_per_read,
                                             num_chs, ch_addrs,
                                             scan_rate_input)
            self.log.info(
                f"\nStream started with a scan rate of {scan_rate} Hz.")

            cur_time = time.time()
            while self.take_data:
                data = {'block_name': 'sens', 'data': {}}

                # Query the labjack
                raw_output = ljm.eStreamRead(self.handle)
                output = raw_output[0]

                # Data comes in form ['AIN0_1', 'AIN1_1', 'AIN0_2', ...]
                for i, ch in enumerate(self.chs):
                    # Stride by num_chs to de-interleave this channel's samples
                    ch_output = output[i::num_chs]
                    data['data'][ch + 'V'] = ch_output

                    # Apply unit conversion function for this channel
                    if ch in self.functions.keys():
                        new_ch_output, units = \
                            self.ljf.unit_conversion(np.array(ch_output),
                                                     self.functions[ch])
                        data['data'][ch + units] = list(new_ch_output)

                # The labjack outputs at exactly the scan rate but doesn't
                # generate timestamps. So create them here.
                timestamps = [
                    cur_time + i / scan_rate for i in range(scans_per_read)
                ]
                cur_time += scans_per_read / scan_rate
                data['timestamps'] = timestamps

                self.agent.publish_to_feed('sensors', data)

                # Publish to the downsampled data feed only the first
                # timestamp and data point for each channel.
                data_downsampled = {
                    'block_name': 'sens',
                    'data': {},
                    'timestamp': timestamps[0]
                }
                for key, value in data['data'].items():
                    data_downsampled['data'][key] = value[0]
                self.agent.publish_to_feed('sensors_downsampled',
                                           data_downsampled)
                session.data = data_downsampled

            # Flush buffer and stop the data stream
            self.agent.feeds['sensors'].flush_buffer()
            self.agent.feeds['sensors_downsampled'].flush_buffer()
            ljm.eStreamStop(self.handle)
            self.log.info("Data stream stopped")

        return True, 'Acquisition exited cleanly.'

    def _stop_acq(self, session, params=None):
        """Request the acq/acq_reg loop to exit by clearing take_data."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'

    def acq_reg(self, session, params=None):
        """acq_reg(sampling_frequency=2.5)

        **Task** - Start data acquisition when you want to read out
        non-standard registers.

        In particular the custom registers labjack was built for reading out
        thermocouples. Maximum is about 2.5 Hz but is set by the register
        read time which is estimated at the beginning of the acq_reg setup
        step.

        Args:
            sampling_frequency (float):
                Sampling frequency for data collection. Defaults to 2.5 Hz
                Maximum set by the register read time. Will reset to lower
                rate if faster than possible read time.
        """
        if params is None:
            params = {}

        # Determine the read time latency to set the max allowable
        # sampling rate by reading the register 100 times in a row
        # and recording the time it takes to read each time. Then
        # setting the max sample rate to be 50mS greater than the median
        # of the time it took to read.
        num_chs = len(self.chs)
        times = []
        for i in range(100):
            times.append(time.time())
            # Read result is discarded; this loop only measures read latency.
            ljm.eReadNames(self.handle, num_chs, self.chs)[0]
        read_dt = np.round(np.median(np.diff(times)), 2) + 0.05
        max_fsamp = min(2.5, 1 / read_dt)

        # Setup streaming parameters. Data is collected and published in
        # blocks at 1 Hz or the scan rate, whichever is less.
        scan_rate_input = params.get('sampling_frequency',
                                     self.sampling_frequency)

        # Warn user that they input too fast of a sample rate and set
        # to maximum allowable.
        if scan_rate_input > max_fsamp:
            self.log.warn(f'Sampling rate {scan_rate_input} exceeds'
                          'allowable range for register read mode'
                          'if you want to sample faster please use'
                          'standard acquire mode and do not read out '
                          'any non-standard channels. Setting sampling'
                          f'rate to maximum = {max_fsamp} Hz')
            scan_rate_input = max_fsamp
            # At the maximum rate the read latency alone sets the pace.
            scan_rate_dt = 0
        else:
            # Sleep for the remainder of the period after the read completes.
            scan_rate_dt = (1 / scan_rate_input) - read_dt

        with self.lock.acquire_timeout(0, job='acq_reg') as acquired:
            if not acquired:
                self.log.warn("Could not start acq because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')
            self.take_data = True

            while self.take_data:
                data = {'block_name': 'reg', 'data': {}}

                # Get a timestamp
                timestamp = time.time()

                # Query the labjack
                output = ljm.eReadNames(self.handle, num_chs, self.chs)

                # Data comes in form ['reg1', 'reg2', 'reg3', ...]
                # where regn are the registers in your active_channels
                # list in your site config file.
                for i, ch in enumerate(self.chs):
                    ch_output = output[i]
                    data['data'][ch] = ch_output

                    # Apply unit conversion function for this channel
                    if ch in self.functions.keys():
                        new_ch_output, units = \
                            self.ljf.unit_conversion(ch_output,
                                                     self.functions[ch])
                        data['data'][ch + units] = new_ch_output

                data['timestamp'] = timestamp
                self.agent.publish_to_feed('registers', data)
                time.sleep(scan_rate_dt)

            # Flush buffer and stop the data stream.
            # NOTE(review): this closes the device handle (ljm.close), unlike
            # acq which only stops the stream — confirm re-init is expected
            # before restarting acquisition.
            self.agent.feeds['registers'].flush_buffer()
            ljm.close(self.handle)
            self.log.info("Data stream stopped")

        return True, 'Acquisition exited cleanly.'
class PTCAgent:
    """Agent to connect to a single cryomech compressor.

    Parameters:
        agent (OCSAgent): OCSAgent object for the Agent.
        port (int): TCP port to connect to.
        ip_address (str): IP Address for the compressor.
        f_sample (float, optional): Data acquisiton rate, defaults to 2.5 Hz.
        fake_errors (bool, optional): Generates fake errors in the string
            output 50% of the time.
    """

    def __init__(self, agent, port, ip_address, f_sample=2.5,
                 fake_errors=False):
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.ip_address = ip_address
        self.fake_errors = fake_errors

        self.port = port
        self.module: Optional[Module] = None
        self.f_sample = f_sample

        self.initialized = False
        self.take_data = False

        # Registers data feeds
        agg_params = {
            'frame_length': 60,
        }
        self.agent.register_feed('ptc_status',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    def init(self, session, params=None):
        """init(auto_acquire=False)

        **Task** - Initializes the connection to the PTC.

        Parameters:
            auto_acquire (bool): Automatically start acq process after
                initialization
        """
        if params is None:
            params = {}

        auto_acquire = params.get('auto_acquire', False)

        if self.initialized:
            return True, "Already Initialized"

        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('starting')

            # Establish connection to ptc
            self.ptc = PTC(self.ip_address, port=self.port,
                           fake_errors=self.fake_errors)

            # Test connection and display identifying info
            self.ptc.get_data()
            print("PTC Model:", self.ptc.model)
            print("PTC Serial Number:", self.ptc.serial)
            print("Software Revision is:", self.ptc.software_revision)

        self.initialized = True

        # Start data acquisition if requested
        if auto_acquire:
            self.agent.start('acq')

        return True, "PTC agent initialized"

    def acq(self, session, params=None):
        """acq()

        **Process** - Starts acquisition of status data from the PTC.
        """
        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn("Could not start acq because {} is already"
                              "running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')
            self.take_data = True

            # Publish data, waiting 1/f_sample seconds in between calls.
            while self.take_data:
                pub_data = {
                    'timestamp': time.time(),
                    'block_name': 'ptc_status'
                }
                data_flag, data = self.ptc.get_data()
                pub_data['data'] = data
                # If there is an error in compressor output (data_flag = True),
                # do not publish
                if not data_flag:
                    self.agent.publish_to_feed('ptc_status', pub_data)
                time.sleep(1. / self.f_sample)

            self.agent.feeds["ptc_status"].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def _stop_acq(self, session, params=None):
        """Stops acquisition of data from the PTC."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'
class LabJackAgent:
    """Agent to collect data from a LabJack device.

    NOTE(review): this class shares its name with an earlier LabJackAgent
    definition in this file and appears to be an older variant (no
    'registers' feed, no acq_reg task) — confirm whether both are intended.

    Parameters:
        agent (OCSAgent): OCSAgent object for the Agent.
        ip_address (str): IP Address for the LabJack device.
        active_channels (str or list): 'T7-all', 'T4-all', or list of
            channels in form ['AIN0', 'AIN1'].
        function_file (str): Path to file for unit conversion, or 'None'.
        sampling_frequency (float): Sampling rate in Hz.
    """

    def __init__(self, agent, ip_address, active_channels, function_file,
                 sampling_frequency):
        self.active = True
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.ip_address = ip_address
        self.module = None
        self.ljf = LabJackFunctions()
        self.sampling_frequency = sampling_frequency

        # Labjack channels to read. Here the whole active_channels value is
        # compared to the shorthands (unlike the other variant, which checks
        # active_channels[0]).
        if active_channels == 'T7-all':
            self.chs = ['AIN{}'.format(i) for i in range(14)]
        elif active_channels == 'T4-all':
            self.chs = ['AIN{}'.format(i) for i in range(12)]
        else:
            self.chs = active_channels

        # Load dictionary of unit conversion functions from yaml file. Assumes
        # the file is in the $OCS_CONFIG_DIR directory
        if function_file == 'None':
            self.functions = {}
        else:
            function_file_path = os.path.join(os.environ['OCS_CONFIG_DIR'],
                                              function_file)
            with open(function_file_path, 'r') as stream:
                self.functions = yaml.safe_load(stream)
                # Empty yaml file loads as None; normalize to empty dict.
                if self.functions is None:
                    self.functions = {}
            self.log.info(
                f"Applying conversion functions: {self.functions}")

        self.initialized = False
        self.take_data = False

        # Register main feed. Exclude influx due to potentially high scan rate
        agg_params = {'frame_length': 60, 'exclude_influx': True}
        self.agent.register_feed('sensors',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

        # Register downsampled feed for influx.
        agg_params_downsampled = {'frame_length': 60}
        self.agent.register_feed('sensors_downsampled',
                                 record=True,
                                 agg_params=agg_params_downsampled,
                                 buffer_time=1)

    # Task functions
    def init_labjack_task(self, session, params=None):
        """Task to initialize labjack module.

        Parameters:
            auto_acquire (bool): Automatically start acq process after
                initialization. Defaults to False.
        """
        # NOTE(review): params.get() is used below without a
        # `params is None` guard — confirm the caller always supplies a dict.
        if self.initialized:
            return True, "Already initialized module"

        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('starting')

            # Connect with the labjack
            self.handle = ljm.openS("ANY", "ANY", self.ip_address)
            info = ljm.getHandleInfo(self.handle)
            self.log.info(
                "\nOpened LabJack of type: %i, Connection type: %i,\n"
                "Serial number: %i, IP address: %s, Port: %i" %
                (info[0], info[1], info[2], ljm.numberToIP(info[3]), info[4]))
            session.add_message("Labjack initialized")

        self.initialized = True

        # Start data acquisition if requested in site-config
        auto_acquire = params.get('auto_acquire', False)
        if auto_acquire:
            self.agent.start('acq')

        return True, 'LabJack module initialized.'

    def start_acq(self, session, params=None):
        """Task to start data acquisition.

        Args:
            sampling_frequency (float):
                Sampling frequency for data collection. Defaults to 2.5 Hz
        """
        if params is None:
            params = {}

        # Setup streaming parameters. Data is collected and published in
        # blocks at 1 Hz or the scan rate, whichever is less.
        scan_rate_input = params.get('sampling_frequency',
                                     self.sampling_frequency)
        scans_per_read = max(1, int(scan_rate_input))
        num_chs = len(self.chs)
        ch_addrs = ljm.namesToAddresses(num_chs, self.chs)[0]

        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn("Could not start acq because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')
            self.take_data = True

            # Start the data stream. Use the scan rate returned by the stream,
            # which should be the same as the input scan rate.
            try:
                scan_rate = ljm.eStreamStart(self.handle, scans_per_read,
                                             num_chs, ch_addrs,
                                             scan_rate_input)
            except LJMError as e:  # in case the stream is running
                self.log.error(e)
                self.log.error("Stopping previous stream and starting new one")
                ljm.eStreamStop(self.handle)
                scan_rate = ljm.eStreamStart(self.handle, scans_per_read,
                                             num_chs, ch_addrs,
                                             scan_rate_input)
            self.log.info(
                f"\nStream started with a scan rate of {scan_rate} Hz.")

            cur_time = time.time()
            while self.take_data:
                data = {'block_name': 'sens', 'data': {}}

                # Query the labjack
                raw_output = ljm.eStreamRead(self.handle)
                output = raw_output[0]

                # Data comes in form ['AIN0_1', 'AIN1_1', 'AIN0_2', ...]
                for i, ch in enumerate(self.chs):
                    # Stride by num_chs to de-interleave this channel's samples
                    ch_output = output[i::num_chs]
                    data['data'][ch + 'V'] = ch_output

                    # Apply unit conversion function for this channel
                    if ch in self.functions.keys():
                        new_ch_output, units = \
                            self.ljf.unit_conversion(np.array(ch_output),
                                                     self.functions[ch])
                        data['data'][ch + units] = list(new_ch_output)

                # The labjack outputs at exactly the scan rate but doesn't
                # generate timestamps. So create them here.
                timestamps = [
                    cur_time + i / scan_rate for i in range(scans_per_read)
                ]
                cur_time += scans_per_read / scan_rate
                data['timestamps'] = timestamps

                self.agent.publish_to_feed('sensors', data)

                # Publish to the downsampled data feed only the first
                # timestamp and data point for each channel.
                data_downsampled = {
                    'block_name': 'sens',
                    'data': {},
                    'timestamp': timestamps[0]
                }
                for key, value in data['data'].items():
                    data_downsampled['data'][key] = value[0]
                self.agent.publish_to_feed('sensors_downsampled',
                                           data_downsampled)
                session.data = data_downsampled

            # Flush buffer and stop the data stream
            self.agent.feeds['sensors'].flush_buffer()
            self.agent.feeds['sensors_downsampled'].flush_buffer()
            ljm.eStreamStop(self.handle)
            self.log.info("Data stream stopped")

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Request the acquisition loop to exit by clearing take_data."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'
class LS240_Agent:
    """Agent for a Lakeshore 240 temperature module.

    Parameters:
        agent (OCSAgent): OCSAgent object for the Agent.
        port (str): Serial port the device is attached to.
            Defaults to "/dev/ttyUSB0".
        f_sample (float): Default sampling rate for acq, in Hz.
            Defaults to 2.5.
    """

    def __init__(self, agent, port="/dev/ttyUSB0", f_sample=2.5):
        self.agent: ocs_agent.OCSAgent = agent
        self.log = agent.log
        self.lock = TimeoutLock()

        self.port = port
        self.module: Optional[Module] = None

        self.f_sample = f_sample

        self.initialized = False
        self.take_data = False

        # Registers Temperature and Voltage feeds
        agg_params = {
            'frame_length': 60,
        }
        self.agent.register_feed('temperatures',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    # Task functions.
    def init_lakeshore(self, session, params=None):
        """init_lakeshore(auto_acquire=False)

        **Task** - Perform first time setup of the Lakeshore 240 Module.

        Parameters:
            auto_acquire (bool, optional): Starts data acquisition after
                initialization if True. Defaults to False.
        """
        if params is None:
            params = {}

        auto_acquire = params.get('auto_acquire', False)

        if self.initialized:
            return True, "Already Initialized Module"

        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('starting')

            self.module = Module(port=self.port)
            print("Initialized Lakeshore module: {!s}".format(self.module))
            session.add_message("Lakeshore initialized with ID: %s"
                                % self.module.inst_sn)

        self.initialized = True

        # Start data acquisition if requested
        if auto_acquire:
            self.agent.start('acq')

        return True, 'Lakeshore module initialized.'

    def set_values(self, session, params=None):
        """set_values(channel, sensor=None, auto_range=None, range=None,\
                current_reversal=None, units=None, enabled=None, name=None)

        **Task** - Set sensor parameters for a Lakeshore240 Channel.

        Args:
            channel (int): Channel number to set. Valid choices are 1-8.
            sensor (int, optional): Specifies sensor type.  See
                :func:`socs.Lakeshore.Lakeshore240.Channel.set_values` for
                possible types.
            auto_range (int, optional): Specifies if channel should use
                autorange. Must be 0 or 1.
            range (int, optional): Specifies range if auto_range is false.
                Only settable for NTC RTD.  See
                :func:`socs.Lakeshore.Lakeshore240.Channel.set_values` for
                possible ranges.
            current_reversal (int, optional): Specifies if input current
                reversal is on or off.
                Always 0 if input is a diode.
            units (int, optional): Specifies preferred units parameter, and
                sets the units for alarm settings.  See
                :func:`socs.Lakeshore.Lakeshore240.Channel.set_values` for
                possible units.
            enabled (int, optional): Sets if channel is enabled.
            name (str, optional): Sets name of channel.
        """
        if params is None:
            params = {}

        with self.lock.acquire_timeout(0, job='set_values') as acquired:
            if not acquired:
                self.log.warn("Could not start set_values because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            # Channels are 1-indexed for users, 0-indexed in the module list.
            # NOTE(review): the docstring documents a 'units' parameter but
            # the code reads params.get('unit') — confirm the expected key.
            self.module.channels[params['channel'] - 1].set_values(
                sensor=params.get('sensor'),
                auto_range=params.get('auto_range'),
                range=params.get('range'),
                current_reversal=params.get('current_reversal'),
                unit=params.get('unit'),
                enabled=params.get('enabled'),
                name=params.get('name'),
            )

        return True, 'Set values for channel {}'.format(params['channel'])

    def upload_cal_curve(self, session, params=None):
        """upload_cal_curve(channel, filename)

        **Task** - Upload a calibration curve to a channel.

        Parameters:
            channel (int): Channel number, 1-8.
            filename (str): Filename for calibration curve.
        """
        channel = params['channel']
        filename = params['filename']

        with self.lock.acquire_timeout(0, job='upload_cal_curve') as acquired:
            if not acquired:
                self.log.warn("Could not start set_values because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            # Rebind `channel` from the 1-indexed number to the Channel object.
            channel = self.module.channels[channel - 1]
            self.log.info("Starting upload to channel {}...".format(channel))
            channel.load_curve(filename)
            self.log.info("Finished uploading.")

        return True, "Uploaded curve to channel {}".format(channel)

    def acq(self, session, params=None):
        """acq(sampling_frequency=2.5)

        **Process** - Start data acquisition.

        Parameters:
            sampling_frequency (float):
                Sampling frequency for data collection. Defaults to 2.5 Hz

        The most recent data collected is stored in session data in the
        structure::

            >>> response.session['data']
            {"fields":
                {"Channel_1": {"T": 99.26, "V": 99.42},
                 "Channel_2": {"T": 99.54, "V": 101.06},
                 "Channel_3": {"T": 100.11, "V":100.79},
                 "Channel_4": {"T": 98.49, "V": 100.77},
                 "Channel_5": {"T": 97.75, "V": 101.45},
                 "Channel_6": {"T": 99.58, "V": 101.75},
                 "Channel_7": {"T": 98.03, "V": 100.82},
                 "Channel_8": {"T": 101.14, "V":101.01}},
             "timestamp":1601925677.6914878}

        """
        if params is None:
            params = {}

        f_sample = params.get('sampling_frequency')
        # If f_sample is None, use value passed to Agent init
        if f_sample is None:
            f_sample = self.f_sample

        # 0.01 s subtracted to roughly compensate for per-loop read overhead.
        sleep_time = 1 / f_sample - 0.01

        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn(
                    "Could not start acq because {} is already running".format(
                        self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')

            self.take_data = True

            session.data = {"fields": {}}

            while self.take_data:
                current_time = time.time()
                data = {
                    'timestamp': current_time,
                    'block_name': 'temps',
                    'data': {}
                }

                for chan in self.module.channels:
                    # Read sensor on channel
                    chan_string = "Channel_{}".format(chan.channel_num)
                    temp_reading = chan.get_reading(unit='K')
                    sensor_reading = chan.get_reading(unit='S')

                    # For data feed
                    data['data'][chan_string + '_T'] = temp_reading
                    data['data'][chan_string + '_V'] = sensor_reading

                    # For session.data
                    field_dict = {
                        chan_string: {
                            "T": temp_reading,
                            "V": sensor_reading
                        }
                    }
                    session.data['fields'].update(field_dict)

                self.agent.publish_to_feed('temperatures', data)
                session.data.update({'timestamp': current_time})

                time.sleep(sleep_time)

            self.agent.feeds['temperatures'].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def _stop_acq(self, session, params=None):
        """Stops acq process."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'
class BLE2Agent:
    '''OCS agent class for BLE2 motor driver.

    Parameters
    ----------
    agent : OCSAgent
        OCSAgent object which is running.
    port : string
        Port to connect. Defaults to PORT_DEFAULT.
    '''

    def __init__(self, agent, port=PORT_DEFAULT):
        self.active = True
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.take_data = False

        self._ble2 = BLE2(port=port)

        self.initialized = False

        agg_params = {'frame_length': 60}
        self.agent.register_feed('motor',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    def init_ble2(self, session, params=None):
        '''Initialization of BLE2 motor driver.'''
        if self.initialized:
            return True, 'Already initialized'

        with self.lock.acquire_timeout(0, job='init_ble2') as acquired:
            if not acquired:
                self.log.warn('Could not start init because '
                              '{} is already running'.format(self.lock.job))
                return False, 'Could not acquire lock.'

            session.set_status('starting')

            self._ble2.connect()
            session.add_message('BLE2 initialized.')

        self.initialized = True

        return True, 'BLE2 module initialized.'

    def start_acq(self, session, params):
        '''Starts acquiring data.

        Parameters
        ----------
        sampling_frequency : float, optional
            Sampling rate in Hz. Defaults to 2.5.
        '''
        if params is None:
            params = {}

        f_sample = params.get('sampling_frequency', 2.5)
        # 0.1 s subtracted to roughly compensate for per-loop read overhead.
        sleep_time = 1 / f_sample - 0.1

        # Auto-initialize if needed, waiting up to INIT_TIMEOUT * 0.1 s.
        if not self.initialized:
            self.agent.start('init_ble2')
            for _ in range(INIT_TIMEOUT):
                if self.initialized:
                    break
                time.sleep(0.1)

        if not self.initialized:
            return False, 'Could not initialize..'

        with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
            if not acquired:
                self.log.warn(
                    f'Could not start acq because {self.lock.job} is already running'
                )
                return False, 'Could not acquire lock.'

            session.set_status('running')
            self.take_data = True
            session.data = {"fields": {}}
            last_release = time.time()

            while self.take_data:
                # Periodically release the lock so short tasks (e.g.
                # set_values) can interleave with acquisition.
                if time.time() - last_release > LOCK_RELEASE_SEC:
                    last_release = time.time()
                    if not self.lock.release_and_acquire(
                            timeout=LOCK_RELEASE_TIMEOUT):
                        print(f'Re-acquire failed: {self.lock.job}')
                        return False, 'Could not re-acquire lock.'

                # Data acquisition
                current_time = time.time()
                data = {
                    'timestamp': current_time,
                    'block_name': 'motor',
                    'data': {}
                }

                speed = self._ble2.get_status()
                data['data']['RPM'] = speed

                field_dict = {'motor': {'RPM': speed}}
                session.data['fields'].update(field_dict)

                self.agent.publish_to_feed('motor', data)
                session.data.update({'timestamp': current_time})

                time.sleep(sleep_time)

            self.agent.feeds['motor'].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Stops the data acquisiton."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'

        return False, 'acq is not currently running.'

    def set_values(self, session, params=None):
        '''A task to set parameters for BLE2 motor driver.

        Parameters
        ----------
        speed : int
            Motor rotation speed in RPM
        accl_time : float
            Acceleration time
        decl_time : float
            Deceleration time
        '''
        if params is None:
            params = {}

        with self.lock.acquire_timeout(3, job='set_values') as acquired:
            if not acquired:
                self.log.warn('Could not start set_values because '
                              f'{self.lock.job} is already running')
                return False, 'Could not acquire lock.'

            # Only apply the parameters that were actually supplied.
            speed = params.get('speed')
            if speed is not None:
                self._ble2.set_speed(speed)

            accl_time = params.get('accl_time')
            if accl_time is not None:
                self._ble2.set_accl_time(accl_time, accl=True)

            decl_time = params.get('decl_time')
            if decl_time is not None:
                self._ble2.set_accl_time(decl_time, accl=False)

        return True, 'Set values for BLE2'

    def start_rotation(self, session, params=None):
        '''Start rotation.

        Parameters
        ----------
        forward : bool, default True
            Move forward if True
        '''
        if params is None:
            params = {}

        # Ensure the acq process is running so rotation is monitored,
        # waiting up to ACQ_TIMEOUT * 0.1 s for it to start.
        if not self.take_data:
            self.agent.start('acq')
            for _ in range(ACQ_TIMEOUT):
                if self.take_data:
                    break
                time.sleep(0.1)

        if not self.take_data:
            return False, 'Could not start acq.'

        with self.lock.acquire_timeout(3, job='set_values') as acquired:
            if not acquired:
                self.log.warn('Could not start set_values because '
                              f'{self.lock.job} is already running')
                return False, 'Could not acquire lock.'

            if not self.take_data:
                return False, 'acq is not currently running.'

            forward = params.get('forward')
            if forward is None:
                forward = True
            self._ble2.start(forward=forward)

        return True, 'BLE2 rotation started.'

    def stop_rotation(self, session, params=None):
        '''Stop rotation.'''
        if params is None:
            params = {}

        with self.lock.acquire_timeout(3, job='set_values') as acquired:
            if not acquired:
                self.log.warn('Could not start set_values because '
                              f'{self.lock.job} is already running')
                return False, 'Could not acquire lock.'

            self._ble2.stop()

        return True, 'BLE2 rotation stop command was published.'
class LS336_Agent:
    """Agent to connect to a single Lakeshore 336 device.
    Supports channels 'A','B','C', and 'D' for Lakeshore 336s that
    do not have the extra Lakeshore 3062 scanner installed. Also has
    channels 'D2','D3','D4', and 'D5' for 336s that have the extra
    scanner. Currently only supports heaters '1' and '2'.

    Parameters
    ----------
    sn: str
        Serial number of the LS336
    ip: str
        IP Address for the 336 device
    f_sample: float, optional (default 0.1)
        The frequency of sampling for acquiring data (in Hz)
    threshold: float, optional (default 0.1)
        The max difference (in K) between the setpoint and current
        temperature that will be considered stable
    window: int, optional (default 900)
        The amount of time (in s) over which the difference between the
        setpoint and the current temperature must not exceed threshold
        while checking for temperature stability.

    Attributes
    ----------
    sn: str
        Serial number of the LS336
    ip: str
        IP Address for the 336 device
    module: LS336 object
        Driver object
    module.channels: dict
        The available channels in the LS336 object
    module.heaters: dict
        The available heaters in the LS336 object
    f_sample: float
        The frequency of sampling for acquiring data (in Hz)
    t_sample: float
        The time between each sample (inverse of f_sample - 0.01)
    threshold: float
        The max difference (in K) between the setpoint and current
        temperature that will be considered stable
    window: int
        The amount of time (in s) over which the difference between the
        setpoint and the current temperature must not exceed threshold
        while checking for temperature stability.
    _recent_temps: numpy array, protected
        Array of recent temperatures for checking temperature stability
    _static_setpoint: float, protected
        The final setpoint value to avoid issues when the setpoint is
        ramping to a new value. Used in checking temperature stability
    """

    def __init__(self, agent, sn, ip, f_sample=0.1,
                 threshold=0.1, window=900):
        self.agent = agent
        self.sn = sn
        self.ip = ip
        self.f_sample = f_sample
        # 0.01 s is subtracted to roughly account for per-sample overhead.
        self.t_sample = 1/self.f_sample - 0.01
        # The stability buffer (see acq) only holds 2 hours of samples.
        assert self.t_sample < 7200, \
            "acq sampling freq must be such that t_sample is less than 2 hours"
        self._lock = TimeoutLock()
        self.log = agent.log
        self.initialized = False
        self.take_data = False
        self.module = None

        # for stability checking
        self.threshold = threshold
        self.window = window
        self._recent_temps = None
        self._static_setpoint = None

        agg_params = {'frame_length': 10*60}  # sec

        # combined feed for thermometry and control data
        self.agent.register_feed(
            'temperatures',
            record=True,
            agg_params=agg_params,
            buffer_time=1
        )

    @ocs_agent.param('auto_acquire', default=False, type=bool)
    def init_lakeshore(self, session, params=None):
        """init_lakeshore(auto_acquire=False)

        **Task** - Perform first time setup of the Lakeshore 336
        communication

        Parameters:
            auto_acquire (bool, optional): Default is False. Starts data
                acquisition after initialization if True.
        """
        if params is None:
            params = {}

        # test if this agent is already running
        if self.initialized:
            self.log.info('Lakeshore already initialized, returning...')
            return True, 'Already initialized'

        # initialize lakeshore
        with self._lock.acquire_timeout(job='init', timeout=0) as acquired:
            if not acquired:
                # NOTE(review): print used instead of self.log throughout
                # this class — consider switching; left unchanged here.
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get lakeshore
            self.module = LS336(self.ip)
            session.add_message(
                f'Lakeshore initialized with ID: {self.module.id}')

        self.initialized = True

        # start data acq if passed
        if params.get('auto_acquire', False):
            self.agent.start('acq')

        return True, 'Lakeshore module initialized'

    @ocs_agent.param('f_sample', default=0.1, type=float)
    def acq(self, session, params=None):
        """acq(f_sample=0.1)

        **Process** - Begins recording data from thermometers and heaters.

        Parameters:
            f_sample (float, optional): Default is 0.1. Sets the sampling
                rate in Hz.

        Notes:
            The most recent data collected is stored in session.data in the
            structure:

                >>> response.session['data']
                {"ls336_fields":
                    {"timestamp": 1921920543,
                     "block_name": "temperatures"
                     "data": {"Channel_A_T": (some value)
                              "Channel_A_V": (some value)
                              "Channel_B_T": (some value)
                              "Channel_B_V": (some value)
                              "Channel_C_T": (some value)
                              "Channel_C_V": (some value)
                              "Channel_D_T": (some value)
                              "Channel_D_V": (some value)
                        }
                    }
                }
        """
        if params is None:
            params = {}

        # get sampling frequency
        f_sample = params.get('f_sample')
        if f_sample is None:
            t_sample = self.t_sample
        else:
            t_sample = 1/f_sample - 0.01
            self.t_sample = t_sample

        # acquire lock and start Process
        with self._lock.acquire_timeout(job='acq', timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # initialize recent temps array
            # shape is N_points x N_channels
            # N_points is 2 hour / t_sample rounded up
            # N_channels is 8 if the extra scanner is installed, 4 otherwise
            # t_sample can't be more than 2 hours
            N_channels = len(self.module.channels)
            self._recent_temps = np.full(
                (int(np.ceil(7200 / self.t_sample)), N_channels), -1.0)
            print(self._recent_temps.size)

            # acquire data from Lakeshore
            self.take_data = True
            while self.take_data:

                # get thermometry data
                current_time = time.time()
                temperatures_message = {
                    'timestamp': current_time,
                    'block_name': 'temperatures',
                    'data': {}
                }

                # '0' queries all inputs in one command.
                temps = self.module.get_kelvin('0')  # array of 4 (or 8) floats
                voltages = self.module.get_sensor('0')  # array of 4/8 floats
                for i, channel in enumerate(self.module.channels.values()):
                    channel_str = channel.input_name.replace(' ', '_')
                    temperatures_message['data'][channel_str + '_T'] = temps[i]
                    temperatures_message['data'][channel_str + '_V'] = voltages[i]

                # append to recent temps array for temp stability check
                # (newest sample always lives at row 0)
                self._recent_temps = np.roll(self._recent_temps, 1, axis=0)
                self._recent_temps[0] = temps

                # publish to feed
                self.agent.publish_to_feed(
                    'temperatures', temperatures_message)

                # For session.data - named to avoid conflicting with LS372
                # if in use at same time.
                session.data['ls336_fields'] = temperatures_message

                # get heater data
                heaters_message = {
                    'timestamp': current_time,
                    'block_name': 'heaters',
                    'data': {}
                }

                for i, heater in enumerate(self.module.heaters.values()):
                    heater_str = heater.output_name.replace(' ', '_')
                    heaters_message['data'][
                        heater_str + '_Percent'] = heater.get_heater_percent()
                    heaters_message['data'][
                        heater_str + '_Range'] = heater.get_heater_range()
                    heaters_message['data'][
                        heater_str + '_Max_Current'] = heater.get_max_current()
                    heaters_message['data'][
                        heater_str + '_Setpoint'] = heater.get_setpoint()

                # publish to feed
                self.agent.publish_to_feed('temperatures', heaters_message)

                # finish sample
                self.log.debug(
                    f'Sleeping for {np.round(self.t_sample)} seconds...')

                # release and reacquire lock between data acquisition
                # so tasks (set_setpoint, etc.) can run between samples
                self._lock.release()
                time.sleep(t_sample)
                if not self._lock.acquire(timeout=10, job='acq'):
                    print(
                        f"Lock could not be acquired because it is held by "
                        f"{self._lock.job}")
                    return False, 'Could not re-acquire lock'

        return True, 'Acquisition exited cleanly'

    @ocs_agent.param('_')
    def stop_acq(self, session, params=None):
        """stop_acq()

        **Task** - Stops acq process.
        """
        if params is None:
            params = {}
        # Flag is polled by the acq loop; no lock needed for a bool write.
        if self.take_data:
            self.take_data = False
            return True, 'Requested to stop taking data'
        else:
            return False, 'acq is not currently running'

    @ocs_agent.param('range', type=str,
                     choices=['off', 'low', 'medium', 'high'])
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def set_heater_range(self, session, params):
        """set_heater_range(range=None,heater='2')

        **Task** - Adjusts the heater range for servoing the load.

        Parameters:
            range (str): Sets the range of the chosen heater. Must be one of
                'off', 'low', 'medium', and 'high'.
            heater (str, optional): default '2'. Chooses which heater's range
                to change. Must be '1' or '2'.

        Notes:
            The range setting has no effect if an output is in the Off mode,
            and it does not apply to an output in Monitor Out mode. An output
            in Monitor Out mode is always on.
        """
        with self._lock.acquire_timeout(job='set_heater_range',
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # set range only if it differs from the current device state
            current_range = heater.get_heater_range()
            if params['range'] == current_range:
                print(
                    'Current heater range matches commanded value. '
                    'Proceeding unchanged')
            else:
                heater.set_heater_range(params['range'])
                session.add_message(
                    f"Set {heater.output_name} range to {params['range']}")

        return True, f"Set {heater.output_name} range to {params['range']}"

    @ocs_agent.param('P', type=float, check=lambda x: 0.1 <= x <= 1000)
    @ocs_agent.param('I', type=float, check=lambda x: 0.1 <= x <= 1000)
    @ocs_agent.param('D', type=float, check=lambda x: 0 <= x <= 200)
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def set_pid(self, session, params):
        """set_pid(P=None,I=None,D=None,heater='2')

        **Task** - Set the PID parameters for servoing the load.

        Parameters:
            P (float): Proportional term for PID loop (must be between
                0.1 and 1000)
            I (float): Integral term for PID loop (must be between
                0.1 and 1000)
            D (float): Derivative term for PID loop (must be between
                0 and 200)
            heater (str, optional): Selects the heater on which to change
                the PID settings. Must be '1' or '2'.
        """
        with self._lock.acquire_timeout(job='set_pid', timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # set pid only if any of the three terms changed
            current_p, current_i, current_d = heater.get_pid()
            if (params['P'] == current_p and params['I'] == current_i
                    and params['D'] == current_d):
                print('Current heater PID matches commanded value. '
                      'Proceeding unchanged')
            else:
                heater.set_pid(params['P'], params['I'], params['D'])
                session.add_message(
                    f"Set {heater.output_name} PID to {params['P']}, "
                    f"{params['I']}, {params['D']}")

        return True, (f"Set {heater.output_name} PID to {params['P']}, "
                      f" {params['I']}, {params['D']}")

    @ocs_agent.param('mode', type=str,
                     choices=['off', 'closed loop', 'zone', 'open loop'])
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def set_mode(self, session, params):
        """set_mode(mode=None,heater='2')

        **Task** - Sets the output mode of the heater.

        Parameters:
            mode (str): Selects the output mode for the heater.
                Accepts four options: 'off', 'closed loop', 'zone',
                and 'open loop'. for restrictions based on the selected
                heater.
            heater (str, optional): Default '2'. Selects the heater on
                which to change the mode. Must be '1' or '2'.

        Notes:
            Does not support the options 'monitor out' and 'warm up', which
            only work for the unsupported analog outputs (heaters 3 and 4).
        """
        with self._lock.acquire_timeout(job='set_mode', timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # set mode only if it differs from the current device state
            current_mode = heater.get_mode()
            if params['mode'] == current_mode:
                print(
                    'Current heater mode matches commanded value. '
                    'Proceeding unchanged')
            else:
                heater.set_mode(params['mode'])
                session.add_message(
                    f"Set {heater.output_name} mode to {params['mode']}")

        return True, f"Set {heater.output_name} mode to {params['mode']}"

    @ocs_agent.param('resistance', type=float)
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def set_heater_resistance(self, session, params):
        """set_heater_resistance(resistance=None,heater='2')

        **Task** - Sets the heater resistance and resistance setting
        of the heater. The associated 'get' function in the Heater class
        is get_heater_resistance_setting().

        Parameters:
            resistance (float): The actual resistance of the load
            heater (str, optional): Default '2'. Selects the heater on
                which to set the resistance. Must be '1' or '2'.
        """
        with self._lock.acquire_timeout(job='set_heater_resistance',
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # set heater resistance
            # the getter is called for its side effect of refreshing
            # heater.resistance from the device; the return is unused
            _ = heater.get_heater_resistance_setting()
            if params['resistance'] == heater.resistance:
                print(
                    'Current heater resistance matches commanded value. '
                    'Proceeding unchanged')
            else:
                heater.set_heater_resistance(params['resistance'])
                session.add_message(
                    f"Set {heater.output_name} resistance to "
                    f"{params['resistance']}")

        return True, (f"Set {heater.output_name} resistance to "
                      f"{params['resistance']}")

    @ocs_agent.param('current', type=float, check=lambda x: 0.0 <= x <= 2.0)
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def set_max_current(self, session, params):
        """set_max_current(current=None,heater='2')

        **Task** - Sets the maximum current that can pass through a heater.

        Parameters:
            current (float): The desired max current. Must be between
                0 and 2 A.
            heater (str, optional): Default '2'. Selects the heater on
                which to set the max current. Must be '1' or '2'.
        """
        with self._lock.acquire_timeout(job='set_max_current',
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # set max current only if it differs from device state
            current_max_current = heater.get_max_current()
            if params['current'] == current_max_current:
                print(
                    'Current max current matches commanded value. '
                    'Proceeding unchanged')
            else:
                heater.set_max_current(params['current'])
                session.add_message(
                    f"Set {heater.output_name} max current to {params['current']}")

        return True, (f"Set {heater.output_name} max current to "
                      f"{params['current']}")

    @ocs_agent.param('percent', type=float)
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def set_manual_out(self, session, params):
        """set_manual_out(percent=None,heater='2')

        **Task** - Sets the manual output of the heater as a percentage of
        the full current or power depending on which display the heater
        is set to use.

        Parameters:
            percent (float): Percent of full current or power to set on the
                heater. Must have 2 or fewer decimal places.
            heater (str, optional): Default '2'. Selects the heater on which
                to set the manual output. Must be '1' or '2'.
        """
        with self._lock.acquire_timeout(job='set_manual_out',
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # set manual out only if it differs from device state
            current_manual_out = heater.get_manual_out()
            if params['percent'] == current_manual_out:
                print('Current manual out matches commanded value. '
                      'Proceeding unchanged')
            else:
                heater.set_manual_out(params['percent'])
                session.add_message(
                    f"Set {heater.output_name} manual out to {params['percent']}")

        return True, (f"Set {heater.output_name} manual out to "
                      f"{params['percent']}")

    @ocs_agent.param('input', type=str,
                     choices=['A', 'B', 'C', 'D', 'D1', 'D2', 'D3', 'D4', 'D5'])
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def set_input_channel(self, session, params):
        """set_input_channel(input=None,heater='2')

        **Task** - Sets the input channel of the heater control loop.

        Parameters:
            input (str): The name of the heater to use as the input channel.
                Must be one of 'none','A','B','C', or 'D'. Can also be
                'D2','D3','D4', or 'D5' if the extra Lakeshore 3062 Scanner
                is installed in your LS336.
            heater (str, optional): Default '2'. Selects the heater for
                which to set the input channel. Must be '1' or '2'.
        """
        with self._lock.acquire_timeout(job='set_input_channel',
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # D1 is the same as D
            if params['input'] == 'D1':
                params['input'] = 'D'

            # set input channel only if it differs from device state
            current_input_channel = heater.get_input_channel()
            if params['input'] == current_input_channel:
                print(
                    'Current input channel matches commanded value. '
                    'Proceeding unchanged')
            else:
                heater.set_input_channel(params['input'])
                session.add_message(
                    f"Set {heater.output_name} input channel to {params['input']}")

        return True, (f"Set {heater.output_name} input channel to "
                      f"{params['input']}")

    @ocs_agent.param('setpoint', type=float)
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def set_setpoint(self, session, params):
        """set_setpoint(setpoint=None,heater='2')

        **Task** - Sets the setpoint of the heater control loop, after first
        turning ramp off. May be a limit to how high the setpoint can go
        based on your system parameters.

        Parameters:
            setpoint (float): The setpoint for the control loop. Units depend
                on the preferred sensor units (Kelvin, Celsius, or Sensor).
            heater (str, optional): Default '2'. Selects the heater for which
                to set the input channel. Must be '1' or '2'.
        """
        with self._lock.acquire_timeout(job='set_setpoint',
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # set setpoint only if it differs from device state
            current_setpoint = heater.get_setpoint()
            if params['setpoint'] == current_setpoint:
                print('Current setpoint matches commanded value. '
                      'Proceeding unchanged')
            else:
                heater.set_ramp_on_off('off')
                heater.set_setpoint(params['setpoint'])
                # static setpoint used in temp stability check
                # to avoid ramping bug
                self._static_setpoint = params['setpoint']
                session.add_message(
                    f"Turned ramp off and set {heater.output_name} setpoint to "
                    f"{params['setpoint']}")

        return True, (f"Turned ramp off and set {heater.output_name} "
                      f"setpoint to {params['setpoint']}")

    @ocs_agent.param('T_limit', type=int)
    @ocs_agent.param('channel', type=str, default='A',
                     choices=['A', 'B', 'C', 'D', 'D2', 'D3', 'D4', 'D5'])
    def set_T_limit(self, session, params):
        """set_T_limit(T_limit=None,channel='A')

        **Task** - Sets the temperature limit above which the control
        output assigned to the selected channel shut off.

        Parameters:
            T_limit (int): The temperature limit in Kelvin. Note that a limit
                of 0 K turns off this feature for the given channel.
            channel (str, optional): Default 'A'. Selects which channel to
                use for controlling the temperature. Options are 'A','B','C',
                and 'D'. Can also be 'D2','D3','D4', or 'D5' if the extra
                Lakeshore 3062 Scanner is installed in your LS336.
        """
        with self._lock.acquire_timeout(job='set_T_limit',
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get channel
            channel_key = params.get('channel', 'A')  # default to input A
            channel = self.module.channels[channel_key]

            # set T limit only if it differs from device state
            current_limit = channel.get_T_limit()
            if params['T_limit'] == current_limit:
                print('Current T limit matches commanded value. '
                      'Proceeding unchanged')
            else:
                channel.set_T_limit(params['T_limit'])
                session.add_message(
                    f"Set {channel.input_name} T limit to {params['T_limit']}")

        return True, f"Set {channel.input_name} T limit to {params['T_limit']}"

    @ocs_agent.param('temperature', type=float)
    @ocs_agent.param('ramp', default=0.1, type=float)
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    @ocs_agent.param('transport', default=False, type=bool)
    @ocs_agent.param('transport_offset', default=0, type=float,
                     check=lambda x: x >= 0.0)
    def servo_to_temperature(self, session, params):
        """servo_to_temperature(temperature=None,ramp=0.1,heater='2',\
transport=False,transport_offset=0)

        **Task** - A wrapper for setting the heater setpoint. Performs sanity
        checks on heater configuration before publishing setpoint:

        1. checks control mode of heater (closed loop)
        2. checks units of input channel (kelvin)
        3. resets setpoint to current temperature with ramp off
        4. sets ramp on to specified rate
        5. checks setpoint does not exceed input channel T_limit
        6. sets setpoint to commanded value

        Note that this function does NOT turn on the heater if it is off.
        You must use set_heater_range() to pick a range first.

        Parameters:
            temperature (float): The new setpoint in Kelvin. Make sure there
                is is a control input set to the heater and its units are
                Kelvin.
            ramp (float, optional): Default 0.1. The rate for how quickly
                the setpoint ramps to new value. Units of K/min.
            heater (str, optional): Default '2'. The heater to use
                for servoing. Must be '1' or '2'.
            transport (bool, optional): Default False. See Notes
                for description.
            transport_offset (float, optional): Default 0. In Kelvin.
                See Notes.

        Notes:
            If param 'transport' is provided and True, the control loop
            restarts when the setpoint is first reached. This is useful for
            loads with long cooling times or time constant to help minimize
            over/undershoot.

            If param 'transport' is provided and True, and 'transport_offset'
            is provided and positive, and the setpoint is higher than the
            current temperature, then the control loop will restart when the
            setpoint - transport_offset is first reached. This is useful to
            avoid a "false positive" temperature stability check too shortly
            after transport completes.
        """
        # get sampling frequency
        t_sample = self.t_sample

        with self._lock.acquire_timeout(job='servo_to_temperature',
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # get current setpoint
            current_setpoint = heater.get_setpoint()

            # check in correct control mode
            if heater.get_mode() != 'closed loop':
                session.add_message(
                    'Changing control to closed loop mode for servo.')
                heater.set_mode("closed loop")

            # check in correct units
            channel = heater.get_input_channel()
            if channel == 'none':
                return False, (f'{heater.output_name} does not have an '
                               f'input channel assigned')
            if self.module.channels[channel].get_units() != 'kelvin':
                session.add_message(
                    'Setting preferred units to kelvin on '
                    'heater control input.')
                self.module.channels[channel].set_units('kelvin')

            # restart setpoint at current temperature
            current_temp = np.round(float(self.module.get_kelvin(channel)), 4)
            session.add_message(
                f'Turning ramp off and setting setpoint to current '
                f'temperature {current_temp}')
            heater.set_ramp_on_off('off')
            heater.set_setpoint(current_temp)

            # reset ramp settings
            ramp = params.get('ramp', 0.1)
            session.add_message(
                f'Turning ramp on and setting rate to {ramp}K/min')
            heater.set_ramp_on_off('on')
            heater.set_ramp_rate(ramp)

            # make sure not exceeding channel T limit
            T_limit = self.module.channels[channel].get_T_limit()
            if T_limit <= params['temperature']:
                return False, (f"{heater.output_name} control channel "
                               f"{channel} T limit of {T_limit}K is lower "
                               f"than setpoint of {params['temperature']}")

            # set setpoint
            if params['temperature'] == current_setpoint:
                print('Current setpoint matches commanded value. '
                      'Proceeding unchanged')
            else:
                session.add_message(
                    f"Setting {heater.output_name} setpoint to "
                    f"{params['temperature']}")
                heater.set_setpoint(params['temperature'])
                # static setpoint used in temp stability check
                # to avoid pulling the ramping setpoint
                self._static_setpoint = params['temperature']

                # if transport, restart control loop when setpoint
                # first crossed
                if params.get('transport', False):
                    current_range = heater.get_heater_range()
                    starting_sign = np.sign(
                        params['temperature'] - current_temp)
                    transporting = True

                    # if we are raising temp, allow possibility of
                    # stopping transport at a cooler temp
                    T_offset = 0
                    if starting_sign > 0:
                        T_offset = params.get('transport_offset', 0)
                        if T_offset < 0:
                            return False, ('Transport offset temperature '
                                           'cannot be negative')

                    while transporting:
                        current_temp = np.round(
                            self.module.get_kelvin(channel), 4)
                        # check when this flips
                        current_sign = np.sign(
                            params['temperature'] - T_offset - current_temp)

                        # release and reacquire lock between data acquisition
                        self._lock.release()
                        time.sleep(t_sample)
                        if not self._lock.acquire(timeout=10,
                                                  job='servo_to_temperature'):
                            print(
                                f"Lock could not be acquired because it is "
                                f"held by {self._lock.job}")
                            return False, 'Could not re-acquire lock'

                        if current_sign != starting_sign:
                            transporting = False  # update flag

                            # cycle control loop
                            session.add_message(
                                'Transport complete, restarting control '
                                'loop at provided setpoint')
                            heater.set_heater_range('off')
                            # necessary 1 s for prev command to register
                            # in ls336 firmware for some reason
                            time.sleep(1)
                            heater.set_heater_range(current_range)

        return True, (f"Set {heater.output_name} setpoint to "
                      f"{params['temperature']}")

    @ocs_agent.param('threshold', default=0.1, type=float)
    @ocs_agent.param('window', default=900, type=int)
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def check_temperature_stability(self, session, params):
        """check_temperature_stability(threshold=0.1,window=900,heater='2')

        **Task** - Assesses whether the load is stable around the setpoint
        to within some threshold.

        Parameters:
            threshold (float, optional): Default 0.1. See notes.
            window (int, optional): Default 900. See notes.
            heater (str, optional): Default '2'. Selects the heater for which
                to set the input channel. Must be '1' or '2'.

        Notes
        -----
        Param 'threshold' sets the upper bound on the absolute
        temperature difference between the setpoint and any temperature
        from the input channel in the last 'window' seconds.

        Param 'window' sets the lookback time into the most recent
        temperature data, in seconds.

        Note that this function grabs the most recent data in one window-length
        of time; it does not take new data.

        If you want to use the result of this task for making logical decisions
        in a client (e.g. waiting longer before starting a process if the
        temperature is not yet stable), use the session['success'] key. It will
        be True if the temperature is stable and False if not.

        Example:
            >>> response = ls336.check_temperature_stability()
            >>> response.session['success']
            True
        """
        # get threshold
        threshold = params.get('threshold')
        if threshold is None:
            threshold = self.threshold

        # get window
        window = params.get('window')
        if window is None:
            window = self.window
        # number of stored samples covering the lookback window
        num_idxs = int(np.ceil(window / self.t_sample))

        with self._lock.acquire_timeout(job='check_temperature_stability',
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # get channel
            channel = heater.get_input_channel()
            channel_num = self.module.channels[channel].num

            # get current temp
            current_temp = np.round(self.module.get_kelvin(channel), 4)

            # check if recent temps and current temps are within threshold
            _recent_temps = self._recent_temps[:num_idxs, channel_num-1]
            _recent_temps = np.concatenate(
                (np.array([current_temp]), _recent_temps))

            # get static setpoint if None
            if self._static_setpoint is None:
                self._static_setpoint = heater.get_setpoint()

            # avoids checking against the ramping setpoint, i.e. want to
            # compare to commanded setpoint not mid-ramp setpoint
            setpoint = self._static_setpoint

            session.add_message(
                f'Maximum absolute difference in recent temps is '
                f'{np.max(np.abs(_recent_temps - setpoint))}K')
            if np.all(np.abs(_recent_temps - setpoint) < threshold):
                session.add_message(
                    f'Recent temps are within {threshold}K of setpoint')
                return True, (f'Servo temperature is stable within '
                              f'{threshold}K of setpoint')

        return False, (f'Servo temperature is not stable within '
                       f'{threshold}K of setpoint')

    @ocs_agent.param('attribute', type=str)
    @ocs_agent.param('channel', type=str, default='A',
                     choices=['A', 'B', 'C', 'D', 'D1', 'D2', 'D3', 'D4', 'D5'])
    def get_channel_attribute(self, session, params):
        """get_channel_attribute(attribute=None,channel='A')

        **Task** - Gets an arbitrary channel attribute and stores it in the
        session.data dict. Attribute must be the name of a method in the
        namespace of the Lakeshore336 Channel class, with a leading "get\\_"
        removed (see example).

        Parameters:
            attribute (str): The name of the channel attribute to get. See
                the Lakeshore 336 Channel class API for all options.
            channel (str, optional): Default 'A'. Selects which channel for
                which to get the attribute. Options are 'A','B','C', and 'D'.
                Can also be 'D2','D3','D4', or 'D5' if the extra Lakeshore
                3062 Scanner is installed in your LS336.

        Example:
            >>> ls.get_channel_attribute(attribute = 'T_limit').session['data']
            {'T_limit': 30.0}
        """
        with self._lock.acquire_timeout(job=f"get_{params['attribute']}",
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get channel
            channel_key = params.get('channel', 'A')  # default to input A
            channel = self.module.channels[channel_key]

            # check that attribute is a valid channel method
            # NOTE(review): if the attribute is invalid, `resp` is never
            # bound and the return below raises NameError — confirm intended.
            if getattr(channel, f"get_{params['attribute']}",
                       False) is not False:
                query = getattr(channel, f"get_{params['attribute']}")

            # get attribute
            resp = query()
            session.data[params['attribute']] = resp

        return True, (f"Retrieved {channel.input_name} "
                      f"{params['attribute']}, value is {resp}")

    @ocs_agent.param('attribute', type=str)
    @ocs_agent.param('heater', default='2', type=str, choices=['1', '2'])
    def get_heater_attribute(self, session, params):
        """get_heater_attribute(attribute=None,heater='2')

        **Task** - Gets an arbitrary heater attribute and stores it in the
        session.data dict. Attribute must be the name of a method in the
        namespace of the Lakeshore336 Heater class, with a leading "get\\_"
        removed (see example).

        Parameters:
            attribute (str): The name of the channel attribute to get. See
                the Lakeshore 336 Heater class API for all options.
            heater (str, optional): Default '2'. Selects the heater for which
                to get the heater attribute. Must be '1' or '2'.

        Examples
        --------
        >>> ls.get_heater_attribute(attribute = 'heater_range').session['data']
        {'heater_range': 'off'}
        """
        with self._lock.acquire_timeout(job=f"get_{params['attribute']}",
                                        timeout=3) as acquired:
            if not acquired:
                print(
                    f"Lock could not be acquired because it is held by "
                    f"{self._lock.job}")
                return False, 'Could not acquire lock'

            session.set_status('running')

            # get heater
            heater_key = params.get('heater', '2')  # default to 50W output
            heater = self.module.heaters[heater_key]

            # check that attribute is a valid heater method
            # NOTE(review): same caveat as get_channel_attribute — an invalid
            # attribute leaves `resp`/`query` unbound.
            if getattr(heater, f"get_{params['attribute']}",
                       False) is not False:
                query = getattr(heater, f"get_{params['attribute']}")

            # get attribute
            resp = query()
            session.data[params['attribute']] = resp

        return True, (f"Retrieved {heater.output_name} "
                      f"{params['attribute']}, value is {resp}")
class FTSAerotechAgent:
    """Agent for connecting to the FTS mirror control.

    Args:
        agent (ocs.ocs_agent.OCSAgent): OCSAgent object which is running.
        ip_addr (str): IP address of Motion Controller.
        port (int): Port of Motion Controller.
        mode (str, optional): 'acq' to start data acquisition on initialize.
        samp (float, optional): Default sampling frequency in Hz.
    """

    def __init__(self, agent, ip_addr, port, mode=None, samp=2):
        self.ip_addr = ip_addr
        self.port = int(port)

        self.stage = None
        self.initialized = False
        self.take_data = False

        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()

        # Only mode == 'acq' enables automatic acquisition after init.
        self.auto_acq = (mode == 'acq')
        self.sampling_frequency = float(samp)

        # Register the position feed.
        agg_params = {
            'frame_length': 10 * 60,  # [sec]
        }
        self.agent.register_feed('position',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=0)

    def init_stage_task(self, session, params=None):
        """init_stage_task(params=None)

        Perform first time setup for communication with FTS stage.

        Args:
            params (dict): Parameters dictionary for passing parameters to
                task.
        """
        if params is None:
            params = {}

        if self.stage is not None and self.initialized:
            return True, 'Stages already Initialized'

        self.log.debug("Trying to acquire lock")
        with self.lock.acquire_timeout(timeout=0, job='init') as acquired:
            # Locking mechanism stops code from proceeding if no lock acquired
            if not acquired:
                # BUGFIX: original concatenation produced "alreadyrunning".
                self.log.warn("Could not start init because {} is already "
                              "running".format(self.lock.job))
                return False, "Could not acquire lock."

            self.log.debug("Lock Acquired Connecting to Stages")
            try:
                self.stage = FTSAerotechStage(self.ip_addr, self.port)
            except Exception as e:
                self.log.error(f"Error while connecting to FTS: {e}")
                # Stop the agent entirely if the stage cannot be reached.
                reactor.callFromThread(reactor.stop)
                return False, "FTS Stage Initialization Failed"

        # This part is for the record and to allow future calls to proceed,
        # so does not require the lock.
        self.initialized = True
        if self.auto_acq:
            self.agent.start('acq')
        return True, 'Stage Initialized.'

    def home_task(self, session, params=None):
        """Home the stage to its negative limit."""
        with self.lock.acquire_timeout(timeout=3, job='home') as acquired:
            if not acquired:
                # BUGFIX: original message was missing the space before the
                # job name.
                self.log.warn("Could not start home because lock held by "
                              f"{self.lock.job}")
                return False, "Could not get lock"
            try:
                self.stage.home()
            except Exception as e:
                self.log.error(f"Homing Failed: {e}")
                return False, "Homing Failed"

        return True, "Homing Complete"

    def move_to(self, session, params=None):
        """Move to absolute position relative to stage center (in mm).

        params: {'position': float between -74.8 and 74.8}
        """
        if params is None:
            return False, "No Position Given"
        if 'position' not in params:
            return False, "No Position Given"

        with self.lock.acquire_timeout(timeout=3, job='move') as acquired:
            if not acquired:
                self.log.warn("Could not start move because lock held by "
                              f"{self.lock.job}")
                return False, "Could not get lock"
            # Return whatever the stage driver reports for the move.
            return self.stage.move_to(params.get('position'))

        # Unreachable safety net: the with-block above always returns.
        return False, "Move did not complete correctly?"

    def start_acq(self, session, params=None):
        """Start acquiring mirror position data.

        params: {'sampling_frequency': float, sampling rate in Hz}

        The most recent position data is stored in session.data in the
        format::

            {"position": {"pos": mirror position}}
        """
        if params is None:
            params = {}

        f_sample = params.get('sampling_frequency', self.sampling_frequency)
        pm = Pacemaker(f_sample, quantize=True)

        if not self.initialized or self.stage is None:
            raise Exception("Connection to Stages not initialized")

        with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
            if not acquired:
                self.log.warn(f"Could not start acq because {self.lock.job} "
                              "is already running")
                return False, "Could not acquire lock."

            # BUGFIX: original message was missing the space before the rate.
            self.log.info("Starting Data Acquisition for FTS Mirror at "
                          f"{f_sample} Hz")
            session.set_status('running')
            self.take_data = True
            last_release = time.time()

            while self.take_data:
                # Cycle the lock about once a second so other operations can
                # interrupt a long-running acquisition.
                if time.time() - last_release > 1.:
                    if not self.lock.release_and_acquire(timeout=20):
                        self.log.warn("Could not re-acquire lock now held by "
                                      f"{self.lock.job}.")
                        return False, "could not re-acquire lock"
                    last_release = time.time()
                pm.sleep()

                data = {
                    'timestamp': time.time(),
                    'block_name': 'position',
                    'data': {}
                }
                success, pos = self.stage.get_position()
                if not success:
                    self.log.info("stage.get_position call failed")
                else:
                    data['data']['pos'] = pos
                    self.agent.publish_to_feed('position', data)

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Stop the data acquisition.

        params: dict: {}
        """
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running.'
class stmACAgent:
    """OCS agent class for the stimulator AC source (PCR500MA supply)."""

    def __init__(self, agent, ipaddr=IPADDR_DEFAULT):
        """
        Parameters
        ----------
        ipaddr : str
            IP address of AC supply
        """
        self.active = True
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()
        self.take_data = False
        self.initialized = False
        # Most recent voltage requested via set_values; reported by
        # get_settings.  (BUGFIX: get_settings previously read the
        # never-assigned attribute ``self.Voltage``.)
        self.voltsetting = 0.0

        self._pcr = PCR500MA(ipaddr)

        agg_params = {'frame_length': 60}
        self.agent.register_feed('acsupply',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    def init_pcr500(self, session, params=None):
        """Initialization of pcr500 AC supply."""
        if self.initialized:
            return True, "Already initialized."

        with self.lock.acquire_timeout(timeout=0, job='init') as acquired:
            if not acquired:
                self.log.warn(
                    "Could not start init because {} is already running".
                    format(self.lock.job))
                return False, "Could not acquire lock."
            try:
                self._pcr.checkID()
            except ValueError:
                # Unexpected ID string; proceed anyway (best effort,
                # matching the original behavior).
                pass

        print("AC supply PCR500 initialized.")
        self.initialized = True
        return True, "AC supply PCR500 initialized."

    def start_acq(self, session, params):
        """Starts acquiring voltage/current/power data.

        Parameters:
            params (dict): may contain 'sampling frequency' in Hz
                (default 0.1).  NOTE: the key genuinely contains a space,
                preserved for existing callers.
        """
        f_sample = params.get('sampling frequency', 0.1)
        # BUGFIX: clamp so a sample rate above 10 Hz cannot produce a
        # negative sleep (time.sleep raises on negative values).
        sleep_time = max(1 / f_sample - 0.1, 0)

        if not self.initialized:
            self.init_pcr500(session)

        session.set_status('running')
        self.take_data = True
        session.data = {"fields": {}}

        while self.take_data:
            with self.lock.acquire_timeout(timeout=1, job='acq') as acquired:
                if not acquired:
                    print(f"Lock could not be acquired because it is held by "
                          f"{self.lock.job}")
                    # BUGFIX: return an (ok, msg) tuple instead of bare False.
                    return False, "Could not acquire lock."

                current_time = time.time()
                data = {
                    'timestamp': current_time,
                    'block_name': 'acsupply',
                    'data': {}
                }
                voltage = self._pcr.getVoltage()
                current = self._pcr.getCurrent()
                power = self._pcr.getPower()

                if not self.lock.release_and_acquire(timeout=10):
                    print(f"Could not re-acquire lock now held by "
                          f"{self.lock.job}.")
                    return False, "Could not re-acquire lock."

                data['data']['voltage'] = voltage
                data['data']['current'] = current
                data['data']['power'] = power

                field_dict = {
                    'acsupply': {
                        'voltage': voltage,
                        'current': current,
                        'power': power
                    }
                }
                session.data['fields'].update(field_dict)

                time.sleep(sleep_time)

            self.agent.publish_to_feed('acsupply', data)

        self.agent.feeds['acsupply'].flush_buffer()
        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Stops the data acquisition."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        return False, 'acq is not currently running.'

    def set_values(self, session, params=None):
        """A task to set sensor parameters for AC supply.

        volt : float
            operate AC voltage
        """
        if params is None:
            params = {}

        with self.lock.acquire_timeout(3, job='set_values') as acquired:
            if not acquired:
                self.log.warn('Could not start set_values because '
                              f'{self.lock.job} is already running')
                return False, 'Could not acquire lock.'

            volt = params.get('volt')
            if volt is not None:
                self.voltsetting = volt
                # self._ble2.set_speed(speed)

        # BUGFIX: task previously fell off the end and returned None,
        # which is not a valid (ok, msg) OCS task result.
        return True, 'Set values for AC supply.'

    def get_values(self, session, params=None):
        """A task to provide configuration information (not implemented)."""
        pass

    def switchPower(self, session, params=None, state=0):
        """A task to turn switch, state 0 = off, 1 = on (not implemented)."""
        pass

    def get_settings(self, session, params=None):
        """Get the stored AC voltage setting."""
        if params is None:
            params = {}

        with self.lock.acquire_timeout(3, job='get_settings') as acquired:
            if not acquired:
                self.log.warn('Could not start get_setting because '
                              f'{self.lock.job} is already running')
                return False, 'Could not acquire lock.'
            # BUGFIX: was ``self.Voltage``, which is never assigned anywhere
            # on this class; set_values stores ``self.voltsetting``.
            setV = self.voltsetting
            session.data = {'volt': setV}

        return True, 'Got AC status'

    def getACstatus(self, session, params=None):
        """Query and report measured AC voltage/current/frequency/power."""
        with self.lock.acquire_timeout(3, job='get_settings') as acquired:
            if not acquired:
                self.log.warn('Could not start get_setting because '
                              f'{self.lock.job} is already running')
                return False, 'Could not acquire lock.'
            volt = self._pcr._a('MEAS:VOLT:AC?')
            curr = self._pcr._a('MEAS:CURR:AC?')
            freq = self._pcr._a('MEAS:FREQ?')
            power = self._pcr._a('MEAS:POW:AC?')
            preac = self._pcr._a('MEAS:POW:AC:REAC?')
            print(volt, curr, freq, power, preac)

        return True, f'AC {volt}, {curr}, {freq}, {power}, {preac}'

    def rampVoltage(self, session, params=None):
        """Ramp the output voltage to params['volt'] in VoltStep increments,
        pausing WaitTimeStep seconds between steps."""
        print(params)
        voltgoal = params.get('volt', 0)
        print(voltgoal)
        if voltgoal < 0:
            print("Voltage cannot be negative!")
            return False, 'Voltage cannot be negative'

        while abs(voltgoal - self._pcr.Voltage) > VoltStep:
            if self._pcr.Voltage < voltgoal:
                self._pcr.Voltage = self._pcr.Voltage + VoltStep
            elif self._pcr.Voltage > voltgoal:
                self._pcr.Voltage = self._pcr.Voltage - VoltStep
            # NOTE(review): `acquired` is not checked here (nor was it in the
            # original) — on lock timeout the body still runs; confirm this
            # is intended.
            with self.lock.acquire_timeout(timeout=3,
                                           job='set_voltage') as acquired:
                print("Set ", self._pcr.Voltage)
                self._pcr.setVoltage(self._pcr.Voltage)
                time.sleep(0.5)
                print(self._pcr.getCurrent())
                time.sleep(WaitTimeStep - 0.5)

        with self.lock.acquire_timeout(timeout=3,
                                       job='set_voltage') as acquired:
            print("last step to", voltgoal)
            self._pcr.Voltage = voltgoal
            print(self, self._pcr.getCurrent())
            self._pcr.setVoltage(self._pcr.Voltage)
            time.sleep(0.5)
            print(self._pcr.getCurrent())

        return True, f'Reached to voltage {voltgoal}'

    def forceZero(self, session, params=None):
        """Ramp the output down to 0 V (for site work)."""
        while self._pcr.Voltage > VoltStep:
            with self.lock.acquire_timeout(timeout=3,
                                           job='set_voltage') as acquired:
                self._pcr.Voltage = self._pcr.Voltage - VoltStep
                print("go down to ", self._pcr.Voltage)
                self._pcr.setVoltage(self._pcr.Voltage)
                time.sleep(WaitTimeForce)

        print("set to 0 Volt")
        with self.lock.acquire_timeout(timeout=3,
                                       job='set_voltage') as acquired:
            self._pcr.Voltage = 0.0
            self._pcr.setVoltage(0.0)
        return True, 'Ramped down to 0 volt.'
class LS240_Agent:
    """Agent for the Lakeshore 240 temperature module.

    Args:
        agent (ocs.ocs_agent.OCSAgent): OCSAgent object which is running.
        port (str): Serial port the device is connected to.
        f_sample (float): Default sampling frequency in Hz.
    """

    def __init__(self, agent, port="/dev/ttyUSB0", f_sample=2.5):
        self.agent: ocs_agent.OCSAgent = agent
        self.log = agent.log
        self.lock = TimeoutLock()

        self.port = port
        self.module: Optional[Module] = None

        self.f_sample = f_sample

        self.initialized = False
        self.take_data = False

        # Registers Temperature and Voltage feeds
        agg_params = {
            'frame_length': 60,
        }
        self.agent.register_feed('temperatures',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=1)

    # Task functions.
    def init_lakeshore_task(self, session, params=None):
        """init_lakeshore_task(params=None)

        Perform first time setup of the Lakeshore 240 Module.

        Args:
            params (dict): Parameters dictionary for passing parameters to
                task.

        Parameters:
            auto_acquire (bool, optional): Default is False. Starts data
                acquisition after initialization if True.
        """
        if params is None:
            params = {}

        auto_acquire = params.get('auto_acquire', False)

        if self.initialized:
            return True, "Already Initialized Module"

        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('starting')

            self.module = Module(port=self.port)
            print("Initialized Lakeshore module: {!s}".format(self.module))
            session.add_message("Lakeshore initialized with ID: %s"
                                % self.module.inst_sn)

        self.initialized = True

        # Start data acquisition if requested
        if auto_acquire:
            self.agent.start('acq')

        return True, 'Lakeshore module initialized.'

    def set_values(self, session, params=None):
        """set_values(params=None)

        A task to set sensor parameters for a Lakeshore240 Channel

        Args:
            channel (int, 1 -- 2 or 8): Channel number to set.
            sensor (int, 1, 2, or 3, optional): Specifies sensor type:

                +---+---------+
                | 1 | Diode   |
                +---+---------+
                | 2 | PlatRTC |
                +---+---------+
                | 3 | NTC RTD |
                +---+---------+

            auto_range (int, 0 or 1, optional): Must be 0 or 1. Specifies if
                channel should use autorange.
            range (int 0-8, optional): Specifies range if autorange is false.
                Only settable for NTC RTD:

                +---+--------------------+
                | 0 | 10 Ohms (1 mA)     |
                +---+--------------------+
                | 1 | 30 Ohms (300 uA)   |
                +---+--------------------+
                | 2 | 100 Ohms (100 uA)  |
                +---+--------------------+
                | 3 | 300 Ohms (30 uA)   |
                +---+--------------------+
                | 4 | 1 kOhm (10 uA)     |
                +---+--------------------+
                | 5 | 3 kOhms (3 uA)     |
                +---+--------------------+
                | 6 | 10 kOhms (1 uA)    |
                +---+--------------------+
                | 7 | 30 kOhms (300 nA)  |
                +---+--------------------+
                | 8 | 100 kOhms (100 nA) |
                +---+--------------------+

            current_reversal (int, 0 or 1, optional): Specifies if input
                current reversal is on or off. Always 0 if input is a diode.
            unit (int, 1-4, optional): Specifies preferred units parameter,
                and sets the units for alarm settings (note: the params key
                is ``unit``, not ``units``):

                +---+------------+
                | 1 | Kelvin     |
                +---+------------+
                | 2 | Celsius    |
                +---+------------+
                | 3 | Sensor     |
                +---+------------+
                | 4 | Fahrenheit |
                +---+------------+

            enabled (int, 0 or 1, optional): sets if channel is enabled
            name (str, optional): sets name of channel
        """
        if params is None:
            params = {}

        with self.lock.acquire_timeout(0, job='set_values') as acquired:
            if not acquired:
                self.log.warn("Could not start set_values because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            self.module.channels[params['channel'] - 1].set_values(
                sensor=params.get('sensor'),
                auto_range=params.get('auto_range'),
                range=params.get('range'),
                current_reversal=params.get('current_reversal'),
                unit=params.get('unit'),
                enabled=params.get('enabled'),
                name=params.get('name'),
            )

        return True, 'Set values for channel {}'.format(params['channel'])

    def upload_cal_curve(self, session, params=None):
        """Task to upload a calibration curve to a channel.

        Args:
            channel (int, 1 -- 2 or 8): Channel number
            filename (str): filename for cal curve
        """
        # Keep the int channel number and the channel object in separate
        # names (the original shadowed one with the other).
        channel_num = params['channel']
        filename = params['filename']

        with self.lock.acquire_timeout(0, job='upload_cal_curve') as acquired:
            if not acquired:
                # BUGFIX: warning previously referred to 'set_values'.
                self.log.warn("Could not start upload_cal_curve because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            channel = self.module.channels[channel_num - 1]
            self.log.info("Starting upload to channel {}...".format(channel))
            channel.load_curve(filename)
            self.log.info("Finished uploading.")

        return True, "Uploaded curve to channel {}".format(channel)

    def start_acq(self, session, params=None):
        """acq(params=None)

        Method to start data acquisition process.

        Args:
            sampling_frequency (float): Sampling frequency for data
                collection. Defaults to 2.5 Hz.
        """
        if params is None:
            params = {}

        f_sample = params.get('sampling_frequency')
        # If f_sample is None, use value passed to Agent init
        if f_sample is None:
            f_sample = self.f_sample

        # BUGFIX: clamp so very fast rates cannot produce a negative sleep.
        sleep_time = max(1 / f_sample - 0.01, 0)

        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn(
                    "Could not start acq because {} is already running".format(
                        self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')

            self.take_data = True

            while self.take_data:
                data = {
                    'timestamp': time.time(),
                    'block_name': 'temps',
                    'data': {}
                }

                for chan in self.module.channels:
                    chan_string = "Channel {}".format(chan.channel_num)
                    data['data'][chan_string + ' T'] = chan.get_reading(unit='K')
                    data['data'][chan_string + ' V'] = chan.get_reading(unit='S')

                self.agent.publish_to_feed('temperatures', data)
                time.sleep(sleep_time)

            self.agent.feeds['temperatures'].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Stops acq process."""
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running'
class Keithley2230GAgent:
    """Agent for the Keithley 2230G power supply, reached over GPIB.

    Args:
        agent (ocs.ocs_agent.OCSAgent): OCSAgent object which is running.
        ip_address (str): IP address of the GPIB-to-ethernet converter.
        gpib_slot (int): GPIB address of the power supply.
    """

    def __init__(self, agent, ip_address, gpib_slot):
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()

        self.job = None
        self.ip_address = ip_address
        self.gpib_slot = gpib_slot
        self.monitor = False

        self.psu = None

        # Registers Temperature and Voltage feeds
        agg_params = {
            'frame_length': 10 * 60,
        }
        self.agent.register_feed('psu_output',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=0)

    def init_psu(self, session, params=None):
        """Task to connect to Keithley power supply."""
        with self.lock.acquire_timeout(0) as acquired:
            if not acquired:
                return False, "Could not acquire lock"

            try:
                self.psu = psuInterface(self.ip_address, self.gpib_slot)
                self.idn = self.psu.identify()
            except socket.timeout:
                self.log.error("PSU timed out during connect")
                return False, "Timeout"

            self.log.info("Connected to psu: {}".format(self.idn))

        return True, 'Initialized PSU.'

    def monitor_output(self, session, params=None):
        """Process to continuously monitor PSU output current and voltage and
        send info to aggregator.

        Args:
            wait (float, optional): time to wait between measurements
                [seconds].
        """
        if params is None:
            params = {}

        wait_time = params.get('wait', 1)

        self.monitor = True

        while self.monitor:
            with self.lock.acquire_timeout(1) as acquired:
                if acquired:
                    data = {
                        'timestamp': time.time(),
                        'block_name': 'output',
                        'data': {}
                    }

                    for chan in [1, 2, 3]:
                        data['data']["Voltage_{}".format(
                            chan)] = self.psu.getVolt(chan)
                        data['data']["Current_{}".format(
                            chan)] = self.psu.getCurr(chan)

                    self.agent.publish_to_feed('psu_output', data)
                else:
                    self.log.warn("Could not acquire in monitor_current")

            # Best-effort pacing; runs whether or not the lock was acquired.
            time.sleep(wait_time)

        return True, "Finished monitoring current"

    def stop_monitoring(self, session, params=None):
        """Stop the monitor_output process."""
        self.monitor = False
        return True, "Stopping current monitor"

    def set_voltage(self, session, params=None):
        """Sets voltage of power supply:

        Args:
            channel (int): Channel number (1, 2, or 3)
            volts (float): Voltage to set. Must be between 0 and 30.
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                self.psu.setVolt(params['channel'], params['volts'])
            else:
                return False, "Could not acquire lock"

        return True, 'Set channel {} voltage to {}'.format(
            params['channel'], params['volts'])

    def set_current(self, session, params=None):
        """Sets current of power supply:

        Args:
            channel (int): Channel number (1, 2, or 3)
            current (float): Current limit to set, in amps. Must be within
                the supply's per-channel limit.
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                self.psu.setCurr(params['channel'], params['current'])
            else:
                return False, "Could not acquire lock"

        return True, 'Set channel {} current to {}'.format(
            params['channel'], params['current'])

    def set_output(self, session, params=None):
        """Task to turn channel on or off.

        Args:
            channel (int): Channel number (1, 2, or 3)
            state (bool): True for on, False for off
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                self.psu.setOutput(params['channel'], params['state'])
            else:
                return False, "Could not acquire lock"

        # BUGFIX: previously returned the copy-pasted 'Initialized PSU.'
        return True, 'Set channel {} output to {}'.format(
            params['channel'], params['state'])
class LATRtXYStageAgent:
    """Agent for connecting to the LATRt XY Stages.

    Args:
        agent (ocs.ocs_agent.OCSAgent): OCSAgent object which is running.
        ip_addr (str): IP address where RPi server is running.
        port (int): Port the RPi Server is listening on.
        mode (str, optional): 'acq' to start data acquisition on initialize.
        samp (float, optional): Default sampling frequency in Hz.
    """

    def __init__(self, agent, ip_addr, port, mode=None, samp=2):
        self.ip_addr = ip_addr
        self.port = port

        self.xy_stage = None
        self.initialized = False
        self.take_data = False
        self.is_moving = False

        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()

        # Only mode == 'acq' enables automatic acquisition after init.
        self.auto_acq = (mode == 'acq')
        self.sampling_frequency = float(samp)

        # Register the position feeds.
        agg_params = {
            'frame_length': 10 * 60,  # [sec]
        }
        self.agent.register_feed('positions',
                                 record=True,
                                 agg_params=agg_params,
                                 buffer_time=0)

    def init_xy_stage_task(self, session, params=None):
        """init_xy_stage_task(params=None)

        Perform first time setup for communication with XY stages.

        Args:
            params (dict): Parameters dictionary for passing parameters to
                task.
        """
        if params is None:
            params = {}

        self.log.debug("Trying to acquire lock")
        with self.lock.acquire_timeout(timeout=0, job='init') as acquired:
            # Locking mechanism stops code from proceeding if no lock acquired
            if not acquired:
                self.log.warn(
                    "Could not start init because {} is already running".
                    format(self.lock.job))
                return False, "Could not acquire lock."

            self.log.debug("Lock Acquired Connecting to Stages")
            self.xy_stage = XY_Stage(self.ip_addr, self.port)
            self.xy_stage.init_stages()
            print("XY Stages Initialized")

        # This part is for the record and to allow future calls to proceed,
        # so does not require the lock.
        self.initialized = True
        if self.auto_acq:
            self.agent.start('acq')
        return True, 'XY Stages Initialized.'

    def move_x_cm(self, session, params):
        """Move the X stage a relative distance.

        params: dict: {'distance': float, 'velocity': float < 1.2}
        """
        with self.lock.acquire_timeout(timeout=3, job='move_x_cm') as acquired:
            if not acquired:
                self.log.warn(
                    f"Could not start x move because lock held by {self.lock.job}"
                )
                # BUGFIX: previously returned bare False instead of an
                # (ok, msg) tuple like move_y_cm.
                return False, "Could not acquire lock"
            self.xy_stage.move_x_cm(params.get('distance', 0),
                                    params.get('velocity', 1))

        time.sleep(1)
        # Wait for the move to finish.  If acq is running it keeps
        # self.is_moving updated; otherwise poll the stage here.
        while True:
            if not self.take_data:
                with self.lock.acquire_timeout(timeout=3,
                                               job='move_x_cm') as acquired:
                    if not acquired:
                        self.log.warn(
                            f"Could not check because lock held by {self.lock.job}"
                        )
                        return False, "Could not acquire lock"
                    self.is_moving = self.xy_stage.moving
            if not self.is_moving:
                break
        return True, "X Move Complete"

    def move_y_cm(self, session, params):
        """Move the Y stage a relative distance.

        params: dict: {'distance': float, 'velocity': float < 1.2}
        """
        with self.lock.acquire_timeout(timeout=3, job='move_y_cm') as acquired:
            if not acquired:
                self.log.warn(
                    f"Could not start y move because lock held by {self.lock.job}"
                )
                return False, "could not acquire lock"
            self.xy_stage.move_y_cm(params.get('distance', 0),
                                    params.get('velocity', 1))

        time.sleep(1)
        while True:
            if not self.take_data:
                with self.lock.acquire_timeout(timeout=3,
                                               job='move_y_cm') as acquired:
                    if not acquired:
                        self.log.warn(
                            f"Could not check for move because lock held by {self.lock.job}"
                        )
                        return False, "could not acquire lock"
                    self.is_moving = self.xy_stage.moving
            if not self.is_moving:
                break
        return True, "Y Move Complete"

    def set_position(self, session, params):
        """Overwrite the stage's current position registers.

        params: dict: {'position': (float, float)}
        """
        with self.lock.acquire_timeout(timeout=3,
                                       job='set_position') as acquired:
            if not acquired:
                self.log.warn(
                    f"Could not set position because lock held by {self.lock.job}"
                )
                return False, "Could not acquire lock"

            self.xy_stage.position = params['position']
        return True, "Position Updated"

    def start_acq(self, session, params=None):
        """Start acquiring stage position data.

        params: dict: {'sampling_frequency': float, sampling rate in Hz}
        """
        if params is None:
            params = {}

        f_sample = params.get('sampling_frequency', self.sampling_frequency)
        pm = Pacemaker(f_sample, quantize=True)

        if not self.initialized or self.xy_stage is None:
            raise Exception("Connection to XY Stages not initialized")

        with self.lock.acquire_timeout(timeout=0, job='acq') as acquired:
            if not acquired:
                self.log.warn(
                    "Could not start acq because {} is already running".format(
                        self.lock.job))
                return False, "Could not acquire lock."

            self.log.info(
                f"Starting Data Acquisition for XY Stages at {f_sample} Hz")
            session.set_status('running')
            self.take_data = True
            last_release = time.time()

            while self.take_data:
                # Cycle the lock about once a second so moves can interleave.
                if time.time() - last_release > 1.:
                    if not self.lock.release_and_acquire(timeout=10):
                        self.log.warn(
                            f"Could not re-acquire lock now held by {self.lock.job}."
                        )
                        return False, "could not re-acquire lock"
                    last_release = time.time()
                pm.sleep()

                data = {
                    'timestamp': time.time(),
                    'block_name': 'positions',
                    'data': {}
                }
                pos = self.xy_stage.position
                self.is_moving = self.xy_stage.moving

                data['data']['x'] = pos[0]
                data['data']['y'] = pos[1]

                self.agent.publish_to_feed('positions', data)

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Stop the data acquisition.

        params: dict: {}
        """
        if self.take_data:
            self.take_data = False
            return True, 'requested to stop taking data.'
        else:
            return False, 'acq is not currently running.'
class VantagePro2Agent:
    """Agent to connect to single VantagePro2 Weather Monitor Device.

    Args:
        sample_freq (double):
            frequency (Hz) at which the weather monitor samples data. Can not
            be faster than 0.5 Hz. This value is converted to period (sec)
            for time.wait(seconds)
        port (string):
            usb port that the weather monitor is connected to.  The monitor
            will communicate via this port.
    """
    # change port argument when I figure out how to generalize it!

    def __init__(self, agent, port="/dev/ttyUSB0", sample_freq=0.5):
        self.active = True
        self.agent: ocs_agent.OCSAgent = agent
        self.log = agent.log
        self.lock = TimeoutLock()

        self.port = port
        self.module: Optional[VantagePro2] = None

        if sample_freq > 0.5:
            self.log.warn("Sample frequency too fast! Setting to 0.5Hz")
            sample_freq = 0.5
        self.sample_freq = sample_freq

        self.initialized = False
        self.take_data = False

        # Registers weather data feed
        agg_params = {
            'frame_length': 60,
        }
        self.agent.register_feed('weather_data',
                                 record=True,
                                 agg_params=agg_params)

    # Task functions.
    def init_VantagePro2_task(self, session, params=None):
        """Perform first time setup of the Weather Monitor Module.

        Args:
            params (dict): Parameters dictionary for passing parameters to
                task.
        """
        if params is None:
            params = {}

        auto_acquire = params.get('auto_acquire', False)

        if self.initialized:
            return True, "Already Initialized Module"

        with self.lock.acquire_timeout(0, job='init') as acquired:
            if not acquired:
                self.log.warn("Could not start init because "
                              "{} is already running".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('starting')

            self.module = VantagePro2(self.port)
            print("Initialized Vantage Pro2 module: {!s}".format(
                self.module))

        self.initialized = True

        # Start data acquisition if requested
        if auto_acquire:
            self.agent.start('acq')
            time.sleep(2)

        return True, 'Vantage Pro2 module initialized.'

    def start_acq(self, session, params=None):
        """Method to start data acquisition process.

        Args:
            sample_freq (double): Frequency at which weather data is sampled.
                Defaults to 0.5 Hz.
        """
        if params is None:
            params = {}

        sample_freq = params.get('sample_freq')
        # If sample_freq is None, use value passed to Agent init
        if sample_freq is None:
            sample_freq = self.sample_freq

        with self.lock.acquire_timeout(0, job='acq') as acquired:
            if not acquired:
                self.log.warn("""Could not start acq because {} is already
                    running""".format(self.lock.job))
                return False, "Could not acquire lock."

            session.set_status('running')

            # Use pacemaker class to take data at regular intervals.
            if sample_freq % 1 == 0:
                pm = Pacemaker(sample_freq, True)
            else:
                pm = Pacemaker(sample_freq)

            self.take_data = True
            while self.take_data:
                # BUGFIX: the original also called time.sleep(1/sample_freq)
                # after pm.sleep(), which doubled the loop period and halved
                # the effective sample rate.  The Pacemaker alone handles
                # the pacing.
                pm.sleep()
                data = {
                    'timestamp': time.time(),
                    'block_name': 'weather',
                    'data': {}
                }
                data['data'] = self.module.weather_daq()
                self.agent.publish_to_feed('weather_data', data)

            self.agent.feeds['weather_data'].flush_buffer()

        return True, 'Acquisition exited cleanly.'

    def stop_acq(self, session, params=None):
        """Stops acq process."""
        if self.take_data:
            self.take_data = False
            print('requested to stop taking data.')
            return True, 'data taking succesfully halted'
        else:
            return False, 'acq is not currently running'
class TektronixAWGAgent:
    """Agent for a Tektronix arbitrary waveform generator, reached over GPIB.

    Args:
        agent (ocs.ocs_agent.OCSAgent): OCSAgent object which is running.
        ip_address (str): IP address of the GPIB-to-ethernet converter.
        gpib_slot (int): GPIB address of the AWG.
    """

    def __init__(self, agent, ip_address, gpib_slot):
        self.agent = agent
        self.log = agent.log
        self.lock = TimeoutLock()

        self.job = None
        self.ip_address = ip_address
        self.gpib_slot = gpib_slot
        self.monitor = False

        self.awg = None

        # Registers data feeds
        agg_params = {
            'frame_length': 60,
        }
        self.agent.register_feed('AWG',
                                 record=True,
                                 agg_params=agg_params)

    def init_awg(self, session, params=None):
        """Task to connect to Tektronix AWG."""
        with self.lock.acquire_timeout(0) as acquired:
            if not acquired:
                return False, "Could not acquire lock"

            try:
                self.awg = tektronixInterface(self.ip_address, self.gpib_slot)
                self.idn = self.awg.identify()
            except socket.timeout:
                self.log.error("Tektronix AWG timed out during connect")
                return False, "Timeout"

            self.log.info("Connected to AWG: {}".format(self.idn))

        return True, 'Initialized AWG.'

    def set_frequency(self, session, params=None):
        """Sets frequency of function generator:

        Args:
            frequency (float): Frequency to set. Must be between 0 and
                25,000,000.
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                freq = params['frequency']
                self.awg.setFreq(freq)

                data = {
                    'timestamp': time.time(),
                    'block_name': "AWG_frequency",
                    'data': {
                        'AWG_frequency': freq
                    }
                }
                self.agent.publish_to_feed('AWG', data)
            else:
                return False, "Could not acquire lock"

        return True, 'Set frequency {}'.format(params)

    def set_amplitude(self, session, params=None):
        """Sets peak-to-peak amplitude of the function generator output.

        Args:
            amplitude (float): Peak to Peak voltage to set. Must be between
                0 and 10.
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                amp = params['amplitude']
                self.awg.setAmp(amp)

                data = {
                    'timestamp': time.time(),
                    'block_name': "AWG_amplitude",
                    'data': {
                        'AWG_amplitude': amp
                    }
                }
                self.agent.publish_to_feed('AWG', data)
            else:
                return False, "Could not acquire lock"

        return True, 'Set amplitude to {} '.format(params)

    def set_output(self, session, params=None):
        """Task to turn channel on or off.

        Args:
            state (bool): True for on, False for off.
        """
        with self.lock.acquire_timeout(1) as acquired:
            if acquired:
                state = params['state']
                self.awg.setOutput(state)

                data = {
                    'timestamp': time.time(),
                    'block_name': "AWG_output",
                    'data': {
                        'AWG_output': int(state)
                    }
                }
                self.agent.publish_to_feed('AWG', data)
            else:
                return False, "Could not acquire lock"

        # BUGFIX: previously returned the copy-pasted 'Initialized AWG.'
        return True, 'Set output to {}'.format(params)