def _run(self):
    """Execute the loaded protocol on the robot, tracking session state.

    Subscribes to broker COMMAND notifications (for run-log entries and
    pause/resume state mirroring), then dispatches to the APIv2
    (``ProtocolContext``) or APIv1 (global ``robot``) execution path based
    on ``self._use_v2``.  On success the session ends 'finished'; on
    cancellation a ``SmoothieAlarm`` is logged; any other exception sets
    state 'error' and is re-raised.  The broker subscription is always
    removed in the ``finally`` block.
    """
    def on_command(message):
        # 'before' notifications append to the run log; PAUSE/RESUME
        # commands mirror into session state so observers stay in sync.
        if message['$'] == 'before':
            self.log_append()
        if message['name'] == command_types.PAUSE:
            self.set_state('paused')
        if message['name'] == command_types.RESUME:
            self.set_state('running')

    self._reset()

    _unsubscribe = self._broker.subscribe(
        command_types.COMMAND, on_command)
    self.startTime = now()
    self.set_state('running')

    try:
        if self._use_v2:
            # APIv2: build a fresh ProtocolContext bound to this
            # session's loop/broker and run via the v2 executor.
            self.resume()
            self._pre_run_hooks()
            self._hardware.cache_instruments()
            ctx = ProtocolContext.build_using(
                self._protocol,
                loop=self._loop,
                broker=self._broker,
                extra_labware=getattr(
                    self._protocol, 'extra_labware', {}))
            ctx.connect(self._hardware)
            ctx.home()
            run_protocol(self._protocol, context=ctx)
        else:
            # APIv1: only Python protocols are supported; the protocol
            # source is exec'd against the global robot singleton.
            robot.broker = self._broker
            assert isinstance(self._protocol, PythonProtocol),\
                'Internal error: v1 should only be used for python'
            if not robot.is_connected():
                robot.connect()
            self.resume()
            self._pre_run_hooks()
            robot.cache_instrument_models()
            robot.discover_modules()
            exec(self._protocol.contents, {})

        # If the last command in a protocol was a pause, the protocol
        # will immediately finish executing because there's no smoothie
        # command to block... except the home that's about to happen,
        # which will confuse the app and lock it up. So we need to
        # do our own pause here, and sleep the thread until/unless the
        # app resumes us.
        #
        # Cancelling from the app during this pause will result in the
        # smoothie giving us an error during the subsequent home, which
        # is tragic but expected.
        while self.state == 'paused':
            sleep(0.1)
        self.set_state('finished')
        self._hw_iface().home()
    except SmoothieAlarm:
        # Raised when the run is cancelled; not treated as an error.
        # NOTE(review): presumably the cancel path sets session state
        # elsewhere — confirm.
        log.info("Protocol cancelled")
    except Exception as e:
        log.exception("Exception during run:")
        self.error_append(e)
        self.set_state('error')
        raise e
    finally:
        # Always detach the broker listener, whatever the outcome.
        _unsubscribe()
def execute(protocol_file: TextIO,
            propagate_logs: bool = False,
            log_level: str = 'warning',
            emit_runlog: Callable[[Dict[str, Any]], None] = None):
    """ Run the protocol itself.

    This is a one-stop function to run a protocol, whether python or json,
    no matter the API version, from external (i.e. not bound up in other
    internal server infrastructure) sources.

    To run an opentrons protocol from other places, pass in a file like
    object as protocol_file; this function either returns (if the run has no
    problems) or raises an exception.

    To call from the command line use either the autogenerated entrypoint
    ``opentrons_execute`` or ``python -m opentrons.execute``.

    If the protocol is using Opentrons Protocol API V1, it does not need to
    explicitly call :py:meth:`.Robot.connect`
    or :py:meth:`.Robot.discover_modules`, or
    :py:meth:`.Robot.cache_instrument_models`.

    :param file-like protocol_file: The protocol file to execute
    :param propagate_logs: Whether this function should allow logs from the
                           Opentrons stack to propagate up to the root handler.
                           This can be useful if you're integrating this
                           function in a larger application, but most logs
                           that occur during protocol simulation are best
                           associated with the actions in the protocol that
                           cause them. Default: ``False``
    :type propagate_logs: bool
    :param log_level: The level of logs to emit on the command line.
                      Default: 'warning'
    :type log_level: 'debug', 'info', 'warning', or 'error'
    :param emit_runlog: A callback for printing the runlog. If specified, this
                        will be called whenever a command adds an entry to the
                        runlog, which can be used for display and progress
                        estimation. If specified, the callback should take a
                        single argument (the name doesn't matter) which will
                        be a dictionary (see below). Default: ``None``

    The format of the runlog entries is as follows:

    .. code-block:: python

        {
            'name': command_name,
            'payload': {
                'text': string_command_text,
                # The rest of this struct is command-dependent; see
                # opentrons.commands.commands. Its keys match format
                # keys in 'text', so that
                # entry['payload']['text'].format(**entry['payload'])
                # will produce a string with information filled in
            }
        }
    """
    stack_logger = logging.getLogger('opentrons')
    stack_logger.propagate = propagate_logs
    # Fall back to WARNING when an unrecognized level name is passed.
    stack_logger.setLevel(getattr(logging, log_level.upper(),
                                  logging.WARNING))
    contents = protocol_file.read()
    protocol = parse(contents, protocol_file.name)
    if ff.use_protocol_api_v2():
        # APIv2: run through a ProtocolContext, forwarding any labware
        # or data bundled with the parsed protocol.
        context = get_protocol_api(
            bundled_labware=getattr(protocol, 'bundled_labware', None),
            bundled_data=getattr(protocol, 'bundled_data', None))
        if emit_runlog:
            context.broker.subscribe(
                commands.command_types.COMMAND, emit_runlog)
        context.home()
        execute_apiv2.run_protocol(protocol,
                                   simulate=False,
                                   context=context)
    else:
        # APIv1: drive the global robot singleton directly.
        robot.connect()
        robot.cache_instrument_models()
        robot.discover_modules()
        robot.home()
        if emit_runlog:
            robot.broker.subscribe(
                commands.command_types.COMMAND, emit_runlog)
        if isinstance(protocol, JsonProtocol):
            legacy_api.protocols.execute_protocol(protocol)
        else:
            # Python protocol source is exec'd against the robot singleton.
            exec(protocol.contents, {})
def prepare(self):
    """Prepare the session for a run by refreshing its state.

    On the legacy (APIv1) stack, attached modules are re-scanned first;
    the v2 stack handles module discovery elsewhere.
    """
    needs_module_scan = not self._use_v2
    if needs_module_scan:
        robot.discover_modules()
    self.refresh()
# Load the pre-ordered well coordinates produced by calibration.
with open("CoordinatesOrdered.json", "r") as infile:
    # json.load reads the file object directly; no need for read()+loads.
    coordinates = json.load(infile)

# Correcting Offset (can change after renewed calibration).
# Wells are named A1..H12; adjust every well's x/y/z in place.
for row in ["A", "B", "C", "D", "E", "F", "G", "H"]:
    for col in range(1, 13):
        # Look the well up once instead of re-formatting the key
        # for each axis.  (The original inner comprehension also
        # shadowed the outer loop variable; 'row'/'col' avoid that.)
        well = coordinates["%s%s" % (row, col)]
        well["x"] += 6
        well["y"] -= 8
        well["z"] += 25  # 75


# ### Connecting robot and modules

# In[7]:

robot.connect()
robot.discover_modules()


# ### Custom function to get ordered json

# In[8]:

def order_json(unord_json):
    """Return the well dicts ordered column-first.

    Keys like 'A1'..'H12' are sorted on (column number, row letter), so
    the result walks each column top-to-bottom: A1..H1, A2..H2, ...
    """
    return [unord_json[key]
            for key in sorted(unord_json,
                              key=lambda k: (int(k[1:]), k[0]))]


# In[9]: