def __init__(self, name, protocol, hardware, loop, broker, motion_lock):
    """Build an RPC protocol session.

    :param name: Display name of the protocol/session.
    :param protocol: Parsed protocol object (expected to expose
        ``api_level``, ``metadata`` and ``text`` attributes).
    :param hardware: Hardware controller the run will drive.
    :param loop: The asyncio event loop used for the simulating context.
    :param broker: Command broker; also the root of this session's loggers.
    :param motion_lock: Lock serializing robot motion across sessions.
    """
    self._broker = broker
    self._default_logger = self._broker.logger
    # Separate child loggers so simulation and live-run output can be
    # distinguished/filtered downstream.
    self._sim_logger = self._broker.logger.getChild('sim')
    self._run_logger = self._broker.logger.getChild('run')
    self._loop = loop
    self.name = name
    self._protocol = protocol
    # Protocols with no explicit api_level are treated as API v2.0.
    self.api_level = getattr(self._protocol, 'api_level', APIVersion(2, 0))
    self._use_v2 = self.api_level >= APIVersion(2, 0)
    log.info(f'Protocol API Version: {self.api_level}; '
             f'Protocol kind: {type(self._protocol)}')

    # self.metadata is exposed via rpc
    self.metadata = getattr(self._protocol, 'metadata', {})

    self._hardware = hardware
    # Context used for the pre-run simulation pass.
    self._simulating_ctx = ProtocolContext.build_using(
        self._protocol, loop=self._loop, broker=self._broker)

    self.state: 'State' = None  #: The current state
    self.stateInfo: 'StateInfo' = {}  #: A message associated with the current state
    self.commands = []
    self.command_log = {}
    self.errors = []

    # Accumulators populated while simulating/running the protocol.
    self._containers = []
    self._instruments = []
    self._modules = []
    self._interactions = []

    # Public summaries (filled in later; None until the first refresh).
    self.instruments = None
    self.containers = None
    self.modules = None
    self.protocol_text = protocol.text

    self.startTime: Optional[float] = None
    self._motion_lock = motion_lock
    self._event_watcher = None
    # Door-safety status surfaced to clients.
    self.door_state: Optional[str] = None
    self.blocked: Optional[bool] = None
    self._run_lock: Lock = Lock()
    self._is_running: Event = Event()
def test_select_next_tip():
    """Exercise next_tip/use_tips bookkeeping on a 96-tip rack.

    Wells are column-major, so indices 0-7 are column 1 (A1-H1),
    index 8 is A2, etc.
    """
    labware_name = 'opentrons_96_tiprack_300ul'
    labware_def = labware.get_labware_definition(labware_name)
    tiprack = labware.Labware(labware_def,
                              Location(Point(0, 0, 0), 'Test Slot'))
    well_list = tiprack.wells()

    # Fresh rack: any channel count that fits a column starts at A1.
    next_one = tiprack.next_tip()
    assert next_one == well_list[0]
    next_five = tiprack.next_tip(5)
    assert next_five == well_list[0]
    next_eight = tiprack.next_tip(8)
    assert next_eight == well_list[0]
    # Nine channels can never fit in an 8-well column.
    next_nine = tiprack.next_tip(9)
    assert next_nine is None

    # A1 tip only has been used
    tiprack.use_tips(well_list[0])
    next_one = tiprack.next_tip()
    assert next_one == well_list[1]
    next_five = tiprack.next_tip(5)
    assert next_five == well_list[1]
    # A full 8-channel pick must skip to the untouched 2nd column.
    next_eight = tiprack.next_tip(8)
    assert next_eight == well_list[8]

    # 2nd column has also been used
    tiprack.use_tips(well_list[8], num_channels=8)
    next_one = tiprack.next_tip()
    assert next_one == well_list[1]
    next_five = tiprack.next_tip(5)
    assert next_five == well_list[1]
    next_eight = tiprack.next_tip(8)
    assert next_eight == well_list[16]

    # Bottom 4 tips of 1st column are also used
    tiprack.use_tips(well_list[4], num_channels=4)
    next_one = tiprack.next_tip()
    assert next_one == well_list[1]
    next_three = tiprack.next_tip(3)
    assert next_three == well_list[1]
    # Only B1-D1 remain contiguous in column 1, so 5- and 8-channel
    # picks must move to column 3.
    next_five = tiprack.next_tip(5)
    assert next_five == well_list[16]
    next_eight = tiprack.next_tip(8)
    assert next_eight == well_list[16]

    # you can reuse tips infinitely on api level 2.2
    tiprack.use_tips(well_list[0])
    tiprack.use_tips(well_list[0])

    # you can't on api level 2.1 or previous
    early_tr = labware.Labware(labware_def,
                               Location(Point(0, 0, 0), 'Test Slot'),
                               api_level=APIVersion(2, 1))
    early_tr.use_tips(well_list[0])
    with pytest.raises(AssertionError):
        early_tr.use_tips(well_list[0])
async def test_too_high_version(main_router):
    """A protocol requesting an apiLevel above MAX_SUPPORTED_VERSION
    must fail session creation with RuntimeError."""
    # One minor version past the newest supported API level.
    minor_over = APIVersion(MAX_SUPPORTED_VERSION.major,
                            MAX_SUPPORTED_VERSION.minor + 1)
    minor_over_mdata = {'apiLevel': str(minor_over)}
    # NOTE(review): protocol text reconstructed from a collapsed source
    # line — the newline placement inside the triple-quoted literal is
    # assumed; confirm against the upstream file.
    proto = 'metadata=' + str(minor_over_mdata) + """
def run(ctx):
    pass
"""
    with pytest.raises(RuntimeError):
        main_router.session_manager.create(name='<blank>', contents=proto)
def _check_version_wrapper(*args, **kwargs):
    """Reject calls to API-gated methods from too-old protocol contexts.

    ``decorated_obj`` is a closure variable supplied by the enclosing
    decorator; its ``__opentrons_version_added`` attribute records the
    API level at which the method was introduced.
    """
    # args[0] is the bound instance (self) carrying _api_version.
    slf = args[0]
    added_in = decorated_obj.__opentrons_version_added  # type: ignore
    current_version = slf._api_version
    # Only v2+ contexts are version-gated; anything below 2.0 bypasses
    # the check entirely.
    if current_version >= APIVersion(2, 0)\
            and current_version < added_in:
        raise APIVersionError(
            f'{decorated_obj} was added in {added_in}, but your '
            f'protocol requested version {current_version}. You '
            f'must increase your API version to {added_in} to '
            'use this functionality.')
    return decorated_obj(*args, **kwargs)
def test_multichannel_transfer_old_version(loop):
    # for API version below 2.2, multichannel pipette can only
    # reach row A of 384-well plates
    ctx = papi.ProtocolContext(loop, api_version=APIVersion(2, 1))
    lw1 = ctx.load_labware('biorad_96_wellplate_200ul_pcr', 1)
    lw2 = ctx.load_labware('corning_384_wellplate_112ul_flat', 2)
    tiprack = ctx.load_labware('opentrons_96_tiprack_300ul', 3)
    instr_multi = ctx.load_instrument(
        'p300_multi', Mount.LEFT, tip_racks=[tiprack])

    # Row-A source and row-A/B targets: only the row-A target survives
    # filtering, so the plan is a single aspirate/dispense pair.
    xfer_plan = tx.TransferPlan(
        100,
        lw1.rows()[0][0],
        [lw2.rows()[0][1], lw2.rows()[1][1]],
        instr_multi,
        max_volume=instr_multi.hw_pipette['working_volume'],
        api_version=ctx.api_version)
    xfer_plan_list = []
    for step in xfer_plan:
        xfer_plan_list.append(step)
    exp1 = [{'method': 'pick_up_tip', 'args': [], 'kwargs': {}},
            {'method': 'aspirate',
             'args': [100, lw1.wells_by_name()['A1'], 1.0],
             'kwargs': {}},
            {'method': 'dispense',
             'args': [100, lw2.wells_by_index()['A2'], 1.0],
             'kwargs': {}},
            {'method': 'drop_tip', 'args': [], 'kwargs': {}}]
    assert xfer_plan_list == exp1

    # target without row limit
    with pytest.raises(IndexError):
        xfer_plan = tx.TransferPlan(
            100,
            lw1.rows()[0][1],
            lw2.rows()[1][1],
            instr_multi,
            max_volume=instr_multi.hw_pipette['working_volume'],
            api_version=ctx.api_version)
        xfer_plan_list = []
        for step in xfer_plan:
            xfer_plan_list.append(step)
def test_parse_bundle_details(get_bundle_fixture):
    """Parsing a bundled zipfile yields a PythonProtocol whose bundled
    resources and metadata round-trip from the fixture."""
    bundle = get_bundle_fixture('simple_bundle')
    result = parse(bundle['binary_zipfile'], bundle['filename'])

    assert isinstance(result, PythonProtocol)
    assert result.filename == 'protocol.ot2.py'
    # Every bundled attribute should match the fixture's copy exactly.
    for attr in ('bundled_labware', 'bundled_python',
                 'bundled_data', 'metadata'):
        assert getattr(result, attr) == bundle[attr]
    assert result.api_level == APIVersion(2, 0)
def engage(self, height: float = None,
           offset: float = None,
           height_from_base: float = None):
    """ Raise the Magnetic Module's magnets.

    The destination of the magnets can be specified in several different
    ways, based on internally stored default heights for labware:

       - If neither ``height``, ``height_from_base`` nor ``offset`` is
         specified, the magnets will raise to a reasonable default height
         based on the specified labware.
       - The recommended way to adjust the height of the magnets is to
         specify ``height_from_base``, which should be a distance in mm
         relative to the base of the labware that is on the magnetic module
       - If ``height`` is specified, it should be a distance in mm from the
         home position of the magnets.
       - If ``offset`` is specified, it should be an offset in mm from the
         default position. A positive number moves the magnets higher and
         a negative number moves the magnets lower.

    Only certain labware has a defined engage height for the Magnetic
    Module. If labware that does not have a defined engage height is
    loaded on the Magnetic Module (or if no labware is loaded), then
    ``height`` or ``height_from_base`` must be specified.

    :param height_from_base: The height to raise the magnets to, in mm from
                             the base of the labware
    :param height: The height to raise the magnets to, in mm from home.
    :param offset: An offset relative to the default height for the labware
                   in mm
    .. versionadded:: 2.1
        The *height_from_base* parameter.
    """
    # Precedence: explicit height > height_from_base (API >= 2.2 only)
    # > labware default (+ optional offset).
    if height is not None:
        dist = height
    elif height_from_base is not None and\
            self._ctx._api_version >= APIVersion(2, 2):
        # Convert base-relative height to home-relative travel.
        dist = height_from_base +\
            modules.magdeck.OFFSET_TO_LABWARE_BOTTOM[self._module.model()]
    elif self.labware and self.labware.magdeck_engage_height is not None:
        dist = self._determine_lw_engage_height()
        # NOTE(review): ``offset`` is only applied on this default-height
        # branch, not when ``height``/``height_from_base`` are given —
        # confirm that is the intended contract.
        if offset:
            dist += offset
    else:
        raise ValueError(
            "Currently loaded labware {} does not have a known engage "
            "height; please specify explicitly with the height param".
            format(self.labware))
    self._module.engage(dist)
def __init__(self, name, protocol, hardware, loop, broker, motion_lock):
    """Build an RPC protocol session (earlier revision of this class —
    no door/run-lock state; see the other ``__init__`` in this file).

    :param name: Display name of the protocol/session.
    :param protocol: Parsed protocol object (expected to expose
        ``api_level``, ``metadata`` and ``text`` attributes).
    :param hardware: Hardware controller the run will drive.
    :param loop: The asyncio event loop used for the simulating context.
    :param broker: Command broker; also the root of this session's loggers.
    :param motion_lock: Lock serializing robot motion across sessions.
    """
    self._broker = broker
    self._default_logger = self._broker.logger
    # Separate child loggers for simulation vs. live-run output.
    self._sim_logger = self._broker.logger.getChild('sim')
    self._run_logger = self._broker.logger.getChild('run')
    self._loop = loop
    self.name = name
    self._protocol = protocol
    # Protocols with no explicit api_level are treated as API v2.0.
    self.api_level = getattr(self._protocol, 'api_level', APIVersion(2, 0))
    self._use_v2 = self.api_level >= APIVersion(2, 0)
    log.info(f'Protocol API Version: {self.api_level}; '
             f'Protocol kind: {type(self._protocol)}')

    # self.metadata is exposed via rpc
    self.metadata = getattr(self._protocol, 'metadata', {})

    self._hardware = hardware
    # Context used for the pre-run simulation pass.
    self._simulating_ctx = ProtocolContext.build_using(
        self._protocol, loop=self._loop, broker=self._broker)

    self.state = None
    self.commands = []
    self.command_log = {}
    self.errors = []

    # Accumulators populated while simulating/running the protocol.
    self._containers = []
    self._instruments = []
    self._modules = []
    self._interactions = []

    # Public summaries (filled in later; None until the first refresh).
    self.instruments = None
    self.containers = None
    self.modules = None
    self.protocol_text = protocol.text

    self.startTime = None
    self._motion_lock = motion_lock
def test_build_edges():
    """build_edges omits the well-center point before API 2.4 and
    includes it from 2.4 onward.

    NOTE(review): this test passes the API version as the *third*
    positional argument; the ``build_edges`` defined elsewhere in this
    file takes ``(where, offset, mount, deck, radius, version)`` — the
    two snippets appear to come from different revisions; confirm before
    reconciling.
    """
    lw_def = get_labware_definition('corning_96_wellplate_360ul_flat')
    test_lw = Labware(lw_def, Location(Point(0, 0, 0), None))
    off = Point(0, 0, 1.0)
    deck = Deck()
    # Pre-2.4: four edges (right, left, up, down) — no center point.
    old_correct_edges = [
        test_lw['A1']._from_center_cartesian(x=1.0, y=0, z=1) + off,
        test_lw['A1']._from_center_cartesian(x=-1.0, y=0, z=1) + off,
        test_lw['A1']._from_center_cartesian(x=0, y=1.0, z=1) + off,
        test_lw['A1']._from_center_cartesian(x=0, y=-1.0, z=1) + off,
    ]
    res = build_edges(test_lw['A1'], 1.0, APIVersion(2, 2),
                      Mount.RIGHT, deck)
    assert res == old_correct_edges

    # 2.4+: the center point appears between the left and up edges.
    new_correct_edges = [
        test_lw['A1']._from_center_cartesian(x=1.0, y=0, z=1) + off,
        test_lw['A1']._from_center_cartesian(x=-1.0, y=0, z=1) + off,
        test_lw['A1']._from_center_cartesian(x=0, y=0, z=1) + off,
        test_lw['A1']._from_center_cartesian(x=0, y=1.0, z=1) + off,
        test_lw['A1']._from_center_cartesian(x=0, y=-1.0, z=1) + off,
    ]
    res2 = build_edges(test_lw['A1'], 1.0, APIVersion(2, 4),
                       Mount.RIGHT, deck)
    assert res2 == new_correct_edges
def build_edges(
        where: 'Well',
        offset: float,
        mount: top_types.Mount,
        deck: 'Deck',
        radius: float = 1.0,
        version: APIVersion = APIVersion(2, 7)) -> List[top_types.Point]:
    """Compute the sequence of points a touch_tip move should visit.

    :param where: The well whose rim is being touched.
    :param offset: Z offset (mm) applied to every edge point.
    :param mount: The mount of the moving pipette (used by
        ``determine_edge_path`` on API >= 2.4).
    :param deck: The deck layout, consulted when planning the edge path.
    :param radius: Fraction of the well radius at which to place edges.
    :param version: Protocol API version; governs whether the center
        point is included and whether the mount-aware path is used.
    :returns: Edge points in traversal order, with ``None`` entries
        dropped.
    """
    # Determine the touch_tip edges/points
    offset_pt = top_types.Point(0, 0, offset)
    edge_list = EdgeList(
        right=where._from_center_cartesian(x=radius, y=0, z=1) + offset_pt,
        left=where._from_center_cartesian(x=-radius, y=0, z=1) + offset_pt,
        center=where._from_center_cartesian(x=0, y=0, z=1) + offset_pt,
        up=where._from_center_cartesian(x=0, y=radius, z=1) + offset_pt,
        down=where._from_center_cartesian(x=0, y=-radius, z=1) + offset_pt)

    if version < APIVersion(2, 4):
        # Pre-2.4 behavior: no center point, fixed edge order.
        edge_list.center = None
        # Add the center value before switching axes
        return [edge for edge in astuple(edge_list) if edge]

    # 2.4+: let the deck/mount geometry decide which edges are reachable.
    new_edges = determine_edge_path(where, mount, edge_list, deck)
    return [edge for edge in astuple(new_edges) if edge]
def use_tips(self, start_well: Well, num_channels: int = 1):
    """
    Removes tips from the tip tracker.

    This method should be called when a tip is picked up. Generally, it
    will be called with `num_channels=1` or `num_channels=8` for single-
    and multi-channel respectively. If picking up with more than one
    channel, this method will automatically determine which tips are used
    based on the start well, the number of channels, and the geometry of
    the tiprack.

    :param start_well: The :py:class:`.Well` from which to pick up a tip.
                       For a single-channel pipette, this is the well to
                       send the pipette to. For a multi-channel pipette,
                       this is the well to send the back-most nozzle of the
                       pipette to.
    :type start_well: :py:class:`.Well`
    :param num_channels: The number of channels for the current pipette
    :type num_channels: int
    """
    assert num_channels > 0, 'Bad call to use_tips: num_channels<=0'
    # Select the column of the labware that contains the target well
    target_column: List[Well] = [
        col for col in self.columns() if start_well in col][0]

    well_idx = target_column.index(start_well)
    # Number of tips to pick up is the lesser of (1) the number of tips
    # from the starting well to the end of the column, and (2) the number
    # of channels of the pipette (so a 4-channel pipette would pick up a
    # max of 4 tips, and picking up from the 2nd-to-bottom well in a
    # column would get a maximum of 2 tips)
    num_tips = min(len(target_column) - well_idx, num_channels)
    target_wells = target_column[well_idx:well_idx + num_tips]

    # In API version 2.2, we no longer reset the tip tracker when a tip
    # is dropped back into a tiprack well. This fixes a behavior where
    # subsequent transfers would reuse the dirty tip. However, sometimes
    # the user explicitly wants to use a dirty tip, and this check would
    # raise an exception if they tried to do so.
    # An extension of work here is to have separate tip trackers for
    # dirty tips and non-present tips; but until then, we can avoid the
    # exception.
    if self._api_version < APIVersion(2, 2):
        # Pre-2.2 contract: picking from an empty well is an error
        # (tests rely on this raising AssertionError).
        assert all([well.has_tip for well in target_wells]),\
            '{} is out of tips'.format(str(self))

    for well in target_wells:
        well.has_tip = False
def test_get_parent_identifier():
    """Slot parents yield an empty identifier; module parents yield the
    module model string."""
    plate_def = labware.get_labware_definition(
        'corning_96_wellplate_360ul_flat')

    # Labware sitting in a plain deck slot has no parent identifier.
    on_slot = labware.Labware(plate_def,
                              Location(Point(0, 0, 0), 'Test Slot'))
    assert labware._get_parent_identifier(on_slot) == ''

    # The same labware placed on a magnetic module picks up the module
    # model as its parent identifier.
    magdeck_geometry = ModuleGeometry('my magdeck',
                                      MagneticModuleModel.MAGNETIC_V1,
                                      ModuleType.MAGNETIC,
                                      Point(0, 0, 0), 10, 10,
                                      Location(Point(1, 2, 3), '3'),
                                      APIVersion(2, 4))
    on_module = labware.Labware(plate_def, magdeck_geometry.location)
    assert (labware._get_parent_identifier(on_module)
            == MagneticModuleModel.MAGNETIC_V1.value)
def test_parse_python_details(protocol, protocol_text_kind, filename,
                              protocol_file):
    """parse() of a Python protocol yields text, filename, api_level,
    metadata and compiled contents, for both str and bytes input."""
    if protocol_text_kind == 'bytes':
        # Exercise the bytes-input path of parse().
        text = protocol.text.encode('utf-8')
    else:
        text = protocol.text
    if filename == 'real':
        fake_fname = protocol.filename
    else:
        fake_fname = None
    parsed = parse(text, fake_fname)
    assert isinstance(parsed, PythonProtocol)
    # Parsed text is normalized back to str even when bytes went in.
    assert parsed.text == protocol.text
    assert isinstance(parsed.text, str)
    # Without a filename, parse() substitutes the '<protocol>' sentinel.
    fname = fake_fname if fake_fname else '<protocol>'
    assert parsed.filename == fname
    # Fixture naming convention: a '2' in the filename marks the APIv2
    # variant of the protocol (which carries an 'apiLevel' key).
    if '2' in protocol.filename:
        assert parsed.api_level == APIVersion(2, 0)
        assert parsed.metadata == {
            'protocolName': 'Testosaur',
            'author': 'Opentrons <*****@*****.**>',
            'description': 'A variant on "Dinosaur" for testing',
            'source': 'Opentrons Repository',
            'apiLevel': '2.0'
        }
    else:
        assert parsed.api_level == APIVersion(1, 0)
        assert parsed.metadata == {
            'protocolName': 'Testosaur',
            'author': 'Opentrons <*****@*****.**>',
            'description': 'A variant on "Dinosaur" for testing',
            'source': 'Opentrons Repository',
        }
    assert parsed.contents == compile(
        protocol.text, filename=fname, mode='exec')
def _is_valid_row(self, well: Union[Well, types.Location]):
    """Return whether ``well`` sits in a row this multichannel transfer
    may target: row A only, except on 384-well plates at API >= 2.2,
    where rows A and B are both reachable."""
    # Unwrap a Location down to its underlying Well.
    if isinstance(well, types.Location):
        target: Well = well.labware  # type: ignore
    else:
        target = well

    rows = target.parent.rows()
    if self._api_version < APIVersion(2, 2):
        # Older API levels restrict every plate to its first row.
        return target in rows[0]
    if target.parent.parameters['format'] == '384Standard':
        # 384-well plates expose their first two rows.
        return any(target in row for row in rows[:2])
    return target in rows[0]
def _find_value_for_api_version(for_version: APIVersion,
                                values: Dict[str, float]) -> float:
    """
    Parse a dict that looks like {"2.0": 5, "2.5": 4} (aka the flow rate
    values from pipette config) and return the value for the highest api
    level that is at or underneath ``for_version``
    """
    versions = sorted(APIVersion.from_string(key) for key in values)
    # Fall back to the lowest defined version when nothing is at or
    # below for_version (matches the historical behavior).
    best = versions[0]
    for candidate in versions:
        if candidate > for_version:
            break
        best = candidate
    return values[str(best)]
def run_protocol(protocol: Protocol, context: ProtocolContext):
    """ Run a protocol.

    :param protocol: The :py:class:`.protocols.types.Protocol` to execute
    :param context: The context to use.
    :raises RuntimeError: If the python API level or the JSON schema
        version is unsupported.
    """
    if isinstance(protocol, PythonProtocol):
        # Only Protocol API v2+ python protocols may run in this context.
        if protocol.api_level >= APIVersion(2, 0):
            _run_python(protocol, context)
        else:
            raise RuntimeError(
                f'Unsupported python API version: {protocol.api_level}'
            )
    else:
        # JSON protocol: dispatch on the schema version embedded in the
        # contents. Read it once so the branch condition and any error
        # message agree (the previous code branched on
        # contents['schemaVersion'] but reported protocol.schema_version,
        # which can disagree).
        schema_version = protocol.contents['schemaVersion']
        if schema_version == 3:
            ins = execute_v3.load_pipettes_from_json(
                context, protocol.contents)
            lw = execute_v3.load_labware_from_json_defs(
                context, protocol.contents)
            execute_v3.dispatch_json(context, protocol.contents, ins, lw)
        elif schema_version == 4:
            # reuse the v3 fns for loading labware and pipettes
            # b/c the v4 protocol has no changes for these keys
            ins = execute_v3.load_pipettes_from_json(
                context, protocol.contents)
            modules = execute_v4.load_modules_from_json(
                context, protocol.contents)
            lw = execute_v4.load_labware_from_json_defs(
                context, protocol.contents, modules)
            execute_v4.dispatch_json(
                context, protocol.contents, ins, lw, modules,
                pipette_command_map, magnetic_module_command_map,
                temperature_module_command_map,
                thermocycler_module_command_map)
        else:
            raise RuntimeError(
                f'Unsupported JSON protocol schema: {schema_version}')
def test_touch_tip_default_args(loop, monkeypatch):
    """touch_tip with no args uses the default 1 mm-below-top z offset
    and 60 mm/s speed for all four edge moves."""
    ctx = papi.ProtocolContext(loop, api_version=APIVersion(2, 3))
    ctx.home()
    lw = ctx.load_labware(
        'opentrons_24_tuberack_eppendorf_1.5ml_safelock_snapcap', 1)
    tiprack = ctx.load_labware('opentrons_96_tiprack_300ul', 3)
    instr = ctx.load_instrument('p300_single', Mount.RIGHT,
                                tip_racks=[tiprack])

    instr.pick_up_tip()
    total_hw_moves = []

    # Capture hardware move requests instead of driving motors.
    async def fake_hw_move(self, mount, abs_position, speed=None,
                           critical_point=None, max_speeds=None):
        nonlocal total_hw_moves
        total_hw_moves.append((abs_position, speed))

    instr.aspirate(10, lw.wells()[0])
    monkeypatch.setattr(API, 'move_to', fake_hw_move)
    instr.touch_tip()
    z_offset = Point(0, 0, 1)  # default z offset of 1mm
    speed = 60  # default speed
    # Edges sit 1 mm *below* the well top, hence the subtraction.
    edges = [
        lw.wells()[0]._from_center_cartesian(1, 0, 1) - z_offset,
        lw.wells()[0]._from_center_cartesian(-1, 0, 1) - z_offset,
        lw.wells()[0]._from_center_cartesian(0, 1, 1) - z_offset,
        lw.wells()[0]._from_center_cartesian(0, -1, 1) - z_offset
    ]
    # Move 0 is the approach into the well; moves 1-4 are the edges.
    for i in range(1, 5):
        assert total_hw_moves[i] == (edges[i - 1], speed)

    # Check that the old api version initial well move has the same z height
    # as the calculated edges.
    total_hw_moves.clear()
    instr.touch_tip(v_offset=1)
    assert total_hw_moves[0][0].z != total_hw_moves[1][0].z
def run_protocol(protocol: Protocol, context: ProtocolContext):
    """ Run a protocol.

    Earlier revision of this dispatcher: JSON protocols are supported
    only at schema v3 (no v4/module support here).

    :param protocol: The :py:class:`.protocols.types.Protocol` to execute
    :param context: The context to use.
    :raises RuntimeError: If the python API level or JSON schema version
        is unsupported.
    """
    if isinstance(protocol, PythonProtocol):
        # Only Protocol API v2+ python protocols may run here.
        if protocol.api_level >= APIVersion(2, 0):
            _run_python(protocol, context)
        else:
            raise RuntimeError(
                f'Unsupported python API version: {protocol.api_level}')
    else:
        if protocol.schema_version == 3:
            ins = execute_v3.load_pipettes_from_json(context,
                                                     protocol.contents)
            lw = execute_v3.load_labware_from_json_defs(
                context, protocol.contents)
            execute_v3.dispatch_json(context, protocol.contents, ins, lw)
        else:
            raise RuntimeError(
                f'Unsupported JSON protocol schema: {protocol.schema_version}')
def _determine_lw_engage_height(self) -> float:
    """ Return engage height based on Protocol API and module versions

    For API Version 2.3 or later:
       - Multiply non-standard labware engage heights by 2 for gen1 modules
       - Divide standard labware engage heights by 2 for gen2 modules
    If none of the above, return the labware engage heights as defined in
    the labware definitions

    :raises AssertionError: If no labware is loaded or the loaded labware
        has no defined engage height.
    """
    # Guard each precondition separately. The previous single-statement
    # form, `assert self.labware, self.labware.magdeck_engage_height`,
    # evaluated the failure *message* by dereferencing the very object
    # being checked — so a missing labware raised AttributeError instead
    # of AssertionError.
    assert self.labware, \
        'cannot determine engage height: no labware loaded on module'
    assert self.labware.magdeck_engage_height is not None, \
        'cannot determine engage height: labware has no engage height'

    engage_height = self.labware.magdeck_engage_height

    is_api_breakpoint = (self._ctx._api_version >= APIVersion(2, 3))
    is_v1_module = (self._module.model() == 'magneticModuleV1')
    is_standard_lw = self.labware.load_name in STANDARD_MAGDECK_LABWARE

    # Engage heights are defined in gen1 units for standard labware and
    # gen2 units for non-standard labware; convert for the other case.
    if is_api_breakpoint and is_v1_module and not is_standard_lw:
        return engage_height * ENGAGE_HEIGHT_UNIT_CNV
    elif is_api_breakpoint and not is_v1_module and is_standard_lw:
        return engage_height / ENGAGE_HEIGHT_UNIT_CNV
    else:
        return engage_height
def load_labware(
        self,
        name: str,
        label: str = None,
        namespace: str = None,
        version: int = 1,
) -> Labware:
    """ Specify the presence of a piece of labware on the module.

    :param name: The name of the labware object.
    :param str label: An optional special name to give the labware. If
        specified, this is the name the labware will appear as in the run
        log and the calibration view in the Opentrons app.

        .. versionadded:: 2.1
    :param str namespace: The namespace the labware definition belongs to.
        If unspecified, will search 'opentrons' then 'custom_beta'

        .. versionadded:: 2.1
    :param int version: The version of the labware definition. If
        unspecified, will use version 1.

        .. versionadded:: 2.1
    :returns: The initialized and loaded labware object.
    """
    # NOTE(review): ``version`` defaults to 1 (truthy), so this warning
    # fires for *every* pre-2.1 protocol regardless of whether the caller
    # actually passed any of the new parameters — confirm intended.
    if self.api_version < APIVersion(2, 1) and\
            (label or namespace or version):
        MODULE_LOG.warning(
            f'You have specified API {self.api_version}, but you '
            'are trying to utilize new load_labware parameters in 2.1')
    # Resolve the definition (bundled defs take priority, then any
    # extra user-supplied defs) and place it at the module's location.
    lw = load(
        name, self._geometry.location, label, namespace, version,
        bundled_defs=self._ctx._bundled_labware,
        extra_defs=self._ctx._extra_labware)
    return self.load_labware_object(lw)
right_pip_edges = [ test_lw2['A12']._from_center_cartesian(x=1.0, y=0, z=1) + off, test_lw2['A12']._from_center_cartesian(x=-1.0, y=0, z=1) + off, test_lw2['A12']._from_center_cartesian(x=0, y=0, z=1) + off, test_lw2['A12']._from_center_cartesian(x=0, y=1.0, z=1) + off, test_lw2['A12']._from_center_cartesian(x=0, y=-1.0, z=1) + off, ] # Test that labware in slot 6 results in unmodified edge list res2 = build_edges(test_lw2['A12'], 1.0, APIVersion(2, 4), Mount.RIGHT, ctx._deck_layout) assert res2 == right_pip_edges @pytest.mark.parametrize('data,level,desired', [({ '2.0': 5 }, APIVersion(2, 0), 5), ({ '2.0': 5 }, APIVersion(2, 5), 5), ({ '2.6': 4, '2.0': 5 }, APIVersion(2, 1), 5), ({ '2.6': 4, '2.0': 5 }, APIVersion(2, 6), 4), ({ '2.0': 5, '2.6': 4 }, APIVersion(2, 3), 5), ({ '2.0': 5, '2.6': 4 }, APIVersion(2, 6), 4)]) def test_find_value_for_api_version(data, level, desired):
def test_multichannel_transfer_locs(loop):
    """At API 2.2, multichannel transfers on a 384-well plate may target
    rows A and B; deeper rows are skipped or, if nothing valid remains,
    raise RuntimeError."""
    ctx = papi.ProtocolContext(loop, api_version=APIVersion(2, 2))
    lw1 = ctx.load_labware('biorad_96_wellplate_200ul_pcr', 1)
    lw2 = ctx.load_labware('corning_384_wellplate_112ul_flat', 2)
    tiprack = ctx.load_labware('opentrons_96_tiprack_300ul', 3)
    instr_multi = ctx.load_instrument(
        'p300_multi', Mount.LEFT, tip_racks=[tiprack])

    # targets within row limit
    xfer_plan = tx.TransferPlan(
        100, lw1.rows()[0][1], lw2.rows()[1][1], instr_multi,
        max_volume=instr_multi.hw_pipette['working_volume'],
        api_version=ctx.api_version)
    xfer_plan_list = []
    for step in xfer_plan:
        xfer_plan_list.append(step)
    exp1 = [{'method': 'pick_up_tip', 'args': [], 'kwargs': {}},
            {'method': 'aspirate',
             'args': [100, lw1.wells_by_name()['A2'], 1.0],
             'kwargs': {}},
            {'method': 'dispense',
             'args': [100, lw2.wells_by_index()['B2'], 1.0],
             'kwargs': {}},
            {'method': 'drop_tip', 'args': [], 'kwargs': {}}]
    assert xfer_plan_list == exp1

    # targets outside of row limit will be skipped
    xfer_plan = tx.TransferPlan(
        100, lw1.rows()[0][1], [lw2.rows()[1][1], lw2.rows()[2][1]],
        instr_multi,
        max_volume=instr_multi.hw_pipette['working_volume'],
        api_version=ctx.api_version)
    xfer_plan_list = []
    for step in xfer_plan:
        xfer_plan_list.append(step)
    # Plan should be identical: the row-C target was dropped.
    assert xfer_plan_list == exp1

    # no valid source or targets, raise error
    with pytest.raises(RuntimeError):
        assert tx.TransferPlan(
            100, lw1.rows()[0][1], lw2.rows()[2][1], instr_multi,
            max_volume=instr_multi.hw_pipette['working_volume'],
            api_version=ctx.api_version)
def test_home_plunger(monkeypatch):
    """home_plunger on a freshly-homed context completes without error."""
    ctx = papi.ProtocolContext(api_version=APIVersion(2, 0))
    ctx.home()
    pipette = ctx.load_instrument('p1000_single', 'left')
    pipette.home_plunger()
def simulate(
        protocol_file: TextIO,
        file_name: str = None,
        custom_labware_paths: List[str] = None,
        custom_data_paths: List[str] = None,
        propagate_logs: bool = False,
        hardware_simulator_file_path: str = None,
        log_level: str = 'warning'
) -> Tuple[List[Mapping[str, Any]],
           Optional[BundleContents]]:
    """ Simulate the protocol itself.

    This is a one-stop function to simulate a protocol, whether python or
    json, no matter the api version, from external (i.e. not bound up in
    other internal server infrastructure) sources.

    To simulate an opentrons protocol from other places, pass in a file
    like object as protocol_file; this function either returns (if the
    simulation has no problems) or raises an exception.

    To call from the command line use either the autogenerated entrypoint
    ``opentrons_simulate`` (``opentrons_simulate.exe``, on windows) or
    ``python -m opentrons.simulate``.

    The return value is the run log, a list of dicts that represent the
    commands executed by the robot; and either the contents of the
    protocol that would be required to bundle, or ``None``.

    Each dict element in the run log has the following keys:

        - ``level``: The depth at which this command is nested - if this
          an aspirate inside a mix inside a transfer, for instance, it
          would be 3.
        - ``payload``: The command, its arguments, and how to format its
          text. For more specific details see
          :py:mod:`opentrons.commands`. To format a message from a
          payload do ``payload['text'].format(**payload)``.
        - ``logs``: Any log messages that occurred during execution of
          this command, as a logging.LogRecord

    :param file-like protocol_file: The protocol file to simulate.
    :param str file_name: The name of the file
    :param custom_labware_paths: A list of directories to search for
        custom labware, or None. Ignored if the apiv2 feature flag is not
        set. Loads valid labware from these paths and makes them
        available to the protocol context.
    :param custom_data_paths: A list of directories or files to load
        custom data files from. Ignored if the apiv2 feature flag if not
        set. Entries may be either files or directories. Specified files
        and the non-recursive contents of specified directories are
        presented by the protocol context in
        :py:attr:`.ProtocolContext.bundled_data`.
    :param hardware_simulator_file_path: A path to a JSON file defining
        a hardware simulator.
    :param propagate_logs: Whether this function should allow logs from
        the Opentrons stack to propagate up to the root handler. This can
        be useful if you're integrating this function in a larger
        application, but most logs that occur during protocol simulation
        are best associated with the actions in the protocol that cause
        them. Default: ``False``
    :type propagate_logs: bool
    :param log_level: The level of logs to capture in the runlog.
        Default: ``'warning'``
    :type log_level: 'debug', 'info', 'warning', or 'error'
    :returns: A tuple of a run log for user output, and possibly the
        required data to write to a bundle to bundle this protocol. The
        bundle is only emitted if bundling is allowed (see
        :py:meth:`allow_bundling`) and this is an unbundled Protocol API
        v2 python protocol. In other cases it is None.
    """
    stack_logger = logging.getLogger('opentrons')
    stack_logger.propagate = propagate_logs

    contents = protocol_file.read()
    if custom_labware_paths:
        extra_labware = labware_from_paths(custom_labware_paths)
    else:
        extra_labware = {}
    if custom_data_paths:
        extra_data = datafiles_from_paths(custom_data_paths)
    else:
        extra_data = {}

    hardware_simulator = None
    if hardware_simulator_file_path:
        hardware_simulator = asyncio.get_event_loop().run_until_complete(
            load_simulator(pathlib.Path(hardware_simulator_file_path)))

    protocol = parse.parse(contents, file_name,
                           extra_labware=extra_labware,
                           extra_data=extra_data)
    bundle_contents: Optional[BundleContents] = None

    if getattr(protocol, 'api_level', APIVersion(2, 0)) < APIVersion(2, 0):
        # APIv1 path: simulate by exec'ing against the global robot
        # singleton, scraping commands off its broker.
        def _simulate_v1():
            opentrons.robot.disconnect()
            opentrons.robot.reset()
            scraper = CommandScraper(stack_logger, log_level,
                                     opentrons.robot.broker)
            exec(protocol.contents, {})  # type: ignore
            return scraper
        scraper = _simulate_v1()
    else:
        # we want a None literal rather than empty dict so get_protocol_api
        # will look for custom labware if this is a robot
        gpa_extras = getattr(protocol, 'extra_labware', None) or None
        context = get_protocol_api(
            getattr(protocol, 'api_level', MAX_SUPPORTED_VERSION),
            bundled_labware=getattr(protocol, 'bundled_labware', None),
            bundled_data=getattr(protocol, 'bundled_data', None),
            hardware_simulator=hardware_simulator,
            extra_labware=gpa_extras)
        scraper = CommandScraper(stack_logger, log_level, context.broker)
        try:
            execute.run_protocol(protocol, context)
            # Only unbundled APIv2 python protocols may be re-bundled.
            if isinstance(protocol, PythonProtocol)\
                    and protocol.api_level >= APIVersion(2, 0)\
                    and protocol.bundled_labware is None\
                    and allow_bundle():
                bundle_contents = bundle_from_sim(protocol, context)
        finally:
            # Always release hardware/context resources, even on failure.
            context.cleanup()

    return scraper.commands, bundle_contents
def execute(protocol_file: TextIO,
            protocol_name: str,
            propagate_logs: bool = False,
            log_level: str = 'warning',
            emit_runlog: Callable[[Dict[str, Any]], None] = None,
            custom_labware_paths: List[str] = None,
            custom_data_paths: List[str] = None):
    """ Run the protocol itself.

    This is a one-stop function to run a protocol, whether python or json,
    no matter the api verson, from external (i.e. not bound up in other
    internal server infrastructure) sources.

    To run an opentrons protocol from other places, pass in a file like
    object as protocol_file; this function either returns (if the run has no
    problems) or raises an exception.

    To call from the command line use either the autogenerated entrypoint
    ``opentrons_execute`` or ``python -m opentrons.execute``.

    If the protocol is using Opentrons Protocol API V1, it does not need to
    explicitly call :py:meth:`.Robot.connect`
    or :py:meth:`.Robot.discover_modules`, or
    :py:meth:`.Robot.cache_instrument_models`.

    :param file-like protocol_file: The protocol file to execute
    :param str protocol_name: The name of the protocol file. This is required
                              internally, but it may not be a thing we can
                              get from the protocol_file argument.
    :param propagate_logs: Whether this function should allow logs from the
                           Opentrons stack to propagate up to the root handler.
                           This can be useful if you're integrating this
                           function in a larger application, but most logs
                           that occur during protocol simulation are best
                           associated with the actions in the protocol that
                           cause them. Default: ``False``
    :type propagate_logs: bool
    :param log_level: The level of logs to emit on the command line.
                      Default: 'warning'
    :type log_level: 'debug', 'info', 'warning', or 'error'
    :param emit_runlog: A callback for printing the runlog. If specified, this
                        will be called whenever a command adds an entry to the
                        runlog, which can be used for display and progress
                        estimation. If specified, the callback should take a
                        single argument (the name doesn't matter) which will
                        be a dictionary (see below). Default: ``None``
    :param custom_labware_paths: A list of directories to search for custom
                                 labware, or None. Ignored if the apiv2
                                 feature flag is not set. Loads valid labware
                                 from these paths and makes them available to
                                 the protocol context.
    :param custom_data_paths: A list of directories or files to load custom
                              data files from. Ignored if the apiv2 feature
                              flag is not set. Entries may be either files or
                              directories. Specified files and the
                              non-recursive contents of specified directories
                              are presented by the protocol context in
                              :py:attr:`.ProtocolContext.bundled_data`.

    The format of the runlog entries is as follows:

    .. code-block:: python

        {
            'name': command_name,
            'payload': {
                 'text': string_command_text,
                  # The rest of this struct is command-dependent; see
                  # opentrons.commands.commands. Its keys match format
                  # keys in 'text', so that
                  # entry['payload']['text'].format(**entry['payload'])
                  # will produce a string with information filled in
             }
        }
    """
    # Configure the shared 'opentrons' logger before anything runs so that
    # protocol execution logs are captured at the requested level.
    stack_logger = logging.getLogger('opentrons')
    stack_logger.propagate = propagate_logs
    stack_logger.setLevel(getattr(logging, log_level.upper(), logging.WARNING))
    contents = protocol_file.read()
    if custom_labware_paths:
        extra_labware = labware_from_paths(custom_labware_paths)
    else:
        extra_labware = {}
    if custom_data_paths:
        extra_data = datafiles_from_paths(custom_data_paths)
    else:
        extra_data = {}
    protocol = parse(contents, protocol_name,
                     extra_labware=extra_labware,
                     extra_data=extra_data)
    # Protocols without an explicit api_level are treated as APIv2; anything
    # older than 2.0 executes against the global v1 robot singleton below.
    if getattr(protocol, 'api_level', APIVersion(2, 0)) < APIVersion(2, 0):
        opentrons.robot.connect()
        opentrons.robot.cache_instrument_models()
        opentrons.robot.discover_modules()
        opentrons.robot.home()
        if emit_runlog:
            # v1 commands are published on the global robot broker.
            opentrons.robot.broker.subscribe(commands.command_types.COMMAND,
                                             emit_runlog)
        assert isinstance(protocol, PythonProtocol),\
            'Internal error: Only Python protocols may be executed in v1'
        # v1 python protocols are executed directly; the global robot holds
        # all run state.
        exec(protocol.contents, {})
    else:
        # NOTE(review): this mutates the protocol's bundled_data dict in
        # place when one is present — confirm that is intended.
        bundled_data = getattr(protocol, 'bundled_data', {})
        bundled_data.update(extra_data)
        # A None literal (rather than an empty dict) makes get_protocol_api
        # fall back to looking up custom labware on the robot.
        gpa_extras = getattr(protocol, 'extra_labware', None) or None
        context = get_protocol_api(
            getattr(protocol, 'api_level', MAX_SUPPORTED_VERSION),
            bundled_labware=getattr(protocol, 'bundled_labware', None),
            bundled_data=bundled_data,
            extra_labware=gpa_extras)
        if emit_runlog:
            context.broker.subscribe(commands.command_types.COMMAND,
                                     emit_runlog)
        context.home()
        try:
            execute_apiv2.run_protocol(protocol, context)
        finally:
            # Always release hardware resources, even if the run raised.
            context.cleanup()
} print('wat?') metadata['hello'] = 'moon' fakedata['what?'] = 'ham' """ parsed = ast.parse(prot, filename='testy', mode='exec') metadata = extract_metadata(parsed) assert metadata == expected infer_version_cases = [(""" from opentrons import instruments p = instruments.P10_Single(mount='right') """, APIVersion(1, 0)), (""" import opentrons.instruments p = instruments.P10_Single(mount='right') """, APIVersion(1, 0)), (""" from opentrons import instruments as instr p = instr.P10_Single(mount='right') """, APIVersion(1, 0)), (""" from opentrons import instruments metadata = { 'apiLevel': '1'
def _check_valid_well_list(self, well_list, id, old_well_list): if self._api_version >= APIVersion(2, 2) and len(well_list) < 1: raise RuntimeError( f"Invalid {id} for multichannel transfer: {old_well_list}")
def load_module( self, module_name: str, location: Optional[types.DeckLocation] = None, configuration: str = None) -> ModuleTypes: """ Load a module onto the deck given its name. This is the function to call to use a module in your protocol, like :py:meth:`load_instrument` is the method to call to use an instrument in your protocol. It returns the created and initialized module context, which will be a different class depending on the kind of module loaded. A map of deck positions to loaded modules can be accessed later using :py:attr:`loaded_modules`. :param str module_name: The name or model of the module. :param location: The location of the module. This is usually the name or number of the slot on the deck where you will be placing the module. Some modules, like the Thermocycler, are only valid in one deck location. You do not have to specify a location when loading a Thermocycler - it will always be in Slot 7. :param configuration: Used to specify the slot configuration of the Thermocycler. Only Valid in Python API Version 2.4 and later. If you wish to use the non-full plate configuration, you must pass in the key word value `semi` :type location: str or int or None :returns ModuleContext: The loaded and initialized :py:class:`ModuleContext`. 
""" resolved_model = resolve_module_model(module_name) resolved_type = resolve_module_type(resolved_model) resolved_location = self._deck_layout.resolve_module_location( resolved_type, location) if self._api_version < APIVersion(2, 4) and configuration: raise APIVersionError( f'You have specified API {self._api_version}, but you are' 'using thermocycler parameters only available in 2.4') geometry = load_module( resolved_model, self._deck_layout.position_for( resolved_location), self._api_version, configuration) hc_mod_instance = None mod_class = { ModuleType.MAGNETIC: MagneticModuleContext, ModuleType.TEMPERATURE: TemperatureModuleContext, ModuleType.THERMOCYCLER: ThermocyclerContext}[resolved_type] for mod in self._hw_manager.hardware.attached_modules: if models_compatible( module_model_from_string(mod.model()), resolved_model): hc_mod_instance = SynchronousAdapter(mod) break if self.is_simulating() and hc_mod_instance is None: mod_type = { ModuleType.MAGNETIC: modules.magdeck.MagDeck, ModuleType.TEMPERATURE: modules.tempdeck.TempDeck, ModuleType.THERMOCYCLER: modules.thermocycler.Thermocycler }[resolved_type] hc_mod_instance = SynchronousAdapter(mod_type( port='', simulating=True, loop=self._hw_manager.hardware.loop, execution_manager=ExecutionManager( loop=self._hw_manager.hardware.loop), sim_model=resolved_model.value)) hc_mod_instance._connect() if hc_mod_instance: mod_ctx = mod_class(self, hc_mod_instance, geometry, self.api_version, self._loop) else: raise RuntimeError( f'Could not find specified module: {module_name}') self._modules.add(mod_ctx) self._deck_layout[resolved_location] = geometry return mod_ctx
# Test that module in slot 1 results in modified edge list res = build_edges( test_lw['A1'], 1.0, Mount.RIGHT, ctx._deck_layout, version=APIVersion(2, 4)) assert res == right_pip_edges right_pip_edges = [ test_lw2['A12']._from_center_cartesian(x=1.0, y=0, z=1) + off, test_lw2['A12']._from_center_cartesian(x=-1.0, y=0, z=1) + off, test_lw2['A12']._from_center_cartesian(x=0, y=0, z=1) + off, test_lw2['A12']._from_center_cartesian(x=0, y=1.0, z=1) + off, test_lw2['A12']._from_center_cartesian(x=0, y=-1.0, z=1) + off, ] # Test that labware in slot 6 results in unmodified edge list res2 = build_edges( test_lw2['A12'], 1.0, Mount.RIGHT, ctx._deck_layout, version=APIVersion(2, 4)) assert res2 == right_pip_edges @pytest.mark.parametrize('data,level,desired', [ ({'2.0': 5}, APIVersion(2, 0), 5), ({'2.0': 5}, APIVersion(2, 5), 5), ({'2.6': 4, '2.0': 5}, APIVersion(2, 1), 5), ({'2.6': 4, '2.0': 5}, APIVersion(2, 6), 4), ({'2.0': 5, '2.6': 4}, APIVersion(2, 3), 5), ({'2.0': 5, '2.6': 4}, APIVersion(2, 6), 4) ]) def test_find_value_for_api_version(data, level, desired): assert _find_value_for_api_version(level, data) == desired
from opentrons import protocol_api
from opentrons.protocols.types import APIVersion

from opentrons_functions.transfer import add_buffer

# Protocol metadata consumed by the Opentrons app / runtime.
metadata = {
    'apiLevel': '2.5',
    'author': 'Jon Sanders'}

api_version = APIVersion(2, 5)

# All twelve column-A..? positions addressed by this protocol
# (row A across a 12-column labware).
cols = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6',
        'A7', 'A8', 'A9', 'A10', 'A11', 'A12']


def run(protocol: protocol_api.ProtocolContext):
    # define deck positions and labware

    # tips
    tiprack_300 = protocol.load_labware('opentrons_96_tiprack_300ul', 1)
    tiprack_10f = protocol.load_labware('opentrons_96_filtertiprack_10ul', 2)

    # plates
    reagents = protocol.load_labware('usascientific_12_reservoir_22ml',
                                     6, 'reagents')
    assay = protocol.load_labware('corning_96_wellplate_360ul_flat',
                                  5, 'assay')
    samples = protocol.load_labware('biorad_96_wellplate_200ul_pcr',
                                    4, 'samples')