Example 1
def test_proto_with_exception(ensure_api2, loop):
    ctx = ProtocolContext(loop)
    exc_in_root = '''
def run(ctx):
    raise Exception("hi")
'''
    protocol = parse(exc_in_root)
    with pytest.raises(execute.ExceptionInProtocolError) as e:
        execute.run_protocol(protocol, context=ctx)
    assert 'Exception [line 3]: hi' in str(e.value)

    nested_exc = '''
import ast

def this_throws():
    raise Exception("hi")

def run(ctx):
    this_throws()
'''
    protocol = parse(nested_exc)
    with pytest.raises(execute.ExceptionInProtocolError) as e:
        execute.run_protocol(protocol, context=ctx)
    assert '[line 5]' in str(e.value)
    assert 'Exception [line 5]: hi' in str(e.value)
Example 2
def test_proto_with_exception(ensure_api2, loop):
    ctx = ProtocolContext(loop)
    exc_in_root = '''
def run(ctx):
    raise Exception("hi")
'''
    comped = compile(exc_in_root, 'test_file.py', 'exec')
    with pytest.raises(execute.ExceptionInProtocolError) as e:
        execute.run_protocol(protocol_code=comped, context=ctx)
    assert 'Exception [line 3]: hi' in str(e.value)

    nested_exc = '''
import ast

def this_throws():
    raise Exception("hi")

def run(ctx):
    this_throws()
'''
    comped = compile(nested_exc, 'nested.py', 'exec')
    with pytest.raises(execute.ExceptionInProtocolError) as e:
        execute.run_protocol(protocol_code=comped, context=ctx)
    assert '[line 5]' in str(e.value)
    assert 'Exception [line 5]: hi' in str(e.value)
Example 3
def test_papi_execute_json_v4(monkeypatch, loop, get_json_protocol_fixture):
    protocol_data = get_json_protocol_fixture('4', 'testModulesProtocol',
                                              False)
    protocol = parse(protocol_data, None)
    ctx = ProtocolContext(loop=loop)
    ctx.home()
    # Check that we end up executing the protocol ok
    execute.run_protocol(protocol, ctx)
Example 4
def test_papi_execute_json_v3(monkeypatch, loop, get_json_protocol_fixture):
    protocol_data = get_json_protocol_fixture(
        '3', 'testAllAtomicSingleV3', False)
    protocol = parse(protocol_data, None)
    ctx = ProtocolContext(loop=loop)
    ctx.home()
    # Check that we end up executing the protocol ok
    execute.run_protocol(protocol, True, ctx)
Example 5
    def _run(self):
        def on_command(message):
            if message['$'] == 'before':
                self.log_append()
            if message['name'] == command_types.PAUSE:
                self.set_state('paused')
            if message['name'] == command_types.RESUME:
                self.set_state('running')

        self._reset()

        _unsubscribe = self._broker.subscribe(command_types.COMMAND,
                                              on_command)

        self.startTime = now()
        self.set_state('running')

        try:
            self.resume()
            self._pre_run_hooks()
            if ff.use_protocol_api_v2():
                bundled_data = None
                bundled_labware = None
                if isinstance(self._protocol, PythonProtocol):
                    bundled_data = self._protocol.bundled_data
                    bundled_labware = self._protocol.bundled_labware
                self._hardware.cache_instruments()
                ctx = ProtocolContext(loop=self._loop,
                                      broker=self._broker,
                                      bundled_labware=bundled_labware,
                                      bundled_data=bundled_data)
                ctx.connect(self._hardware)
                ctx.home()
                run_protocol(self._protocol, context=ctx)
            else:
                self._hardware.broker = self._broker
                if isinstance(self._protocol, JsonProtocol):
                    execute_protocol(self._protocol)
                else:
                    exec(self._protocol.contents, {})
            self.set_state('finished')
            self._hardware.home()
        except Exception as e:
            log.exception("Exception during run:")
            self.error_append(e)
            self.set_state('error')
            raise e
        finally:
            _unsubscribe()
Example 6
def test_bad_protocol(loop):
    ctx = ProtocolContext(loop)

    no_args = parse('''
metadata={"apiLevel": "2.0"}
def run():
    pass
''')
    with pytest.raises(execute.MalformedProtocolError) as e:
        execute.run_protocol(no_args, context=ctx)
        assert "Function 'run()' does not take any parameters" in str(e.value)

    many_args = parse('''
metadata={"apiLevel": "2.0"}
def run(a, b):
    pass
''')
    with pytest.raises(execute.MalformedProtocolError) as e:
        execute.run_protocol(many_args, context=ctx)
        assert "must be called with more than one argument" in str(e.value)
Example 7
def test_bad_protocol(ensure_api2, loop):
    ctx = ProtocolContext(loop)
    with pytest.raises(execute.MalformedProtocolError) as e:
        execute.run_protocol(protocol_code='print("hi")', context=ctx)
        assert "No function 'run" in str(e.value)
    with pytest.raises(execute.MalformedProtocolError) as e:
        execute.run_protocol(protocol_code='def run(): pass', context=ctx)
        assert "Function 'run()' does not take any parameters" in str(e.value)
    with pytest.raises(execute.MalformedProtocolError) as e:
        execute.run_protocol(protocol_code='def run(a, b): pass', context=ctx)
        assert "must be called with more than one argument" in str(e.value)
Example 8
    def _run(self):
        def on_command(message):
            if message['$'] == 'before':
                self.log_append()
            if message['name'] == command_types.PAUSE:
                self.set_state('paused')
            if message['name'] == command_types.RESUME:
                self.set_state('running')

        self._reset()

        _unsubscribe = self._broker.subscribe(command_types.COMMAND,
                                              on_command)

        self.startTime = now()
        self.set_state('running')

        try:
            if self._use_v2:
                self.resume()
                self._pre_run_hooks()
                self._hardware.cache_instruments()
                ctx = ProtocolContext.build_using(self._protocol,
                                                  loop=self._loop,
                                                  broker=self._broker,
                                                  extra_labware=getattr(
                                                      self._protocol,
                                                      'extra_labware', {}))
                ctx.connect(self._hardware)
                ctx.home()
                run_protocol(self._protocol, context=ctx)
            else:
                robot.broker = self._broker
                assert isinstance(self._protocol, PythonProtocol),\
                    'Internal error: v1 should only be used for python'
                if not robot.is_connected():
                    robot.connect()
                self.resume()
                self._pre_run_hooks()
                robot.cache_instrument_models()
                robot.discover_modules()
                exec(self._protocol.contents, {})

            # If the last command in a protocol was a pause, the protocol
            # will immediately finish executing because there's no smoothie
            # command to block... except the home that's about to happen,
            # which will confuse the app and lock it up. So we need to
            # do our own pause here, and sleep the thread until/unless the
            # app resumes us.
            #
            # Cancelling from the app during this pause will result in the
            # smoothie giving us an error during the subsequent home, which
            # is tragic but expected.
            while self.state == 'paused':
                sleep(0.1)
            self.set_state('finished')
            self._hw_iface().home()
        except SmoothieAlarm:
            log.info("Protocol cancelled")
        except Exception as e:
            log.exception("Exception during run:")
            self.error_append(e)
            self.set_state('error')
            raise e
        finally:
            _unsubscribe()
Example 9
    def _simulate(self):
        self._reset()

        stack = []
        res = []
        commands = []

        self._containers.clear()
        self._instruments.clear()
        self._modules.clear()
        self._interactions.clear()

        def on_command(message):
            payload = message['payload']
            description = payload.get('text', '').format(**payload)

            if message['$'] == 'before':
                level = len(stack)

                stack.append(message)
                commands.append(payload)

                res.append({
                    'level': level,
                    'description': description,
                    'id': len(res)
                })
            else:
                stack.pop()

        unsubscribe = self._broker.subscribe(command_types.COMMAND, on_command)
        old_robot_connect = robot.connect

        try:
            # ensure actual pipettes are cached before driver is disconnected
            self._hardware.cache_instruments()
            if self._use_v2:
                instrs = {}
                for mount, pip in self._hardware.attached_instruments.items():
                    if pip:
                        instrs[mount] = {
                            'model': pip['model'],
                            'id': pip.get('pipette_id', '')
                        }
                sim = adapters.SynchronousAdapter.build(
                    API.build_hardware_simulator,
                    instrs,
                    [mod.name() for mod in self._hardware.attached_modules],
                    strict_attached_instruments=False)
                sim.home()
                self._simulating_ctx = ProtocolContext.build_using(
                    self._protocol,
                    loop=self._loop,
                    hardware=sim,
                    broker=self._broker,
                    extra_labware=getattr(self._protocol, 'extra_labware', {}))
                run_protocol(self._protocol, context=self._simulating_ctx)
            else:
                robot.broker = self._broker
                # we don't rely on being connected anymore so make sure we are
                robot.connect()
                robot.cache_instrument_models()
                robot.disconnect()

                def robot_connect_error(port=None, options=None):
                    raise RuntimeError(
                        'Protocols executed through the Opentrons App may not '
                        'use robot.connect(). Allowing this call would cause '
                        'the robot to execute commands during simulation, and '
                        'then raise an error on execution.')

                robot.connect = robot_connect_error
                exec(self._protocol.contents, {})
        finally:
            # physically attached pipettes are re-cached during robot.connect()
            # which is important, because during a simulation, the robot could
            # think that it holds a pipette model that it actually does not
            if not self._use_v2:
                robot.connect = old_robot_connect
                robot.connect()

            unsubscribe()

            instruments, containers, modules, interactions = _accumulate(
                [_get_labware(command) for command in commands])

            self._containers.extend(_dedupe(containers))
            self._instruments.extend(
                _dedupe(
                    instruments +
                    list(self._simulating_ctx.loaded_instruments.values())))
            self._modules.extend(
                _dedupe(modules + [
                    m._geometry
                    for m in self._simulating_ctx.loaded_modules.values()
                ]))
            self._interactions.extend(_dedupe(interactions))

            # Labware calibration happens after simulation and before run, so
            # we have to clear the tips if they are left on after simulation
            # to ensure that the instruments are in the expected state at the
            # beginning of the labware calibration flow
            if not self._use_v2:
                robot.clear_tips()

        return res
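The on_command handler in this example derives each runlog entry's nesting level from a stack of 'before' messages. A minimal standalone sketch of that bookkeeping (the sample messages below are made up for illustration, not taken from the original code):

stack = []
res = []

def on_command(message):
    # 'before' messages record an entry at the current depth and push onto
    # the stack; 'after' messages pop, so nested commands get higher levels.
    if message['$'] == 'before':
        res.append({'level': len(stack),
                    'description': message['payload']['text'],
                    'id': len(res)})
        stack.append(message)
    else:
        stack.pop()

for msg in [{'$': 'before', 'payload': {'text': 'Transferring'}},
            {'$': 'before', 'payload': {'text': 'Aspirating'}},
            {'$': 'after', 'payload': {'text': 'Aspirating'}},
            {'$': 'after', 'payload': {'text': 'Transferring'}}]:
    on_command(msg)
# res now holds the transfer at level 0 and the nested aspirate at level 1.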
Example 10
def execute(protocol_file: TextIO,
            protocol_name: str,
            propagate_logs: bool = False,
            log_level: str = 'warning',
            emit_runlog: Callable[[Dict[str, Any]], None] = None,
            custom_labware_paths: List[str] = None,
            custom_data_paths: List[str] = None):
    """
    Run the protocol itself.

    This is a one-stop function to run a protocol, whether python or json,
    no matter the api version, from external (i.e. not bound up in other
    internal server infrastructure) sources.

    To run an opentrons protocol from other places, pass in a file like
    object as protocol_file; this function either returns (if the run has no
    problems) or raises an exception.

    To call from the command line use either the autogenerated entrypoint
    ``opentrons_execute`` or ``python -m opentrons.execute``.

    If the protocol is using Opentrons Protocol API V1, it does not need to
    explicitly call :py:meth:`.Robot.connect`,
    :py:meth:`.Robot.discover_modules`, or
    :py:meth:`.Robot.cache_instrument_models`.

    :param file-like protocol_file: The protocol file to execute
    :param str protocol_name: The name of the protocol file. This is required
                              internally, but it may not be a thing we can get
                              from the protocol_file argument.
    :param propagate_logs: Whether this function should allow logs from the
                           Opentrons stack to propagate up to the root handler.
                           This can be useful if you're integrating this
                           function in a larger application, but most logs that
                           occur during protocol simulation are best associated
                           with the actions in the protocol that cause them.
                           Default: ``False``
    :type propagate_logs: bool
    :param log_level: The level of logs to emit on the command line. Default:
                      'warning'
    :type log_level: 'debug', 'info', 'warning', or 'error'
    :param emit_runlog: A callback for printing the runlog. If specified, this
                        will be called whenever a command adds an entry to the
                        runlog, which can be used for display and progress
                        estimation. If specified, the callback should take a
                        single argument (the name doesn't matter) which will
                        be a dictionary (see below). Default: ``None``
    :param custom_labware_paths: A list of directories to search for custom
                                 labware, or None. Ignored if the apiv2 feature
                                 flag is not set. Loads valid labware from
                                 these paths and makes them available to the
                                 protocol context.
    :param custom_data_paths: A list of directories or files to load custom
                              data files from. Ignored if the apiv2 feature
                              flag is not set. Entries may be either files or
                              directories. Specified files and the
                              non-recursive contents of specified directories
                              are presented by the protocol context in
                              :py:attr:`.ProtocolContext.bundled_data`.

    The format of the runlog entries is as follows:

    .. code-block:: python

        {
            'name': command_name,
            'payload': {
                 'text': string_command_text,
                  # The rest of this struct is command-dependent; see
                  # opentrons.commands.commands. Its keys match format
                  # keys in 'text', so that
                  # entry['payload']['text'].format(**entry['payload'])
                  # will produce a string with information filled in
             }
        }


    """
    stack_logger = logging.getLogger('opentrons')
    stack_logger.propagate = propagate_logs
    stack_logger.setLevel(getattr(logging, log_level.upper(), logging.WARNING))
    contents = protocol_file.read()
    if custom_labware_paths:
        extra_labware = labware_from_paths(custom_labware_paths)
    else:
        extra_labware = {}
    if custom_data_paths:
        extra_data = datafiles_from_paths(custom_data_paths)
    else:
        extra_data = {}
    protocol = parse(contents,
                     protocol_name,
                     extra_labware=extra_labware,
                     extra_data=extra_data)
    if getattr(protocol, 'api_level', APIVersion(2, 0)) < APIVersion(2, 0):
        opentrons.robot.connect()
        opentrons.robot.cache_instrument_models()
        opentrons.robot.discover_modules()
        opentrons.robot.home()
        if emit_runlog:
            opentrons.robot.broker.subscribe(commands.command_types.COMMAND,
                                             emit_runlog)
        assert isinstance(protocol, PythonProtocol),\
            'Internal error: Only Python protocols may be executed in v1'
        exec(protocol.contents, {})
    else:
        bundled_data = getattr(protocol, 'bundled_data', {})
        bundled_data.update(extra_data)
        gpa_extras = getattr(protocol, 'extra_labware', None) or None
        context = get_protocol_api(getattr(protocol, 'api_level',
                                           MAX_SUPPORTED_VERSION),
                                   bundled_labware=getattr(
                                       protocol, 'bundled_labware', None),
                                   bundled_data=bundled_data,
                                   extra_labware=gpa_extras)
        if emit_runlog:
            context.broker.subscribe(commands.command_types.COMMAND,
                                     emit_runlog)
        context.home()
        try:
            execute_apiv2.run_protocol(protocol, context)
        finally:
            context.cleanup()
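A usage sketch for the execute() function above; the protocol file name and the runlog callback are assumptions for illustration, not part of the original code:

def print_runlog_entry(entry):
    # Per the docstring above, the format keys in 'text' match keys in the
    # payload, so this reconstructs the human-readable command text.
    payload = entry['payload']
    print(payload.get('text', '').format(**payload))

with open('my_protocol.py') as protocol_file:
    execute(protocol_file,
            protocol_name='my_protocol.py',
            emit_runlog=print_runlog_entry)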
Example 11
def test_execute_ok(protocol, protocol_file, ensure_api2, loop):
    proto = compile(protocol.text, protocol.filename, 'exec')
    ctx = ProtocolContext(loop)
    execute.run_protocol(protocol_code=proto, context=ctx)
Example 12
def parse(protocol_path):
    if not protocol_path:
        print('No protocol path... something weird happened!')
        return {}
    print('Parsing protocol: {}'.format(protocol_path))

    fields_json_path = Path(protocol_path).parent / ('fields.json')
    has_fields = Path(fields_json_path).is_file()

    with open(protocol_path) as f:
        original_contents = f.read()

    fields = []
    contents = original_contents
    if has_fields:
        with open(fields_json_path) as f:
            fields = json.load(f)
            # for simulation, we need to add a get_values() fn that supplies
            # the default values
            default_values = {
                f['name']: get_default_field_value(f)
                for f in fields
            }
            contents = prepend_get_values_fn(original_contents, default_values)

    # load any custom labware in protocols/{PROTOCOL_SLUG}/labware/*.json
    custom_labware_defs = []
    custom_labware_path = Path(protocol_path).parent / 'labware'
    if custom_labware_path.is_dir():
        for l_path in custom_labware_path.iterdir():
            with open(l_path) as lf:
                custom_labware_defs.append(json.load(lf))
    for labware_def in custom_labware_defs:
        opentrons.protocol_api.labware.save_definition(labware_def, force=True)

    protocol = parse_protocol(protocol_file=contents, filename=protocol_path)

    assert protocol.api_level >= (2, 0)

    context = opentrons.protocol_api.contexts.ProtocolContext()
    # NOTE: (IL, 2020-05-13): there's no deck calibration, and the
    # identity deck calibration is about 25 mm too high (as of v1.17.1).
    # Because of this, tall labware can cross the threshold and cause a
    # LabwareHeightError even though they're safe to use.
    # So we'll apply a HACK-y -25 offset of the deck.
    context._hw_manager.hardware._config.gantry_calibration[2][3] = -25
    context.home()
    run_protocol(protocol, context=context)

    instruments = [{
        'mount': mount,
        'name': pipette.name
    } for mount, pipette in context.loaded_instruments.items() if pipette]

    labware = filter_none([
        parse_labware(slot, labware)
        for slot, labware in context.loaded_labwares.items()
    ])

    # NOTE: this isn't really used right now...
    metadata = protocol.metadata

    # NOTE: module population broke library deck layout 3/5/2020
    # modules = filter_none([parse_module(slot, module)
    #                        for slot, module
    #                        in context.loaded_modules.items()])
    modules = []

    return {
        "instruments": instruments,
        "labware": labware,
        "fields": fields,
        "modules": modules,
        "metadata": metadata,
        "content": original_contents,
        "custom_labware_defs": custom_labware_defs
    }
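The fields branch above relies on prepend_get_values_fn to inject default field values for simulation. Its implementation is not shown in this example; a hypothetical sketch of such a helper could look like this:

def prepend_get_values_fn(original_contents, default_values):
    # Hypothetical sketch (not the actual helper): prepend a get_values()
    # shim that hands back the default field values, so the protocol can be
    # simulated without user-supplied parameters.
    shim = (
        '_FIELD_DEFAULTS = {!r}\n'
        'def get_values(*names):\n'
        '    return [_FIELD_DEFAULTS[n] for n in names]\n\n'
    ).format(default_values)
    return shim + original_contents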
Example 13
def simulate(protocol_file: TextIO,
             file_name: str = None,
             custom_labware_paths: List[str] = None,
             custom_data_paths: List[str] = None,
             propagate_logs: bool = False,
             log_level: str = 'warning') -> Tuple[List[Mapping[str, Any]],
                                                  Optional[BundleContents]]:
    """
    Simulate the protocol itself.

    This is a one-stop function to simulate a protocol, whether python or json,
    no matter the api version, from external (i.e. not bound up in other
    internal server infrastructure) sources.

    To simulate an opentrons protocol from other places, pass in a file like
    object as protocol_file; this function either returns (if the simulation
    has no problems) or raises an exception.

    To call from the command line use either the autogenerated entrypoint
    ``opentrons_simulate`` (``opentrons_simulate.exe``, on windows) or
    ``python -m opentrons.simulate``.

    The return value is the run log, a list of dicts that represent the
    commands executed by the robot; and either the contents of the protocol
    that would be required to bundle, or ``None``.

    Each dict element in the run log has the following keys:

        - ``level``: The depth at which this command is nested - if this is an
                     aspirate inside a mix inside a transfer, for instance,
                     it would be 3.
        - ``payload``: The command, its arguments, and how to format its text.
                       For more specific details see
                       :py:mod:`opentrons.commands`. To format a message from
                       a payload do ``payload['text'].format(**payload)``.
        - ``logs``: Any log messages that occurred during execution of this
                    command, as a logging.LogRecord

    :param file-like protocol_file: The protocol file to simulate.
    :param str file_name: The name of the file
    :param custom_labware_paths: A list of directories to search for custom
                                 labware, or None. Ignored if the apiv2 feature
                                 flag is not set. Loads valid labware from
                                 these paths and makes them available to the
                                 protocol context.
    :param custom_data_paths: A list of directories or files to load custom
                              data files from. Ignored if the apiv2 feature
                              flag is not set. Entries may be either files or
                              directories. Specified files and the
                              non-recursive contents of specified directories
                              are presented by the protocol context in
                              :py:attr:`.ProtocolContext.bundled_data`.
    :param propagate_logs: Whether this function should allow logs from the
                           Opentrons stack to propagate up to the root handler.
                           This can be useful if you're integrating this
                           function in a larger application, but most logs that
                           occur during protocol simulation are best associated
                           with the actions in the protocol that cause them.
                           Default: ``False``
    :type propagate_logs: bool
    :param log_level: The level of logs to capture in the runlog. Default:
                      ``'warning'``
    :type log_level: 'debug', 'info', 'warning', or 'error'
    :returns: A tuple of a run log for user output, and possibly the required
              data to write to a bundle to bundle this protocol. The bundle is
              only emitted if bundling is allowed (see
              :py:meth:`allow_bundling`)  and this is an unbundled Protocol API
              v2 python protocol. In other cases it is None.
    """
    stack_logger = logging.getLogger('opentrons')
    stack_logger.propagate = propagate_logs

    contents = protocol_file.read()
    if custom_labware_paths:
        extra_labware = labware_from_paths(custom_labware_paths)
    else:
        extra_labware = {}

    if custom_data_paths:
        extra_data = datafiles_from_paths(custom_data_paths)
    else:
        extra_data = {}

    protocol = parse.parse(contents, file_name,
                           extra_labware=extra_labware,
                           extra_data=extra_data)
    bundle_contents: Optional[BundleContents] = None

    if getattr(protocol, 'api_level', APIVersion(2, 0)) < APIVersion(2, 0):
        def _simulate_v1():
            opentrons.robot.disconnect()
            opentrons.robot.reset()
            scraper = CommandScraper(stack_logger, log_level,
                                     opentrons.robot.broker)
            exec(protocol.contents, {})
            return scraper

        scraper = _simulate_v1()
    else:
        # we want a None literal rather than empty dict so get_protocol_api
        # will look for custom labware if this is a robot
        gpa_extras = getattr(protocol, 'extra_labware', None) or None
        context = get_protocol_api(
            getattr(protocol, 'api_level', MAX_SUPPORTED_VERSION),
            bundled_labware=getattr(protocol, 'bundled_labware', None),
            bundled_data=getattr(protocol, 'bundled_data', None),
            extra_labware=gpa_extras)
        scraper = CommandScraper(stack_logger, log_level, context.broker)
        try:
            execute.run_protocol(protocol, context)
            if isinstance(protocol, PythonProtocol)\
               and protocol.api_level >= APIVersion(2, 0)\
               and protocol.bundled_labware is None\
               and allow_bundle():
                bundle_contents = bundle_from_sim(
                    protocol, context)
        finally:
            context.cleanup()

    return scraper.commands, bundle_contents
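A usage sketch for simulate() above; the file name is an assumption made for illustration. Each run log entry carries the 'level' and 'payload' keys documented in the docstring:

with open('my_protocol.py') as protocol_file:
    runlog, bundle_contents = simulate(protocol_file,
                                       file_name='my_protocol.py')

for entry in runlog:
    payload = entry['payload']
    # Indent by nesting level and format the text from its own payload keys.
    print('  ' * entry['level'] + payload.get('text', '').format(**payload))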
Example 14
def parse(protocol_path):
    if not protocol_path:
        print('No protocol path... something weird happened!')
        return {}
    print('Parsing protocol: {}'.format(protocol_path))

    fields_json_path = Path(protocol_path).parent / ('fields.json')
    has_fields = Path(fields_json_path).is_file()

    with open(protocol_path) as f:
        original_contents = f.read()

    fields = []
    contents = original_contents
    if has_fields:
        with open(fields_json_path) as f:
            fields = json.load(f)
            # for simulation, we need to add a get_values() fn that supplies
            # the default values
            default_values = {
                f['name']: get_default_field_value(f)
                for f in fields
            }
            contents = prepend_get_values_fn(original_contents, default_values)

    # load any custom labware in protocols/{PROTOCOL_SLUG}/labware/*.json
    custom_labware_defs = []
    custom_labware_path = Path(protocol_path).parent / 'labware'
    if custom_labware_path.is_dir():
        for l_path in custom_labware_path.iterdir():
            with open(l_path) as lf:
                custom_labware_defs.append(json.load(lf))
    for labware_def in custom_labware_defs:
        opentrons.protocol_api.labware.save_definition(labware_def, force=True)

    protocol = parse_protocol(protocol_file=contents, filename=protocol_path)

    assert protocol.api_level >= (2, 0)

    context = opentrons.protocol_api.contexts.ProtocolContext()
    context.home()
    run_protocol(protocol, context=context)

    instruments = [{
        'mount': mount,
        'name': pipette.name
    } for mount, pipette in context.loaded_instruments.items() if pipette]

    labware = filter_none([
        parse_labware(slot, labware)
        for slot, labware in context.loaded_labwares.items()
    ])

    # NOTE: this isn't really used right now...
    metadata = protocol.metadata

    # NOTE: module population broke library deck layout 3/5/2020
    # modules = filter_none([parse_module(slot, module)
    #                        for slot, module
    #                        in context.loaded_modules.items()])
    modules = []

    return {
        "instruments": instruments,
        "labware": labware,
        "fields": fields,
        "modules": modules,
        "metadata": metadata,
        "content": original_contents,
        "custom_labware_defs": custom_labware_defs
    }
Example 15
    def _simulate(self):
        self._reset()

        stack = []
        res = []
        commands = []

        self._containers.clear()
        self._instruments.clear()
        self._modules.clear()
        self._interactions.clear()

        def on_command(message):
            payload = message['payload']
            description = payload.get('text', '').format(**payload)

            if message['$'] == 'before':
                level = len(stack)

                stack.append(message)
                commands.append(payload)

                res.append({
                    'level': level,
                    'description': description,
                    'id': len(res)
                })
            else:
                stack.pop()

        unsubscribe = self._broker.subscribe(command_types.COMMAND, on_command)

        try:
            # ensure actual pipettes are cached before driver is disconnected
            if ff.use_protocol_api_v2():
                self._hardware.cache_instruments()
                instrs = {}
                for mount, pip in self._hardware.attached_instruments.items():
                    if pip:
                        instrs[mount] = {
                            'model': pip['model'],
                            'id': pip.get('pipette_id', '')
                        }
                sim = adapters.SynchronousAdapter.build(
                    API.build_hardware_simulator,
                    instrs, [
                        mod.name()
                        for mod in self._hardware.attached_modules.values()
                    ],
                    strict_attached_instruments=False)
                sim.home()
                bundled_data = None
                bundled_labware = None
                if isinstance(self._protocol, PythonProtocol):
                    bundled_data = self._protocol.bundled_data
                    bundled_labware = self._protocol.bundled_labware
                self._simulating_ctx = ProtocolContext(
                    loop=self._loop,
                    hardware=sim,
                    broker=self._broker,
                    bundled_labware=bundled_labware,
                    bundled_data=bundled_data)
                run_protocol(self._protocol,
                             simulate=True,
                             context=self._simulating_ctx)
            else:
                self._hardware.broker = self._broker
                self._hardware.cache_instrument_models()
                self._hardware.disconnect()
                if isinstance(self._protocol, JsonProtocol):
                    execute_protocol(self._protocol)
                else:
                    exec(self._protocol.contents, {})
        finally:
            # physically attached pipettes are re-cached during robot.connect()
            # which is important, because during a simulation, the robot could
            # think that it holds a pipette model that it actually does not
            if not ff.use_protocol_api_v2():
                self._hardware.connect()
            unsubscribe()

            instruments, containers, modules, interactions = _accumulate(
                [_get_labware(command) for command in commands])

            self._containers.extend(_dedupe(containers))
            self._instruments.extend(_dedupe(instruments))
            self._modules.extend(_dedupe(modules))
            self._interactions.extend(_dedupe(interactions))

            # Labware calibration happens after simulation and before run, so
            # we have to clear the tips if they are left on after simulation
            # to ensure that the instruments are in the expected state at the
            # beginning of the labware calibration flow
            if not ff.use_protocol_api_v2():
                self._hardware.clear_tips()

        return res
Example 16
def execute(protocol_file: TextIO,
            propagate_logs: bool = False,
            log_level: str = 'warning',
            emit_runlog: Callable[[Dict[str, Any]], None] = None):
    """
    Run the protocol itself.

    This is a one-stop function to run a protocol, whether python or json,
    no matter the api version, from external (i.e. not bound up in other
    internal server infrastructure) sources.

    To run an opentrons protocol from other places, pass in a file like
    object as protocol_file; this function either returns (if the run has no
    problems) or raises an exception.

    To call from the command line use either the autogenerated entrypoint
    ``opentrons_execute`` or ``python -m opentrons.execute``.

    If the protocol is using Opentrons Protocol API V1, it does not need to
    explicitly call :py:meth:`.Robot.connect`,
    :py:meth:`.Robot.discover_modules`, or
    :py:meth:`.Robot.cache_instrument_models`.

    :param file-like protocol_file: The protocol file to execute
    :param propagate_logs: Whether this function should allow logs from the
                           Opentrons stack to propagate up to the root handler.
                           This can be useful if you're integrating this
                           function in a larger application, but most logs that
                           occur during protocol simulation are best associated
                           with the actions in the protocol that cause them.
                           Default: ``False``
    :type propagate_logs: bool
    :param log_level: The level of logs to emit on the command line. Default:
                      'warning'
    :type log_level: 'debug', 'info', 'warning', or 'error'
    :param emit_runlog: A callback for printing the runlog. If specified, this
                        will be called whenever a command adds an entry to the
                        runlog, which can be used for display and progress
                        estimation. If specified, the callback should take a
                        single argument (the name doesn't matter) which will
                        be a dictionary (see below). Default: ``None``

    The format of the runlog entries is as follows:

    .. code-block:: python

        {
            'name': command_name,
            'payload': {
                 'text': string_command_text,
                  # The rest of this struct is command-dependent; see
                  # opentrons.commands.commands. Its keys match format
                  # keys in 'text', so that
                  # entry['payload']['text'].format(**entry['payload'])
                  # will produce a string with information filled in
             }
        }


    """
    stack_logger = logging.getLogger('opentrons')
    stack_logger.propagate = propagate_logs
    stack_logger.setLevel(getattr(logging, log_level.upper(), logging.WARNING))
    contents = protocol_file.read()
    protocol = parse(contents, protocol_file.name)
    if ff.use_protocol_api_v2():
        context = get_protocol_api(
            bundled_labware=getattr(protocol, 'bundled_labware', None),
            bundled_data=getattr(protocol, 'bundled_data', None))
        if emit_runlog:
            context.broker.subscribe(commands.command_types.COMMAND,
                                     emit_runlog)
        context.home()
        execute_apiv2.run_protocol(protocol, simulate=False, context=context)
    else:
        robot.connect()
        robot.cache_instrument_models()
        robot.discover_modules()
        robot.home()
        if emit_runlog:
            robot.broker.subscribe(commands.command_types.COMMAND, emit_runlog)
        if isinstance(protocol, JsonProtocol):
            legacy_api.protocols.execute_protocol(protocol)
        else:
            exec(protocol.contents, {})
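A sketch of an emit_runlog callback for this execute() variant; it assumes the broker message shape used elsewhere in these examples, where '$' marks the before/after phase and 'payload' carries the command details:

def emit_runlog(message):
    # Only report the 'before' phase so each command is printed once.
    if message.get('$') == 'before':
        payload = message['payload']
        print(payload.get('text', '').format(**payload))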
Example 17
def test_execute_ok(protocol, protocol_file, ensure_api2, loop):
    proto = parse(protocol.text, protocol.filename)
    ctx = ProtocolContext(loop)
    execute.run_protocol(proto, context=ctx)
Example 18
def test_execute_v1_imports(protocol, ensure_api2):
    proto = parse(protocol)
    execute.run_protocol(proto)