Example #1
async def get_attached_pipettes(request):
    """
    Query robot for model strings on 'left' and 'right' mounts, and return a
    dict with the results keyed by mount. By default, this endpoint provides
    cached values, which will not interrupt a running session. WARNING: if the
    caller supplies the "refresh=true" query parameter, this method will
    interrupt a sequence of Smoothie operations that are in progress, such as a
    protocol run.

    Example:

    ```
    {
      'left': {
        'model': 'p300_single_v1',
        'tip_length': 51.7,
        'mount_axis': 'z',
        'plunger_axis': 'b'
      },
      'right': {
        'model': 'p10_multi_v1',
        'tip_length': 40,
        'mount_axis': 'a',
        'plunger_axis': 'c'
      }
    }
    ```

    If a pipette is "uncommissioned" (i.e. it does not have a model string
    written to its on-board memory), or if no pipette is present, the
    corresponding mount will report `'model': null`.
    """
    if request.url.query.get('refresh') == 'true':
        robot.cache_instrument_models()
    return web.json_response(robot.get_attached_pipettes())
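
For context, the handler above is an aiohttp endpoint. The sketch below shows one way a client might call it; the route path ('/pipettes'), host, and port are assumptions for illustration, not taken from this example.

```
import asyncio

import aiohttp


async def fetch_attached_pipettes(host='http://localhost:31950',
                                  refresh=False):
    # refresh=True forces a re-read from the pipettes' on-board memory;
    # per the docstring above, avoid it while a protocol is running
    params = {'refresh': 'true'} if refresh else {}
    async with aiohttp.ClientSession() as session:
        async with session.get(host + '/pipettes', params=params) as resp:
            return await resp.json()


if __name__ == '__main__':
    pipettes = asyncio.get_event_loop().run_until_complete(
        fetch_attached_pipettes())
    print(pipettes['left']['model'], pipettes['right']['model'])
```
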
Example #2
def test_cache_instruments(monkeypatch):
    # Test that Smoothie runtime configs are applied when
    # cache_instrument_models is called
    robot.reset()
    fake_pip = {
        'left': {
            'model': 'p10_single_v1.3',
            'id': 'FakePip2',
            'name': 'p10_single'
        },
        'right': {
            'model': 'p300_single_v2.0',
            'id': 'FakePip',
            'name': 'p300_single_gen2'
        }
    }
    monkeypatch.setattr(robot, 'model_by_mount', fake_pip)

    def fake_func1(value):
        return value

    def fake_func2(mount, value):
        return mount, value

    def fake_read(mount):
        return robot.model_by_mount[mount]['model']

    # Stub out the driver so we can capture the config calls that
    # cache_instrument_models makes for each attached pipette
    robot._driver.update_steps_per_mm = mock.Mock(fake_func1)
    robot._driver.update_pipette_config = mock.Mock(fake_func2)
    monkeypatch.setattr(robot._driver, 'read_pipette_model', fake_read)
    robot.cache_instrument_models()
    steps_mm_calls = [mock.call({'B': 768}), mock.call({'C': 3200})]
    pip_config_calls = [
        mock.call('Z', {'home': 220}),
        mock.call('A', {'home': 172.15}),
        mock.call('B', {'max_travel': 30}),
        mock.call('C', {'max_travel': 60})
    ]
    robot._driver.update_steps_per_mm.assert_has_calls(steps_mm_calls,
                                                       any_order=True)
    robot._driver.update_pipette_config.assert_has_calls(pip_config_calls,
                                                         any_order=True)
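
The assertions above rely on unittest.mock's assert_has_calls with any_order=True. Here is a standalone sketch of that pattern (not Opentrons code; the Driver class and configure function are made up for illustration):

```
from unittest import mock


class Driver:
    def update_steps_per_mm(self, mapping):
        pass


def configure(driver):
    # the code under test issues one call per axis, in no guaranteed order
    driver.update_steps_per_mm({'B': 768})
    driver.update_steps_per_mm({'C': 3200})


driver = Driver()
driver.update_steps_per_mm = mock.Mock()
configure(driver)
# any_order=True lets the expected calls match in either sequence
driver.update_steps_per_mm.assert_has_calls(
    [mock.call({'C': 3200}), mock.call({'B': 768})], any_order=True)
```
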
Example #3
    def _run(self):
        def on_command(message):
            if message['$'] == 'before':
                self.log_append()
            if message['name'] == command_types.PAUSE:
                self.set_state('paused')
            if message['name'] == command_types.RESUME:
                self.set_state('running')

        self._reset()

        _unsubscribe = self._broker.subscribe(command_types.COMMAND,
                                              on_command)

        self.startTime = now()
        self.set_state('running')

        try:
            if self._use_v2:
                self.resume()
                self._pre_run_hooks()
                self._hardware.cache_instruments()
                ctx = ProtocolContext.build_using(self._protocol,
                                                  loop=self._loop,
                                                  broker=self._broker,
                                                  extra_labware=getattr(
                                                      self._protocol,
                                                      'extra_labware', {}))
                ctx.connect(self._hardware)
                ctx.home()
                run_protocol(self._protocol, context=ctx)
            else:
                robot.broker = self._broker
                assert isinstance(self._protocol, PythonProtocol),\
                    'Internal error: v1 should only be used for python'
                if not robot.is_connected():
                    robot.connect()
                self.resume()
                self._pre_run_hooks()
                robot.cache_instrument_models()
                robot.discover_modules()
                exec(self._protocol.contents, {})

            # If the last command in a protocol was a pause, the protocol
            # will immediately finish executing because there's no smoothie
            # command to block... except the home that's about to happen,
            # which will confuse the app and lock it up. So we need to
            # do our own pause here, and sleep the thread until/unless the
            # app resumes us.
            #
            # Cancelling from the app during this pause will result in the
            # smoothie giving us an error during the subsequent home, which
            # is tragic but expected.
            while self.state == 'paused':
                sleep(0.1)
            self.set_state('finished')
            self._hw_iface().home()
        except SmoothieAlarm:
            log.info("Protocol cancelled")
        except Exception as e:
            log.exception("Exception during run:")
            self.error_append(e)
            self.set_state('error')
            raise e
        finally:
            _unsubscribe()
Example #4
    def _simulate(self):
        self._reset()

        stack = []
        res = []
        commands = []

        self._containers.clear()
        self._instruments.clear()
        self._modules.clear()
        self._interactions.clear()

        def on_command(message):
            payload = message['payload']
            description = payload.get('text', '').format(**payload)

            if message['$'] == 'before':
                level = len(stack)

                stack.append(message)
                commands.append(payload)

                res.append({
                    'level': level,
                    'description': description,
                    'id': len(res)
                })
            else:
                stack.pop()

        unsubscribe = self._broker.subscribe(command_types.COMMAND, on_command)
        old_robot_connect = robot.connect

        try:
            # ensure actual pipettes are cached before driver is disconnected
            self._hardware.cache_instruments()
            if self._use_v2:
                instrs = {}
                for mount, pip in self._hardware.attached_instruments.items():
                    if pip:
                        instrs[mount] = {
                            'model': pip['model'],
                            'id': pip.get('pipette_id', '')
                        }
                sim = adapters.SynchronousAdapter.build(
                    API.build_hardware_simulator,
                    instrs,
                    [mod.name() for mod in self._hardware.attached_modules],
                    strict_attached_instruments=False)
                sim.home()
                self._simulating_ctx = ProtocolContext.build_using(
                    self._protocol,
                    loop=self._loop,
                    hardware=sim,
                    broker=self._broker,
                    extra_labware=getattr(self._protocol, 'extra_labware', {}))
                run_protocol(self._protocol, context=self._simulating_ctx)
            else:
                robot.broker = self._broker
                # we no longer rely on an existing connection, so make sure
                # we are connected here
                robot.connect()
                robot.cache_instrument_models()
                robot.disconnect()

                def robot_connect_error(port=None, options=None):
                    raise RuntimeError(
                        'Protocols executed through the Opentrons App may not '
                        'use robot.connect(). Allowing this call would cause '
                        'the robot to execute commands during simulation, and '
                        'then raise an error on execution.')

                robot.connect = robot_connect_error
                exec(self._protocol.contents, {})
        finally:
            # physically attached pipettes are re-cached during robot.connect()
            # which is important, because during a simulation, the robot could
            # think that it holds a pipette model that it actually does not
            if not self._use_v2:
                robot.connect = old_robot_connect
                robot.connect()

            unsubscribe()

            instruments, containers, modules, interactions = _accumulate(
                [_get_labware(command) for command in commands])

            self._containers.extend(_dedupe(containers))
            self._instruments.extend(
                _dedupe(
                    instruments +
                    list(self._simulating_ctx.loaded_instruments.values())))
            self._modules.extend(
                _dedupe(modules + [
                    m._geometry
                    for m in self._simulating_ctx.loaded_modules.values()
                ]))
            self._interactions.extend(_dedupe(interactions))

            # Labware calibration happens after simulation and before run, so
            # we have to clear the tips if they are left on after simulation
            # to ensure that the instruments are in the expected state at the
            # beginning of the labware calibration flow
            if not self._use_v2:
                robot.clear_tips()

        return res
Example #5
def execute(protocol_file: TextIO,
            propagate_logs: bool = False,
            log_level: str = 'warning',
            emit_runlog: Callable[[Dict[str, Any]], None] = None):
    """
    Run the protocol itself.

    This is a one-stop function to run a protocol, whether Python or JSON,
    no matter the API version, from external sources (i.e. sources not
    bound up in other internal server infrastructure).

    To run an Opentrons protocol from other places, pass in a file-like
    object as protocol_file; this function either returns (if the run has no
    problems) or raises an exception.

    To call from the command line use either the autogenerated entrypoint
    ``opentrons_execute`` or ``python -m opentrons.execute``.

    If the protocol is using Opentrons Protocol API V1, it does not need to
    explicitly call :py:meth:`.Robot.connect`
    or :py:meth:`.Robot.discover_modules`, or
    :py:meth:`.Robot.cache_instrument_models`.

    :param file-like protocol_file: The protocol file to execute
    :param propagate_logs: Whether this function should allow logs from the
                           Opentrons stack to propagate up to the root handler.
                           This can be useful if you're integrating this
                           function in a larger application, but most logs that
                           occur during protocol simulation are best associated
                           with the actions in the protocol that cause them.
                           Default: ``False``
    :type propagate_logs: bool
    :param log_level: The level of logs to emit on the command line. Default:
                      'warning'
    :type log_level: 'debug', 'info', 'warning', or 'error'
    :param emit_runlog: A callback for printing the runlog. If specified, this
                        will be called whenever a command adds an entry to the
                        runlog, which can be used for display and progress
                        estimation. If specified, the callback should take a
                        single argument (the name doesn't matter) which will
                        be a dictionary (see below). Default: ``None``

    The format of the runlog entries is as follows:

    .. code-block:: python

        {
            'name': command_name,
            'payload': {
                'text': string_command_text,
                # The rest of this struct is command-dependent; see
                # opentrons.commands.commands. Its keys match format
                # keys in 'text', so that
                # entry['payload']['text'].format(**entry['payload'])
                # will produce a string with information filled in
            }
        }


    """
    stack_logger = logging.getLogger('opentrons')
    stack_logger.propagate = propagate_logs
    stack_logger.setLevel(getattr(logging, log_level.upper(), logging.WARNING))
    contents = protocol_file.read()
    protocol = parse(contents, protocol_file.name)
    if ff.use_protocol_api_v2():
        context = get_protocol_api(
            bundled_labware=getattr(protocol, 'bundled_labware', None),
            bundled_data=getattr(protocol, 'bundled_data', None))
        if emit_runlog:
            context.broker.subscribe(commands.command_types.COMMAND,
                                     emit_runlog)
        context.home()
        execute_apiv2.run_protocol(protocol, simulate=False, context=context)
    else:
        robot.connect()
        robot.cache_instrument_models()
        robot.discover_modules()
        robot.home()
        if emit_runlog:
            robot.broker.subscribe(commands.command_types.COMMAND, emit_runlog)
        if isinstance(protocol, JsonProtocol):
            legacy_api.protocols.execute_protocol(protocol)
        else:
            exec(protocol.contents, {})
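
A minimal sketch of calling execute() from another script with a runlog callback in the shape documented above; the protocol path is a placeholder, and the import path follows the `python -m opentrons.execute` entrypoint mentioned in the docstring.

```
from opentrons import execute as ot_execute


def print_runlog_entry(entry):
    # per the documented runlog shape, format keys in 'text' are filled
    # from the payload itself
    payload = entry['payload']
    print(payload.get('text', '').format(**payload))


# placeholder protocol path; substitute a real protocol file
with open('/data/my_protocol.py') as protocol_file:
    ot_execute.execute(protocol_file,
                       log_level='info',
                       emit_runlog=print_runlog_entry)
```
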
Example #6
    def _simulate(self):
        self._reset()

        stack = []
        res = []
        commands = []

        self._containers.clear()
        self._instruments.clear()
        self._modules.clear()
        self._interactions.clear()

        def on_command(message):
            payload = message['payload']
            description = payload.get('text', '').format(
                **payload
            )

            if message['$'] == 'before':
                level = len(stack)

                stack.append(message)
                commands.append(payload)

                res.append({
                    'level': level,
                    'description': description,
                    'id': len(res)
                })
            else:
                stack.pop()

        unsubscribe = subscribe(types.COMMAND, on_command)

        try:
            # ensure actual pipettes are cached before driver is disconnected
            robot.cache_instrument_models()

            # TODO (artyom, 20171005): this will go away
            # once robot / driver simulation flow is fixed
            robot.disconnect()
            if self._is_json_protocol:
                execute_protocol(self._protocol)
            else:
                exec(self._protocol, {})
        finally:
            # physically attached pipettes are re-cached during robot.connect()
            # which is important, because during a simulation, the robot could
            # think that it holds a pipette model that it actually does not
            robot.connect()
            unsubscribe()

            instruments, containers, modules, interactions = _accumulate(
                [_get_labware(command) for command in commands])

            self._containers.extend(_dedupe(containers))
            self._instruments.extend(_dedupe(instruments))
            self._modules.extend(_dedupe(modules))
            self._interactions.extend(_dedupe(interactions))

        return res