Example #1
class ClientMachine(object):
    """
    I am a state machine that implements the "secret handshake"
    cryptographic handshake described in the paper "Designing a
    Secret Handshake: Authenticated Key Exchange as a Capability
    System" by Dominic Tarr.

    This state machine doesn't perform any I/O and can therefore be
    used with a non-Twisted networking API.
    """
    _machine = automat.MethodicalMachine()
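The excerpt above stops after the machine declaration, but the docstring's claim is worth illustrating: because the machine performs no I/O, any networking layer can drive it by calling its input methods. A minimal sketch under that assumption, using hypothetical names (make_client_machine, the peer address) and the datagram_received input shown in the full listing in Example #6:

import socket

# hypothetical blocking-socket driver for the IO-free machine
sock = socket.create_connection(("secret-handshake.example", 8008))
machine = make_client_machine()      # hypothetical factory wiring up the handlers (see Example #6)
machine.start()                      # kicks off the handshake
while True:
    data = sock.recv(4096)
    if not data:
        break
    machine.datagram_received(data)  # feed raw transport bytes into the machine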
Example #2
class Simple(object):
    """                                                                                                                     
    """
    _m = automat.MethodicalMachine()

    @_m.input()
    def one(self, data):
        "some input data"

    @_m.state(initial=True)
    def waiting(self):
        "patiently"

    @_m.output()
    def boom(self, data):
        pass

    waiting.upon(
        one,
        enter=waiting,
        outputs=[boom],
    )
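A short usage note: calling an input method on an instance runs the transition's outputs and, with automat's default collector, returns the list of their return values.

s = Simple()
result = s.one("hello")   # stays in 'waiting' and runs boom("hello")
print(result)             # [None] -- boom returns nothing; the default collector is list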
Example #3
class _SocksMachine(object):
    """
    A prototype of the SOCKS5 client state-machine in automat.

    This state machine makes a single SOCKS request.
    """

    _machine = automat.MethodicalMachine()
    SUCCEEDED = 0x00
    REPLY_IPV4 = 0x01
    REPLY_HOST = 0x03
    REPLY_IPV6 = 0x04

    # XXX address = (host, port) instead
    def __init__(self,
                 req_type,
                 host,
                 port=0,
                 on_disconnect=None,
                 on_data=None,
                 create_connection=None):
        if req_type not in self._dispatch:
            raise ValueError("Unknown request type '{}'".format(req_type))
        if req_type == 'CONNECT' and create_connection is None:
            raise ValueError(
                "create_connection function required for '{}'".format(
                    req_type))
        if not isinstance(host, (bytes, str, six.text_type)):
            raise ValueError("'host' must be text".format(type(host)))
        # XXX what if addr is None?
        self._req_type = req_type
        self._addr = _create_ip_address(six.text_type(host), port)
        self._data = b''
        self._on_disconnect = on_disconnect
        self._create_connection = create_connection
        # XXX FIXME do *one* of these:
        self._on_data = on_data
        self._outgoing_data = []
        # the other side of our proxy
        self._sender = None
        self._when_done = util.SingleObserver()

    def when_done(self):
        """
        Returns a Deferred that fires when we're done
        """
        return self._when_done.when_fired()

    def _data_to_send(self, data):
        if self._on_data:
            self._on_data(data)
        else:
            self._outgoing_data.append(data)

    def send_data(self, callback):
        """
        drain all pending data by calling `callback()` on it
        """
        # a "for x in self._outgoing_data" would potentially be more
        # efficient, but then there's no good way to bubble exceptions
        # from callback() out without lying about how much data we
        # processed .. or eat the exceptions in here.
        while len(self._outgoing_data):
            data = self._outgoing_data.pop(0)
            callback(data)

    def feed_data(self, data):
        # I feel like maybe I'm doing all this buffering stuff wrong,
        # but I also don't want a bunch of "received 1 byte" etc.
        # states hanging off everything that can "get data"
        self._data += data
        self.got_data()

    @_machine.output()
    def _parse_version_reply(self):
        "waiting for a version reply"
        if len(self._data) >= 2:
            reply = self._data[:2]
            self._data = self._data[2:]
            (version, method) = struct.unpack('BB', reply)
            if version == 5 and method in [0x00, 0x02]:
                self.version_reply(method)
            else:
                if version != 5:
                    self.version_error(
                        SocksError(
                            "Expected version 5, got {}".format(version)))
                else:
                    self.version_error(
                        SocksError(
                            "Wanted method 0 or 2, got {}".format(method)))

    def _parse_ipv4_reply(self):
        if len(self._data) >= 10:
            addr = inet_ntoa(self._data[4:8])
            port = struct.unpack('H', self._data[8:10])[0]
            self._data = self._data[10:]
            if self._req_type == 'CONNECT':
                self.reply_ipv4(addr, port)
            else:
                self.reply_domain_name(addr)

    def _parse_ipv6_reply(self):
        if len(self._data) >= 22:
            addr = self._data[4:20]
            port = struct.unpack('H', self._data[20:22])[0]
            self._data = self._data[22:]
            self.reply_ipv6(addr, port)

    def _parse_domain_name_reply(self):
        assert len(self._data) >= 8  # _parse_request_reply checks this
        addrlen = struct.unpack('B', self._data[4:5])[0]
        # may simply not have received enough data yet...
        if len(self._data) < (5 + addrlen + 2):
            return
        addr = self._data[5:5 + addrlen]
        # port = struct.unpack('H', self._data[5 + addrlen:5 + addrlen + 2])[0]
        self._data = self._data[5 + addrlen + 2:]
        self.reply_domain_name(addr)

    @_machine.output()
    def _parse_request_reply(self):
        "waiting for a reply to our request"
        # we need at least 8 bytes of data: 4 for the "header", such
        # as it is, plus the length byte and at least one byte of a
        # DOMAINNAME (or 4 or 16 more if it's an IPv4/6 address
        # reply), plus 2 bytes on the end for the bound port.
        if len(self._data) < 8:
            return
        msg = self._data[:4]

        # not changing self._data yet, in case we've not got
        # enough bytes so far.
        (version, reply, _, typ) = struct.unpack('BBBB', msg)

        if version != 5:
            self.reply_error(
                SocksError("Expected version 5, got {}".format(version)))
            return

        if reply != self.SUCCEEDED:
            self.reply_error(_create_socks_error(reply))
            return

        reply_dispatcher = {
            self.REPLY_IPV4: self._parse_ipv4_reply,
            self.REPLY_HOST: self._parse_domain_name_reply,
            self.REPLY_IPV6: self._parse_ipv6_reply,
        }
        try:
            method = reply_dispatcher[typ]
        except KeyError:
            self.reply_error(
                SocksError("Unexpected response type {}".format(typ)))
            return
        method()

    @_machine.output()
    def _make_connection(self, addr, port):
        "make our proxy connection"
        sender = self._create_connection(addr, port)
        # XXX look out! we're depending on this "sender" implementing
        # certain Twisted APIs, and the state-machine shouldn't depend
        # on that.

        # XXX also, if sender implements producer/consumer stuff, we
        # should register ourselves (and implement it to) -- but this
        # should really be taking place outside the state-machine in
        # "the I/O-doing" stuff
        self._sender = sender
        self._when_done.fire(sender)

    @_machine.output()
    def _domain_name_resolved(self, domain):
        self._when_done.fire(domain)

    @_machine.input()
    def connection(self):
        "begin the protocol (i.e. connection made)"

    @_machine.input()
    def disconnected(self, error):
        "the connection has gone away"

    @_machine.input()
    def got_data(self):
        "we recevied some data and buffered it"

    @_machine.input()
    def version_reply(self, auth_method):
        "the SOCKS server replied with a version"

    @_machine.input()
    def version_error(self, error):
        "the SOCKS server replied, but we don't understand"

    @_machine.input()
    def reply_error(self, error):
        "the SOCKS server replied with an error"

    @_machine.input()
    def reply_ipv4(self, addr, port):
        "the SOCKS server told me an IPv4 addr, port"

    @_machine.input()
    def reply_ipv6(self, addr, port):
        "the SOCKS server told me an IPv6 addr, port"

    @_machine.input()
    def reply_domain_name(self, domain):
        "the SOCKS server told me a domain-name"

    @_machine.input()
    def answer(self):
        "the SOCKS server replied with an answer"

    @_machine.output()
    def _send_version(self):
        "sends a SOCKS version reply"
        self._data_to_send(
            # for anonymous(0) *and* authenticated (2): struct.pack('BBBB', 5, 2, 0, 2)
            struct.pack('BBB', 5, 1, 0))

    @_machine.output()
    def _disconnect(self, error):
        "done"
        if self._on_disconnect:
            self._on_disconnect(str(error))
        if self._sender:
            self._sender.connectionLost(Failure(error))
        self._when_done.fire(Failure(error))

    @_machine.output()
    def _send_request(self, auth_method):
        "send the request (connect, resolve or resolve_ptr)"
        assert auth_method == 0x00  # "no authentication required"
        return self._dispatch[self._req_type](self)

    @_machine.output()
    def _relay_data(self):
        "relay any data we have"
        if self._data:
            d = self._data
            self._data = b''
            # XXX this is "doing I/O" in the state-machine and it
            # really shouldn't be ... probably want a passed-in
            # "relay_data" callback or similar?
            self._sender.dataReceived(d)

    def _send_connect_request(self):
        "sends CONNECT request"
        # XXX needs to support v6 ... or something else does
        host = self._addr.host
        port = self._addr.port

        if isinstance(self._addr, (IPv4Address, IPv6Address)):
            is_v6 = isinstance(self._addr, IPv6Address)
            self._data_to_send(
                struct.pack(
                    '!BBBB4sH',
                    5,  # version
                    0x01,  # command
                    0x00,  # reserved
                    0x04 if is_v6 else 0x01,
                    inet_pton(AF_INET6 if is_v6 else AF_INET, host),
                    port,
                ))
        else:
            host = host.encode('ascii')
            self._data_to_send(
                struct.pack(
                    '!BBBBB{}sH'.format(len(host)),
                    5,  # version
                    0x01,  # command
                    0x00,  # reserved
                    0x03,
                    len(host),
                    host,
                    port,
                ))

    @_machine.output()
    def _send_resolve_request(self):
        "sends RESOLVE_PTR request (Tor custom)"
        host = self._addr.host.encode()
        self._data_to_send(
            struct.pack(
                '!BBBBB{}sH'.format(len(host)),
                5,  # version
                0xF0,  # command
                0x00,  # reserved
                0x03,  # DOMAINNAME
                len(host),
                host,
                0,  # self._addr.port?
            ))

    @_machine.output()
    def _send_resolve_ptr_request(self):
        "sends RESOLVE_PTR request (Tor custom)"
        # SOCKS5 ATYP: 0x01 means IPv4, 0x04 means IPv6
        addr_type = 0x01 if isinstance(self._addr,
                                       ipaddress.IPv4Address) else 0x04
        encoded_host = inet_aton(self._addr.host)
        self._data_to_send(
            struct.pack(
                '!BBBB4sH',
                5,  # version
                0xF1,  # command
                0x00,  # reserved
                addr_type,
                encoded_host,
                0,  # port; unused? SOCKS is fun
            ))

    @_machine.state(initial=True)
    def unconnected(self):
        "not yet connected"

    @_machine.state()
    def sent_version(self):
        "we've sent our version request"

    @_machine.state()
    def sent_request(self):
        "we've sent our stream/etc request"

    @_machine.state()
    def relaying(self):
        "received our response, now we can relay"

    @_machine.state()
    def abort(self, error_message):
        "we've encountered an error"

    @_machine.state()
    def done(self):
        "operations complete"

    unconnected.upon(
        connection,
        enter=sent_version,
        outputs=[_send_version],
    )

    sent_version.upon(
        got_data,
        enter=sent_version,
        outputs=[_parse_version_reply],
    )
    sent_version.upon(
        version_error,
        enter=abort,
        outputs=[_disconnect],
    )
    sent_version.upon(
        version_reply,
        enter=sent_request,
        outputs=[_send_request],
    )
    sent_version.upon(disconnected, enter=unconnected, outputs=[_disconnect])

    sent_request.upon(
        got_data,
        enter=sent_request,
        outputs=[_parse_request_reply],
    )
    sent_request.upon(
        reply_ipv4,
        enter=relaying,
        outputs=[_make_connection],
    )
    sent_request.upon(
        reply_ipv6,
        enter=relaying,
        outputs=[_make_connection],
    )
    # XXX this isn't always a _domain_name_resolved -- if we're a
    # req_type CONNECT then it's _make_connection_domain ...
    sent_request.upon(
        reply_domain_name,
        enter=done,
        outputs=[_domain_name_resolved],
    )
    sent_request.upon(
        reply_error,
        enter=abort,
        outputs=[_disconnect],
    )
    # XXX FIXME this needs a test
    sent_request.upon(
        disconnected,
        enter=abort,
        outputs=[_disconnect],  # ... or is this redundant?
    )

    relaying.upon(
        got_data,
        enter=relaying,
        outputs=[_relay_data],
    )
    relaying.upon(
        disconnected,
        enter=done,
        outputs=[_disconnect],
    )

    abort.upon(
        got_data,
        enter=abort,
        outputs=[],
    )
    abort.upon(
        disconnected,
        enter=abort,
        outputs=[],
    )

    done.upon(
        disconnected,
        enter=done,
        outputs=[],
    )

    _dispatch = {
        'CONNECT': _send_connect_request,
        'RESOLVE': _send_resolve_request,
        'RESOLVE_PTR': _send_resolve_ptr_request,
    }
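A hedged usage sketch for driving this machine from the outside; the fake create_connection object and the outgoing list below are stand-ins for the real relay and transport, since the machine itself never performs I/O.

outgoing = []


def fake_create_connection(addr, port):
    # stand-in for the real relay; the machine only needs something
    # with dataReceived() / connectionLost()
    class _Sender(object):
        def dataReceived(self, data):
            pass

        def connectionLost(self, reason):
            pass
    return _Sender()


machine = _SocksMachine(
    'CONNECT', u'torproject.org', 443,
    create_connection=fake_create_connection,
)
machine.connection()                 # queues the version/method greeting
machine.send_data(outgoing.append)   # drains b'\x05\x01\x00' into our "transport"
# as SOCKS replies arrive, feed them back in and drain again:
#   machine.feed_data(reply_bytes)
#   machine.send_data(outgoing.append)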
Example #4
class EmailStatus:

    _machine = automat.MethodicalMachine()

    def __init__(self, email_message):
        self._email_message = email_message

    # States

    @_machine.state(initial=True, serialized=EmailStatuses.Accepted.value)
    def accepted(self):
        """
        In this state, the email has been accepted, but nothing else.
        """

    @_machine.state(serialized=EmailStatuses.Delivered.value)
    def delivered(self):
        """
        In this state, the email has successfully been delivered to the
        recipient.
        """

    @_machine.state(serialized=EmailStatuses.Bounced.value)
    def bounced(self):
        """
        In this state, the email has bounced when delivery was attempted.
        """

    @_machine.state(serialized=EmailStatuses.SoftBounced.value)
    def soft_bounced(self):
        """
        In this state, the email soft bounced, we can continue sending email
        to this address.
        """

    @_machine.state(serialized=EmailStatuses.Complained.value)
    def complained(self):
        """
        In this state, the user has gotten the email, but they've complained
        that we are sending them spam.
        """

    # Inputs
    @_machine.input()
    def deliver(self):
        """
        We have received an event stating that the email has been delivered.
        """

    @_machine.input()
    def bounce(self):
        """
        Emails can bounce!
        """

    @_machine.input()
    def soft_bounce(self):
        """
        Emails can bounce, but only transiently so.
        """

    @_machine.input()
    def complain(self):
        """
        A recipient can complain about our email :(
        """

    # Outputs
    @_machine.output()
    def _handle_bounce(self):
        email = self._get_email()
        if email is not None:
            email.verified = False
            email.unverify_reason = UnverifyReasons.HardBounce

    @_machine.output()
    def _handle_complaint(self):
        email = self._get_email()
        if email is not None:
            email.verified = False
            email.unverify_reason = UnverifyReasons.SpamComplaint

    @_machine.output()
    def _incr_transient_bounce(self):
        email = self._get_email()
        if email is not None:
            email.transient_bounces += 1

            if email.transient_bounces > MAX_TRANSIENT_BOUNCES:
                email.verified = False
                email.unverify_reason = UnverifyReasons.SoftBounce

    @_machine.output()
    def _reset_transient_bounce(self):
        email = self._get_email()
        if email is not None:
            email.transient_bounces = 0

    # Transitions

    accepted.upon(
        deliver,
        enter=delivered,
        outputs=[_reset_transient_bounce],
        collector=lambda iterable: list(iterable)[-1],
    )
    accepted.upon(
        bounce,
        enter=bounced,
        outputs=[_reset_transient_bounce, _handle_bounce],
        collector=lambda iterable: list(iterable)[-1],
    )
    accepted.upon(
        soft_bounce,
        enter=soft_bounced,
        outputs=[_incr_transient_bounce],
        collector=lambda iterable: list(iterable)[-1],
    )

    # This is an OOTO (out-of-the-office) response; it's technically a
    # bounce, but we don't really want to treat it as a bounce. We'll
    # record the event for posterity though.
    delivered.upon(soft_bounce, enter=delivered, outputs=[])
    delivered.upon(
        bounce,
        enter=bounced,
        outputs=[_reset_transient_bounce, _handle_bounce],
        collector=lambda iterable: list(iterable)[-1],
    )
    delivered.upon(
        complain,
        enter=complained,
        outputs=[_handle_complaint],
        collector=lambda iterable: list(iterable)[-1],
    )

    # Serialization / Deserialization

    @_machine.serializer()
    def _save(self, state):
        return state

    @_machine.unserializer()
    def _restore(self, state):
        return state

    def save(self):
        self._email_message.status = EmailStatuses(self._save())
        return self._email_message

    @classmethod
    def load(cls, email_message):
        self = cls(email_message)
        self._restore(email_message.status.value)
        return self

    # Helper methods
    def _get_email(self):
        # If the email was missing previously, then we don't want subsequent
        # events to re-add it, so we'll just skip them.
        if self._email_message.missing:
            return

        db = object_session(self._email_message)
        email = (db.query(EmailAddress).filter(
            EmailAddress.email == self._email_message.to).first())

        # If our email is None, then we'll mark our log so that when we're
        # viewing the log, we can tell that it wasn't recorded.
        if email is None:
            self._email_message.missing = True

        return email
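A short sketch of the intended round-trip, assuming an email_message object as stored by the application: the machine is rebuilt from the persisted status, fed the provider's event, and serialized back. Note that the collector=lambda iterable: list(iterable)[-1] arguments make each input return only the last output's return value instead of automat's default list.

status = EmailStatus.load(email_message)   # restore state via the @_machine.unserializer
status.deliver()                           # e.g. accepted -> delivered
email_message = status.save()              # writes EmailStatuses(...) back onto the message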
Example #5
class TransitServerState(object):
    """
    Encapsulates the state-machine of the server side of a transit
    relay connection.

    Once the protocol has been told to relay (or to relay for a side)
    it starts passing all received bytes to the other side until it
    closes.
    """

    _machine = automat.MethodicalMachine()
    _client = None
    _buddy = None
    _token = None
    _side = None
    _first = None
    _mood = "empty"
    _total_sent = 0

    def __init__(self, pending_requests, usage_recorder):
        self._pending_requests = pending_requests
        self._usage = usage_recorder

    def get_token(self):
        """
        :returns str: a string describing our token. This will be "-" if
            we have no token yet, or "{16 chars}-<unsided>" if we have
            just a token or "{16 chars}-{16 chars}" if we have a token and
            a side.
        """
        d = "-"
        if self._token is not None:
            d = self._token[:16].decode("ascii")

            if self._side is not None:
                d += "-" + self._side.decode("ascii")
            else:
                d += "-<unsided>"
        return d

    @_machine.input()
    def connection_made(self, client):
        """
        A client has connected. May only be called once.

        :param ITransitClient client: our client.
        """
        # NB: the "only called once" is enforced by the state-machine;
        # this input is only valid for the "listening" state, to which
        # we never return.

    @_machine.input()
    def please_relay(self, token):
        """
        A 'please relay X' message has been received (the original version
        of the protocol).
        """

    @_machine.input()
    def please_relay_for_side(self, token, side):
        """
        A 'please relay X for side Y' message has been received (the
        second version of the protocol).
        """

    @_machine.input()
    def bad_token(self):
        """
        A bad token / relay line was received (e.g. couldn't be parsed)
        """

    @_machine.input()
    def got_partner(self, client):
        """
        The partner for this relay session has been found
        """

    @_machine.input()
    def connection_lost(self):
        """
        Our transport has failed.
        """

    @_machine.input()
    def partner_connection_lost(self):
        """
        Our partner's transport has failed.
        """

    @_machine.input()
    def got_bytes(self, data):
        """
        Some bytes have arrived (that aren't part of the handshake)
        """

    @_machine.output()
    def _remember_client(self, client):
        self._client = client

    # note that there is no corresponding "_forget_client" because we
    # may still want to access it after it is gone .. for example, to
    # get the .started_time for logging purposes

    @_machine.output()
    def _register_token(self, token):
        return self._real_register_token_for_side(token, None)

    @_machine.output()
    def _register_token_for_side(self, token, side):
        return self._real_register_token_for_side(token, side)

    @_machine.output()
    def _unregister(self):
        """
        remove us from the thing that remembers tokens and sides
        """
        return self._pending_requests.unregister(self._token, self._side, self)

    @_machine.output()
    def _send_bad(self):
        self._mood = "errory"
        self._client.send(b"bad handshake\n")
        if self._client.factory.log_requests:
            log.msg("transit handshake failure")

    @_machine.output()
    def _send_ok(self):
        self._client.send(b"ok\n")

    @_machine.output()
    def _send_impatient(self):
        self._client.send(b"impatient\n")
        if self._client.factory.log_requests:
            log.msg("transit impatience failure")

    @_machine.output()
    def _count_bytes(self, data):
        self._total_sent += len(data)

    @_machine.output()
    def _send_to_partner(self, data):
        self._buddy._client.send(data)

    @_machine.output()
    def _connect_partner(self, client):
        self._buddy = client
        self._client.connect_partner(client)

    @_machine.output()
    def _disconnect(self):
        self._client.disconnect()

    @_machine.output()
    def _disconnect_partner(self):
        self._client.disconnect_partner()

    # some outputs to record "usage" information ..
    @_machine.output()
    def _record_usage(self):
        if self._mood == "jilted":
            if self._buddy and self._buddy._mood == "happy":
                return
        self._usage.record(started=self._client.started_time,
                           buddy_started=self._buddy._client.started_time
                           if self._buddy is not None else None,
                           result=self._mood,
                           bytes_sent=self._total_sent,
                           buddy_bytes=self._buddy._total_sent
                           if self._buddy is not None else None)

    # some outputs to record the "mood" ..
    @_machine.output()
    def _mood_happy(self):
        self._mood = "happy"

    @_machine.output()
    def _mood_lonely(self):
        self._mood = "lonely"

    @_machine.output()
    def _mood_redundant(self):
        self._mood = "redundant"

    @_machine.output()
    def _mood_impatient(self):
        self._mood = "impatient"

    @_machine.output()
    def _mood_errory(self):
        self._mood = "errory"

    @_machine.output()
    def _mood_happy_if_first(self):
        """
        We disconnected first so we're only happy if we also connected
        first.
        """
        if self._first:
            self._mood = "happy"
        else:
            self._mood = "jilted"

    def _real_register_token_for_side(self, token, side):
        """
        A client has connected and sent a valid version 1 or version 2
        handshake. If the former, `side` will be None.

        In either case, we remember the tokens and register
        ourselves. This might result in 'got_partner' notifications to
        two state-machines if this is the second side for a given token.

        :param bytes token: the token
        :param bytes side: The side token (or None)
        """
        self._token = token
        self._side = side
        self._first = self._pending_requests.register(token, side, self)

    @_machine.state(initial=True)
    def listening(self):
        """
        Initial state, awaiting connection.
        """

    @_machine.state()
    def wait_relay(self):
        """
        Waiting for a 'relay' message
        """

    @_machine.state()
    def wait_partner(self):
        """
        Waiting for our partner to connect
        """

    @_machine.state()
    def relaying(self):
        """
        Relaying bytes to our partner
        """

    @_machine.state()
    def done(self):
        """
        Terminal state
        """

    listening.upon(
        connection_made,
        enter=wait_relay,
        outputs=[_remember_client],
    )
    listening.upon(
        connection_lost,
        enter=done,
        outputs=[_mood_errory],
    )

    wait_relay.upon(
        please_relay,
        enter=wait_partner,
        outputs=[_mood_lonely, _register_token],
    )
    wait_relay.upon(
        please_relay_for_side,
        enter=wait_partner,
        outputs=[_mood_lonely, _register_token_for_side],
    )
    wait_relay.upon(
        bad_token,
        enter=done,
        outputs=[_mood_errory, _send_bad, _disconnect, _record_usage],
    )
    wait_relay.upon(
        got_bytes,
        enter=done,
        outputs=[_count_bytes, _mood_errory, _disconnect, _record_usage],
    )
    wait_relay.upon(
        connection_lost,
        enter=done,
        outputs=[_disconnect, _record_usage],
    )

    wait_partner.upon(
        got_partner,
        enter=relaying,
        outputs=[_mood_happy, _send_ok, _connect_partner],
    )
    wait_partner.upon(
        connection_lost,
        enter=done,
        outputs=[_mood_lonely, _unregister, _record_usage],
    )
    wait_partner.upon(
        got_bytes,
        enter=done,
        outputs=[
            _mood_impatient, _send_impatient, _disconnect, _unregister,
            _record_usage
        ],
    )
    wait_partner.upon(
        partner_connection_lost,
        enter=done,
        outputs=[_mood_redundant, _disconnect, _record_usage],
    )

    relaying.upon(
        got_bytes,
        enter=relaying,
        outputs=[_count_bytes, _send_to_partner],
    )
    relaying.upon(
        connection_lost,
        enter=done,
        outputs=[
            _mood_happy_if_first, _disconnect_partner, _unregister,
            _record_usage
        ],
    )

    done.upon(
        connection_lost,
        enter=done,
        outputs=[],
    )
    done.upon(
        partner_connection_lost,
        enter=done,
        outputs=[],
    )
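A hedged sketch of how the protocol layer is expected to drive this machine; pending_requests, usage_recorder, client, token and side are hypothetical placeholders for the objects the real server wires in.

state = TransitServerState(pending_requests, usage_recorder)
state.connection_made(client)              # listening -> wait_relay
state.please_relay_for_side(token, side)   # wait_relay -> wait_partner (registers the token)
# once the matching side registers, got_partner(...) is injected; from then
# on each received chunk is counted and forwarded to the partner:
#   state.got_bytes(data)                  # relaying -> relaying
#   state.connection_lost()                # relaying -> done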
Example #6
@attr.s  # required for the attr.ib() fields declared below
class ClientMachine(object):
    """
    I am a client-side state machine that implements the "secret
    handshake", a cryptographic handshake protocol as described in the
    paper "Designing a Secret Handshake: Authenticated Key Exchange as
    a Capability System" by Dominic Tarr.
    """
    _machine = automat.MethodicalMachine()
    envelope_factory = attr.ib(
        validator=attr.validators.instance_of(SecretHandshakeEnvelopeFactory))
    notify_connected_handler = attr.ib(
        validator=attr.validators.instance_of(types.FunctionType))
    send_datagram_handler = attr.ib(
        validator=attr.validators.instance_of(types.FunctionType))
    receive_message_handler = attr.ib(
        validator=attr.validators.instance_of(types.FunctionType))
    disconnect_handler = attr.ib(
        validator=attr.validators.instance_of(types.FunctionType))

    # inputs

    @_machine.input()
    def start(self):
        "the machine connects"

    @_machine.input()
    def stop(self):
        "disconnet the machine"

    @_machine.input()
    def datagram_received(self, datagram):
        "the machine receives data"

    @_machine.input()
    def send(self, datagram):
        "send a datagram"

    # outputs

    @_machine.output()
    def _send_disconnect(self):
        close_command = {
            "type": "disconnect",
        }
        disconnect_envelope = self.envelope_factory.datagram_encrypt(
            cbor.dumps(close_command))
        self.send_datagram_handler(disconnect_envelope)
        self.disconnect_handler()

    @_machine.output()
    def _send_client_challenge(self):
        client_challenge = self.envelope_factory.create_client_challenge()
        self.send_datagram_handler(client_challenge)

    @_machine.output()
    def _verify_server_challenge(self, datagram):
        self.envelope_factory.is_server_challenge_verified(datagram)

        # send client auth envelope
        client_auth = self.envelope_factory.create_client_auth()
        self.send_datagram_handler(client_auth)

    @_machine.output()
    def _verify_server_accept(self, datagram):
        self.envelope_factory.verify_server_accept(datagram)
        self.notify_connected_handler()

    @_machine.output()
    def _send_datagram(self, datagram):
        """
        send a datagram: first serialize, then encrypt, and finally pass
        it to our send-datagram handler.
        """
        datagram_message = {"type": "datagram", "payload": datagram}
        datagram_envelope = self.envelope_factory.upstream_box.encrypt(
            cbor.dumps(datagram_message))
        self.send_datagram_handler(datagram_envelope)

    @_machine.output()
    def _receive_datagram(self, datagram):
        """
        post-handshake: decrypt received datagrams and deserialize
        them; forward the payload upstream if the message type is
        "datagram", or call our disconnect handler if the type is
        "disconnect".
        """
        serialized_message = self.envelope_factory.upstream_box.decrypt(
            datagram)
        message = cbor.loads(serialized_message)
        if message["type"] == "datagram":
            self.receive_message_handler(message["payload"])
        if message["type"] == "disconnect":
            self.disconnect_handler()

    # states

    @_machine.state(initial=True)
    def unconnected(self):
        "connection not yet initiated"

    @_machine.state()
    def challenge_sent(self):
        "challenge envelope sent"

    @_machine.state()
    def client_auth_sent(self):
        "cleint auth envelope sent"

    @_machine.state()
    def connected(self):
        "accept envelope received"

    @_machine.state()
    def disconnected(self):
        "disconnected state"

    unconnected.upon(start,
                     enter=challenge_sent,
                     outputs=[_send_client_challenge])
    challenge_sent.upon(datagram_received,
                        enter=client_auth_sent,
                        outputs=[_verify_server_challenge])
    client_auth_sent.upon(datagram_received,
                          enter=connected,
                          outputs=[_verify_server_accept])
    connected.upon(datagram_received,
                   enter=connected,
                   outputs=[_receive_datagram])
    connected.upon(send, enter=connected, outputs=[_send_datagram])
    connected.upon(stop, enter=disconnected, outputs=[_send_disconnect])
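A hedged usage sketch for the client machine above; the handler functions are stand-ins, and envelope_factory is assumed to be a real SecretHandshakeEnvelopeFactory.

outgoing = []


def send_datagram(datagram):
    outgoing.append(datagram)      # stand-in for transport.write()


def on_connected():
    print("handshake complete")


def on_message(payload):
    print("received", payload)


def on_disconnect():
    print("disconnected")


client = ClientMachine(
    envelope_factory=envelope_factory,   # assumed: a SecretHandshakeEnvelopeFactory
    notify_connected_handler=on_connected,
    send_datagram_handler=send_datagram,
    receive_message_handler=on_message,
    disconnect_handler=on_disconnect,
)
client.start()                     # unconnected -> challenge_sent
# then, for each datagram from the server:
#   client.datagram_received(server_datagram)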
Example #7
@attr.s  # required for the attr.ib() fields declared below
class MagicFile(object):
    """
    A single file in a single magic-folder.

    The API methods here ultimately drive a state-machine implemented
    using the Automat library. This will automatically produce Dot
    diagrams describing the state-machine. To produce and see the
    diagram:

        automat-visualize magic_folder.magic_file
        feh -ZF .automat_visualize/magic_folder.magic_file.MagicFile._machine.dot.png

    When debugging (potential) issues with the state-machine, it is
    useful to turn on tracing (or see the Eliot logs which also logs
    the state-transitions). See _debug_state_machine above.

    If the outcome of some operation is to inject a new input into the
    state-machine, it should be done with self._call_later(...).
    This allows the state transition to "happen fully" (i.e. any
    following outputs run) before a new input is injected. For
    testing, _call_later() can be made synchronous.
    """

    _path = attr.ib()  # FilePath
    _relpath = attr.ib()  # text, relative-path inside our magic-folder
    _factory = attr.ib(validator=attr.validators.instance_of(MagicFileFactory))
    _action = attr.ib()
    _queue_local = attr.ib(default=attr.Factory(list))
    _queue_remote = attr.ib(default=attr.Factory(list))
    _is_working = attr.ib(default=None)

    # to facilitate testing, sometimes we _don't_ want to use the real
    # reactor to wait one turn, or to delay for retry purposes.
    _call_later = attr.ib(default=None)  # Callable to schedule work at least one reactor turn later
    _delay_later = attr.ib(default=None)  # Callable to return a Deferred that fires "later"

    _machine = automat.MethodicalMachine()

    # debug
    set_trace = _machine._setTrace

    def __attrs_post_init__(self):
        from twisted.internet import reactor

        if self._call_later is None:

            def next_turn(f, *args, **kwargs):
                return reactor.callLater(0, f, *args, **kwargs)
            self._call_later = next_turn

        if self._delay_later is None:

            def delay(seconds, f, *args, **kwargs):
                d = deferLater(reactor, seconds, f, *args, **kwargs)
                self._factory._delays.append(d)

                def remove(arg):
                    self._factory._delays.remove(d)
                    return arg
                d.addBoth(remove)
                return d
            self._delay_later = delay

    # these are API methods intended to be called by other code in
    # magic-folder

    def create_update(self):
        """
        Creates a new local change, reading the content from our (local)
        file (regardless of whether it has 'actually' changed or
        not). If the file doesn't exist (any more), a 'deletion'
        snapshot is created. The file contents are stashed and the
        state-database updated before the returned Deferred fires.

        Eventually, the snapshot will be uploaded. Even if our process
        re-starts, the LocalSnapshot will be recovered from local
        state and further attempts to upload will be made.

        :returns Deferred: fires with None when our local state is
            persisted. This is before the upload has occurred.
        """
        return self._local_update()

    def found_new_remote(self, remote_snapshot):
        """
        A RemoteSnapshot that doesn't match our existing database entry
        has been found. It will be downloaded and applied (possibly
        resulting in conflicts).

        :param RemoteSnapshot remote_snapshot: the newly-discovered remote
        """
        self._remote_update(remote_snapshot)
        return self.when_idle()

    def local_snapshot_exists(self, local_snapshot):
        """
        State describing a LocalSnapshot exists. This should be the
        'youngest' one if there are multiple snapshots for this file.

        :returns None:
        """
        d = self._existing_local_snapshot(local_snapshot)
        return d

    @inline_callbacks
    def when_idle(self):
        """
        Wait until we are in an 'idle' state (up_to_date or conflicted or
        failed).
        """
        if self._is_working is None:
            return
        yield self._is_working
        return

    # all below methods are state-machine methods
    #
    # use "automat-visualize magic_folder" to get a nice diagram of
    # the state-machine which should make it easier to read through
    # this code.

    @_machine.state(initial=True)
    def _up_to_date(self):
        """
        This file is up-to-date (our local state matches all remotes and
        our Personal DMD matches our local state).
        """

    @_machine.state()
    def _downloading(self):
        """
        We are retrieving a remote update
        """

    @_machine.state()
    def _download_checking_ancestor(self):
        """
        We've found a remote update; check its parentage
        """

    @_machine.state()
    def _download_checking_local(self):
        """
        We're about to make local changes; make sure a filesystem change
        didn't sneak in.
        """

    @_machine.state()
    def _creating_snapshot(self):
        """
        We are creating a LocalSnapshot for a given update
        """

    @_machine.state()
    def _uploading(self):
        """
        We are uploading a LocalSnapshot
        """

    @_machine.state()
    def _checking_for_local_work(self):
        """
        Examining our queue of work for uploads
        """

    @_machine.state()
    def _checking_for_remote_work(self):
        """
        Examining our queue of work for downloads
        """

    @_machine.state()
    def _updating_personal_dmd_upload(self):
        """
        We are updating Tahoe state after an upload
        """

    @_machine.state()
    def _updating_personal_dmd_download(self):
        """
        We are updating Tahoe state after a download
        """

    @_machine.state()
    def _conflicted(self):
        """
        There is a conflict that must be resolved
        """

    @_machine.state()
    def _failed(self):
        """
        Something has gone completely wrong.
        """

    @_machine.input()
    def _local_update(self):
        """
        The file is changed locally (created or updated or deleted)
        """

    @_machine.input()
    def _queued_upload(self, snapshot):
        """
        We finished one upload, but there is more
        """

    @_machine.input()
    def _no_upload_work(self, snapshot):
        """
        We finished one upload and there is no more
        """

    @_machine.input()
    def _download_mismatch(self, snapshot, staged_path):
        """
        The local file does not match what we expect given database state
        """

    @_machine.input()
    def _download_matches(self, snapshot, staged_path, local_pathstate):
        """
        The local file (if any) matches what we expect given database
        state
        """

    @_machine.input()
    def _remote_update(self, snapshot):
        """
        The file has a remote update.

        XXX should this be 'snapshots' for multiple participant updates 'at once'?
        XXX does this include deletes?
        """

    @_machine.input()
    def _existing_local_snapshot(self, snapshot):
        """
        One or more LocalSnapshot instances already exist for this path.

        :param LocalSnapshot snapshot: the snapshot (which should be
            the 'youngest' if multiple linked snapshots exist).
        """

    @_machine.input()
    def _existing_conflict(self):
        """
        This path is already conflicted. Used when initializing a fresh
        file from the database.
        """

    @_machine.input()
    def _download_completed(self, snapshot, staged_path):
        """
        A remote Snapshot has been downloaded
        """

    @_machine.input()
    def _snapshot_completed(self, snapshot):
        """
        A LocalSnapshot for this update is created
        """

    @_machine.input()
    def _upload_completed(self, snapshot):
        """
        A LocalSnapshot has been turned into a RemoteSnapshot
        """

    @_machine.input()
    def _personal_dmd_updated(self, snapshot):
        """
        An update to our Personal DMD has been completed
        """

    @_machine.input()
    def _fatal_error_download(self, snapshot):
        """
        An error has occurred with no other recovery path.
        """

    @_machine.input()
    def _queued_download(self, snapshot):
        """
        There is queued RemoteSnapshot work
        """

    @_machine.input()
    def _no_download_work(self, snapshot):
        """
        There is no queued RemoteSnapshot work
        """

    @_machine.input()
    def _conflict_resolution(self, snapshot):
        """
        A conflicted file has been resolved
        """

    @_machine.input()
    def _ancestor_matches(self, snapshot, staged_path):
        """
        snapshot is our ancestor
        """

    @_machine.input()
    def _ancestor_mismatch(self, snapshot, staged_path):
        """
        snapshot is not our ancestor
        """

    @_machine.input()
    def _ancestor_we_are_newer(self, snapshot, staged_path):
        """
        The proposed update is _our_ ancestor.
        """

    @_machine.input()
    def _cancel(self, snapshot):
        """
        We have been cancelled
        """

    @_machine.output()
    def _begin_download(self, snapshot):
        """
        Download a given Snapshot (including its content)
        """

        def downloaded(staged_path):
            self._call_later(self._download_completed, snapshot, staged_path)

        retry_delay_sequence = _delay_sequence()

        def error(f):
            if f.check(CancelledError):
                self._factory._folder_status.error_occurred(
                    "Cancelled: {}".format(self._relpath)
                )
                self._call_later(self._cancel, snapshot)
                return

            self._factory._folder_status.error_occurred(
                "Failed to download snapshot for '{}'.".format(
                    self._relpath,
                )
            )
            with self._action.context():
                write_failure(f)
            delay_amt = next(retry_delay_sequence)
            delay = self._delay_later(delay_amt, perform_download)
            delay.addErrback(error)
            return None

        @inline_callbacks
        def perform_download():
            if snapshot.content_cap is None:
                d = succeed(None)
            else:
                d = self._factory._download_parallel.acquire()

                def work(ignore):
                    self._factory._folder_status.download_started(self._relpath)
                    return maybeDeferred(
                        self._factory._magic_fs.download_content_to_staging,
                        snapshot.relpath,
                        snapshot.content_cap,
                        self._factory._tahoe_client,
                    )
                d.addCallback(work)

                def clean(arg):
                    self._factory._download_parallel.release()
                    return arg
                d.addBoth(clean)

            d.addCallback(downloaded)
            d.addErrback(error)
            return d

        return perform_download()

    @_machine.output()
    def _check_local_update(self, snapshot, staged_path):
        """
        Detect a 'last minute' change by comparing the state of our local
        file to that of the database.

        In the case of a brand-new file we've never seen: the database
        will have no entry, and there is no local file.

        In the case of an update: the pathinfo of the file right now
        must match what's in the database.
        """
        try:
            current_pathstate = self._factory._config.get_currentsnapshot_pathstate(self._relpath)
        except KeyError:
            current_pathstate = None

        # we give this downstream to the mark-overwrite, ultimately,
        # so it can double-check that there was no last-millisecond
        # change to the local path (note local_pathinfo.state will be
        # None if there is no file at all here)
        local_pathinfo = get_pathinfo(self._path)

        # if we got a local-update during the "download" branch, we
        # will have queued it .. but it should be impossible for a
        # snapshot to be in our local database
        try:
            self._factory._config.get_local_snapshot(self._relpath)
            assert False, "unexpected local snapshot; state-machine inconsistency?"
        except KeyError:
            pass

        # now, determine if we've found a local update
        if current_pathstate is None:
            if local_pathinfo.exists:
                self._call_later(self._download_mismatch, snapshot, staged_path)
                return
        else:
            # we've seen this file before so its pathstate should
            # match what we expect according to the database .. or
            # else some update happened meantime.
            if current_pathstate != local_pathinfo.state:
                self._call_later(self._download_mismatch, snapshot, staged_path)
                return

        self._call_later(self._download_matches, snapshot, staged_path, local_pathinfo.state)

    @_machine.output()
    def _check_ancestor(self, snapshot, staged_path):
        """
        Check if the ancestor for this remote update is correct or not.
        """
        try:
            remote_cap = self._factory._config.get_remotesnapshot(snapshot.relpath)
        except KeyError:
            remote_cap = None

        # if remote_cap is None, we've never seen this before (so the
        # ancestor is always correct)
        if remote_cap is not None:
            ancestor = self._factory._remote_cache.is_ancestor_of(remote_cap, snapshot.capability)
            if not ancestor:
                # if the incoming remotesnapshot is actually an
                # ancestor of _our_ snapshot, then we have nothing to
                # do because we are newer
                if self._factory._remote_cache.is_ancestor_of(snapshot.capability, remote_cap):
                    self._call_later(self._ancestor_we_are_newer, snapshot, staged_path)
                    return
                Message.log(
                    message_type="ancestor_mismatch",
                )
                self._call_later(self._ancestor_mismatch, snapshot, staged_path)
                return
        self._call_later(self._ancestor_matches, snapshot, staged_path)
        return

    @_machine.output()
    def _perform_remote_update(self, snapshot, staged_path, local_pathstate):
        """
        Resolve a remote update locally

        :param PathState local_pathstate: the PathState of the local
            file as it existed _right_ before we concluded it was fine
            (None if there was no local file before now)
        """
        # between when we checked for a local conflict while in the
        # _download_checking_local and when we _actually_ overwrite
        # the file (inside .mark_overwrite) there is an additional
        # window for last-second changes to happen .. we do the
        # equivalent of the dance described in detail in
        # https://magic-folder.readthedocs.io/en/latest/proposed/magic-folder/remote-to-local-sync.html#earth-dragons-collisions-between-local-filesystem-operations-and-downloads
        # although that spec doesn't include when to remove the
        # ".backup" files -- we use local_pathstate to double-check
        # that.

        if snapshot.content_cap is None:
            self._factory._magic_fs.mark_delete(snapshot.relpath)
            path_state = None
        else:
            try:
                path_state = self._factory._magic_fs.mark_overwrite(
                    snapshot.relpath,
                    snapshot.metadata["modification_time"],
                    staged_path,
                    local_pathstate,
                )
            except OSError as e:
                self._factory._folder_status.error_occurred(
                    "Failed to overwrite file '{}': {}".format(snapshot.relpath, str(e))
                )
                with self._action.context():
                    write_traceback()
                self._call_later(self._fatal_error_download, snapshot)
                return
            except BackupRetainedError as e:
                # this means that the mark_overwrite() code has
                # noticed some mismatch to the replaced file or its
                # .snaptmp version -- so this is a conflict, but we
                # didn't detect it in the _download_check_local since
                # it happened in the window _after_ that check.
                self._factory._folder_status.error_occurred(
                    "Unexpected content in '{}': {}".format(snapshot.relpath, str(e))
                )
                with self._action.context():
                    write_traceback()
                # mark as a conflict -- we use the retained tmpfile as
                # the original "staged" path here, causing "our"
                # emergency data to be in the conflict file .. maybe
                # this should just be the original tmpfile and we
                # shouldn't mess with it further?
                self._call_later(self._download_mismatch, snapshot, e.path)
                return

        # Note, if we crash here (after moving the file into place but
        # before noting that in our database) then we could produce
        # LocalSnapshots referencing the wrong parent. We will no
        # longer produce snapshots with the wrong parent once we
        # re-run and get past this point.

        # remember the last remote we've downloaded
        self._factory._config.store_downloaded_snapshot(
            snapshot.relpath, snapshot, path_state
        )

        def updated_snapshot(arg):
            self._factory._config.store_currentsnapshot_state(
                snapshot.relpath,
                path_state,
            )
            self._call_later(self._personal_dmd_updated, snapshot)
            return

        retry_delay_sequence = _delay_sequence()

        # It probably makes sense to have a separate state for this
        # part ("update remote dmd"). If we crash here (e.g. Tahoe is
        # down, keep retrying, but subsequently crash) and then
        # restart, we just won't update the remote DMD. So "something"
        # should notice at startup that 'store_downloaded_snapshot'
        # has run but not this part (because the database has a
        # different entry than the remote DMD) and inject an event to
        # get us here.

        def error(f):
            # XXX really need to "more visibly" log things like syntax
            # errors etc...
            write_failure(f)
            if f.check(CancelledError):
                self._factory._folder_status.error_occurred(
                    "Cancelled: {}".format(self._relpath)
                )
                self._call_later(self._cancel, snapshot)
                return

            self._factory._folder_status.error_occurred(
                "Error updating personal DMD: {}".format(f.getErrorMessage())
            )
            with self._action.context():
                write_failure(f)
            delay_amt = next(retry_delay_sequence)
            delay = self._delay_later(delay_amt, perform_update)
            delay.addErrback(error)
            return None

        def perform_update():
            d = maybeDeferred(
                self._factory._write_participant.update_snapshot,
                snapshot.relpath,
                snapshot.capability,
            )
            d.addCallback(updated_snapshot)
            d.addErrback(error)
            return d

        d = perform_update()
        return d

    @_machine.output()
    def _status_upload_queued(self):
        self._factory._folder_status.upload_queued(self._relpath)

    # the uploader does this status because only it knows when the
    # item is out of the queue and "actually" starts uploading..
    # @_machine.output()
    # def _status_upload_started(self):
    #     self._factory._folder_status.upload_started(self._relpath)

    @_machine.output()
    def _status_upload_finished(self):
        self._factory._folder_status.upload_finished(self._relpath)

    @_machine.output()
    def _status_download_queued(self):
        self._factory._folder_status.download_queued(self._relpath)

    @_machine.output()
    def _status_download_finished(self):
        self._factory._folder_status.download_finished(self._relpath)

    @_machine.output()
    def _cancel_queued_work(self):
        for d in self._queue_local:
            d.cancel()
        for d in self._queue_remote:
            d.cancel()

    @_machine.output()
    def _create_local_snapshot(self):
        """
        Create a LocalSnapshot for this update
        """
        d = self._factory._local_snapshot_service.add_file(self._path)

        # when the local snapshot gets created, it _should_ have the
        # next thing in our queue (if any) as its parent (see the note
        # in completed() below)

        def completed(snap):
            # _queue_local contains Deferreds .. but ideally we'd
            # check if "the thing those deferreds resolves to" is the
            # right one .. namely, the _next_ thing in the queue
            # should be (one of) "snap"'s parents
            self._call_later(self._snapshot_completed, snap)
            return snap

        d.addCallback(completed)
        # errback? (re-try?)  XXX probably have to have a 'failed'
        # state? or might re-trying work eventually? (I guess
        # .. maybe? Like if the disk is currently too full to copy but
        # it might eventually get less-full?)
        return d

    @_machine.output()
    def _begin_upload(self, snapshot):
        """
        Begin uploading a LocalSnapshot (to create a RemoteSnapshot)
        """
        assert snapshot is not None, "Internal inconsistency: no snapshot _begin_upload"
        with self._action.context():
            d = self._factory._uploader.upload_snapshot(snapshot)

            retry_delay_sequence = _delay_sequence()

            def upload_error(f, snap):
                write_failure(f)
                if f.check(CancelledError):
                    self._factory._folder_status.error_occurred(
                        "Cancelled: {}".format(self._relpath)
                    )
                    self._call_later(self._cancel, snapshot)
                    return
                if f.check(ResponseNeverReceived):
                    for reason in f.value.reasons:
                        if reason.check(CancelledError):
                            self._factory._folder_status.error_occurred(
                                "Cancelled: {}".format(self._relpath)
                            )
                            self._call_later(self._cancel, snapshot)
                            return

                # upon errors, we wait a little and then retry,
                # putting the item back in the uploader queue
                self._factory._folder_status.error_occurred(
                    "Error uploading {}: {}".format(self._relpath, f.getErrorMessage())
                )
                delay_amt = next(retry_delay_sequence)
                delay = self._delay_later(delay_amt, self._factory._uploader.upload_snapshot, snap)
                delay.addCallback(got_remote)
                delay.addErrback(upload_error, snap)
                return delay

            def got_remote(remote):
                # successfully uploaded
                snapshot.remote_snapshot = remote
                self._factory._remote_cache._cached_snapshots[remote.capability.danger_real_capability_string()] = remote
                self._call_later(self._upload_completed, snapshot)

            d.addCallback(got_remote)
            d.addErrback(upload_error, snapshot)
            return d

    @_machine.output()
    def _mark_download_conflict(self, snapshot, staged_path):
        """
        Mark a conflict for this remote snapshot
        """
        conflict_path = "{}.conflict-{}".format(
            self._relpath,
            snapshot.author.name
        )
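        # e.g. "foo/bar.txt.conflict-alice" for a hypothetical relpath and
        # author; mark_conflict() presumably places the staged conflicting
        # content at this path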
        self._factory._magic_fs.mark_conflict(self._relpath, conflict_path, staged_path)
        self._factory._config.add_conflict(snapshot)

    @_machine.output()
    def _update_personal_dmd_upload(self, snapshot):
        """
        Update our personal DMD (after an upload)
        """

        retry_delay_sequence = _delay_sequence()

        def error(f):
            write_failure(f)
            if f.check(CancelledError):
                self._factory._folder_status.error_occurred(
                    "Cancelled: {}".format(self._relpath)
                )
                self._call_later(self._cancel, snapshot)
                return

            self._factory._folder_status.error_occurred(
                "Error updating personal DMD: {}".format(f.getErrorMessage())
            )
            with self._action.context():
                write_failure(f)
            delay_amt = next(retry_delay_sequence)
            delay = self._delay_later(delay_amt, update_personal_dmd)
            delay.addErrback(error)
            return None

        @inline_callbacks
        def update_personal_dmd():
            remote_snapshot = snapshot.remote_snapshot
            assert remote_snapshot is not None, "remote-snapshot must exist"
            # update the entry in the DMD
            yield self._factory._write_participant.update_snapshot(
                snapshot.relpath,
                remote_snapshot.capability,
            )

            # if removing the stashed content fails here, we MUST move
            # on to delete the LocalSnapshot because we may not be
            # able to re-create the Snapshot (e.g. maybe the stashed
            # content is "partially deleted" or otherwise unreadable)
            # and we _don't_ want to deserialize the LocalSnapshot if
            # the process restarts
            if snapshot.content_path is not None:
                try:
                    # Remove the local snapshot content from the stash area.
                    snapshot.content_path.remove()
                except Exception as e:
                    self._factory._folder_status.error_occurred(
                        "Failed to remove cache file '{}': {}".format(
                            snapshot.content_path.path,
                            str(e),
                        )
                    )

            # Remove the LocalSnapshot from the db.
            yield self._factory._config.delete_local_snapshot(snapshot, remote_snapshot)

            # Signal ourselves asynchronously so that the machine may
            # finish this output (and possibly more) before dealing
            # with this new input
            self._call_later(self._personal_dmd_updated, snapshot)
        d = update_personal_dmd()
        d.addErrback(error)

    @_machine.output()
    def _queue_local_update(self):
        """
        Save this update for later processing (in _check_for_local_work)
        """
        d = self._factory._local_snapshot_service.add_file(self._path)
        self._queue_local.append(d)

        # ideally, we'd double-check the semantics of this snapshot
        # when it is created: it should have as parents anything else
        # in the queue -- but of course, we can't check until _those_
        # snapshots are themselves created.

        # XXX what do we do if this snapshot fails / errback()s? go to
        # "failed" state?

        # return a "fresh" Deferred, because callers can't be trusted
        # not to mess with our return-value
        ret_d = Deferred()

        def failed(f):
            # XXX confirm this still works correctly for CancelledError
            ret_d.errback(f)

        def got_snap(snap):
            ret_d.callback(snap)
            return snap
        d.addCallbacks(got_snap, failed)
        return ret_d

    @_machine.output()
    def _queue_remote_update(self, snapshot):
        """
        Save this remote snapshot for later processing (in _check_for_remote_work)
        """
        d = Deferred()
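        # this Deferred fires once the queued snapshot is actually
        # processed (see _check_for_remote_work below)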
        self._queue_remote.append((d, snapshot))
        return d

    @_machine.output()
    def _check_for_local_work(self):
        """
        Inject any queued local updates
        """
        if self._queue_local:
            snapshot_d = self._queue_local.pop(0)

            def got_snapshot(snap):
                self._call_later(self._snapshot_completed, snap)
                return snap
            snapshot_d.addCallback(got_snapshot)
            return
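        # nothing queued locally; tell the machine so it can go on to
        # check for queued remote work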
        self._call_later(self._no_upload_work, None)

    @_machine.output()
    def _check_for_remote_work(self):
        """
        Inject any saved remote updates.
        """
        if self._queue_remote:
            d, snapshot = self._queue_remote.pop(0)

            def do_remote_update(done_d, snap):
                update_d = self._queued_download(snap)
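                # fire the Deferred handed out by _queue_remote_update,
                # whether the update succeeded or failed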
                update_d.addBoth(done_d.callback)
            self._call_later(do_remote_update, d, snapshot)
            return
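        # no queued downloads either; signal the machine so it can
        # return to idle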
        self._call_later(self._no_download_work, None)

    @_machine.output()
    def _working(self):
        """
        We are doing some work, so this file is not currently idle.
        """
        assert self._is_working is None, "Internal inconsistency"
        self._is_working = Deferred()
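        # fired (with None) by _done_working once this file is idle again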

    @_machine.output()
    def _done_working(self):
        """
        Alert any listeners waiting for this file to become idle (note
        that 'conflicted' also counts as an idle state)
        """
        assert self._is_working is not None, "Internal inconsistency"
        d = self._is_working
        self._is_working = None
        d.callback(None)
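
    # The .upon() calls below form the state-transition table: for each
    # (state, input) pair they declare the state to enter and the outputs
    # to run. The collector reduces the outputs' return values to the
    # single value handed back to whoever called the input; "_last_one"
    # presumably keeps only the final output's return value.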

    _up_to_date.upon(
        _remote_update,
        enter=_downloading,
        outputs=[_working, _status_download_queued, _begin_download],
        collector=_last_one,
    )

    _download_checking_ancestor.upon(
        _remote_update,
        enter=_download_checking_ancestor,
        outputs=[_status_download_queued, _queue_remote_update],
        collector=_last_one,
    )

    _download_checking_ancestor.upon(
        _ancestor_mismatch,
        enter=_conflicted,
        outputs=[_mark_download_conflict, _status_download_finished, _done_working],
        collector=_last_one,
    )
    _download_checking_ancestor.upon(
        _ancestor_matches,
        enter=_download_checking_local,
        outputs=[_check_local_update],
        collector=_last_one,
    )
    _download_checking_ancestor.upon(
        _ancestor_we_are_newer,
        enter=_up_to_date,
        outputs=[_status_download_finished, _done_working],
        collector=_last_one,
    )

    _downloading.upon(
        _download_completed,
        enter=_download_checking_ancestor,
        outputs=[_check_ancestor],
        collector=_last_one,
    )
    _downloading.upon(
        _remote_update,
        enter=_downloading,
        outputs=[_status_download_queued, _queue_remote_update],
        collector=_last_one,
    )
    _downloading.upon(
        _cancel,
        enter=_failed,
        outputs=[_cancel_queued_work, _status_download_finished, _done_working],
        collector=_last_one,
    )

    _download_checking_local.upon(
        _download_matches,
        enter=_updating_personal_dmd_download,
        outputs=[_perform_remote_update],
        collector=_last_one,
    )
    _download_checking_local.upon(
        _download_mismatch,
        enter=_conflicted,
        outputs=[_mark_download_conflict, _status_download_finished, _done_working],
        collector=_last_one,
    )

    _up_to_date.upon(
        _local_update,
        enter=_creating_snapshot,
        outputs=[_working, _status_upload_queued, _create_local_snapshot],
        collector=_last_one,
    )
    _up_to_date.upon(
        _existing_conflict,
        enter=_conflicted,
        outputs=[],  # up_to_date and conflicted are both "idle" states
    )
    _up_to_date.upon(
        _existing_local_snapshot,
        enter=_uploading,
        outputs=[_working, _status_upload_queued, _begin_upload],
        collector=_last_one,
    )
    _creating_snapshot.upon(
        _snapshot_completed,
        enter=_uploading,
        outputs=[_begin_upload],
        collector=_last_one,
    )

    # XXX we should maybe actually re-start this snapshot: this case
    # means we've found a change _while_ we're trying to create the
    # snapshot, so it could be half-created. (That should be impossible
    # if we wait for the snapshot in the scanner, but there is still a
    # window for the API to hit it.)
    _creating_snapshot.upon(
        _local_update,
        enter=_creating_snapshot,
        outputs=[_queue_local_update],
        collector=_last_one,
    )
    _uploading.upon(
        _upload_completed,
        enter=_updating_personal_dmd_upload,
        outputs=[_update_personal_dmd_upload],
        collector=_last_one,
    )
    _uploading.upon(
        _local_update,
        enter=_uploading,
        outputs=[_queue_local_update],
        collector=_last_one,
    )
    _uploading.upon(
        _cancel,
        enter=_failed,
        outputs=[_cancel_queued_work, _status_upload_finished, _done_working],
        collector=_last_one,
    )

    # there is async-work done by _update_personal_dmd_upload, after
    # which personal_dmd_updated is input back to the machine
    _updating_personal_dmd_upload.upon(
        _personal_dmd_updated,
        enter=_checking_for_local_work,
        outputs=[_status_upload_finished, _check_for_local_work],
        collector=_last_one,
    )
    _updating_personal_dmd_upload.upon(
        _cancel,
        enter=_failed,
        outputs=[_cancel_queued_work, _status_upload_finished, _done_working],
        collector=_last_one,
    )

    # downloader updates
    _updating_personal_dmd_download.upon(
        _personal_dmd_updated,
        enter=_checking_for_local_work,
        outputs=[_status_download_finished, _check_for_local_work],
        collector=_last_one,
    )
    _updating_personal_dmd_download.upon(
        _cancel,
        enter=_failed,
        outputs=[_cancel_queued_work, _status_download_finished, _done_working],
        collector=_last_one,
    )
    _updating_personal_dmd_download.upon(
        _fatal_error_download,
        enter=_failed,
        outputs=[_status_download_finished, _done_working],
        collector=_last_one,
    )
    # this is the "last-minute" conflict window -- that is, when
    # .mark_overwrite() determines something wrote to the tempfile (or
    # wrote to the "real" file immediately after the state-machine
    # check)
    _updating_personal_dmd_download.upon(
        _download_mismatch,
        enter=_conflicted,
        outputs=[_mark_download_conflict, _status_download_finished, _done_working],
        collector=_last_one,
    )

    _checking_for_local_work.upon(
        _snapshot_completed,
        enter=_uploading,
        outputs=[_status_upload_queued, _begin_upload],
        collector=_last_one,
    )
    _checking_for_local_work.upon(
        _no_upload_work,
        enter=_checking_for_remote_work,
        outputs=[_check_for_remote_work],
        collector=_last_one,
    )
    _checking_for_local_work.upon(
        _remote_update,
        enter=_checking_for_local_work,
        outputs=[_queue_remote_update],
        collector=_last_one,
    )

    _checking_for_remote_work.upon(
        _queued_download,
        enter=_downloading,
        outputs=[_begin_download],
        collector=_last_one,
    )
    _checking_for_remote_work.upon(
        _no_download_work,
        enter=_up_to_date,
        outputs=[_done_working],
        collector=_last_one,
    )
    _checking_for_remote_work.upon(
        _local_update,
        enter=_checking_for_remote_work,
        outputs=[_queue_local_update],
        collector=_last_one,
    )

    # if we get a remote-update while we're in
    # "updating_personal_dmd_upload" we will enter _conflicted; the DMD
    # will still be updated (good) and we then receive the
    # "personal_dmd_updated" notification
    _conflicted.upon(
        _personal_dmd_updated,
        enter=_conflicted,
        outputs=[_status_upload_finished],
        collector=_last_one,
    )

    # in these transitions we queue the remote updates (instead of going
    # straight to 'conflicted') so that we still download the content,
    # which is required for the conflict-file we'll subsequently create
    # (we will hit that conflict because the snapshot we're creating
    # can't have the right parent yet)
    _creating_snapshot.upon(
        _remote_update,
        enter=_creating_snapshot,
        outputs=[_status_download_queued, _queue_remote_update],
        collector=_last_one,
    )
    _uploading.upon(
        _remote_update,
        enter=_uploading,
        outputs=[_status_download_queued, _queue_remote_update],
        collector=_last_one,
    )
    _downloading.upon(
        _local_update,
        enter=_downloading,
        outputs=[_queue_local_update],
        collector=_last_one,
    )
    _updating_personal_dmd_download.upon(
        _remote_update,
        enter=_updating_personal_dmd_download,
        outputs=[_status_download_queued, _queue_remote_update],
        collector=_last_one,
    )
    _updating_personal_dmd_download.upon(
        _local_update,
        enter=_updating_personal_dmd_download,
        outputs=[_queue_local_update],  # XXX or, can/should we go straight to conflicted?
        collector=_last_one,
    )

    _updating_personal_dmd_upload.upon(
        _remote_update,
        enter=_updating_personal_dmd_upload,
        outputs=[_status_download_queued, _queue_remote_update],
        collector=_last_one,
    )
    _updating_personal_dmd_upload.upon(
        _local_update,
        enter=_updating_personal_dmd_upload,
        outputs=[_queue_local_update],
        collector=_last_one,
    )

    _conflicted.upon(
        _conflict_resolution,
        enter=_uploading,
        outputs=[_begin_upload],
        collector=_last_one,
    )
    _conflicted.upon(
        _remote_update,
        enter=_conflicted,
        outputs=[],  # probably want to .. do something? remember it?
    )
    _conflicted.upon(
        _local_update,
        enter=_conflicted,
        outputs=[],  # nothing, likely: user messing with resolution file?
    )

    _failed.upon(
        _local_update,
        enter=_failed,
        outputs=[],  # should perhaps record (another) error?
    )
    _failed.upon(
        _remote_update,
        enter=_failed,
        outputs=[],  # should perhaps record (another) error?
    )
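
    # A minimal sketch (an assumption, not necessarily this project's
    # actual definition) of the "_last_one" collector used in the
    # transitions above: automat hands the collector an iterable of every
    # output's return value for the transition, and the name suggests it
    # keeps only the last one, e.g.:
    #
    #     def _last_one(results):
    #         return list(results)[-1]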