Example no. 1
 def test_multiple_ports(self):
     n = EmptyNode()
     n.basedir = self.mktemp()
     n.config_fname = os.path.join(n.basedir, "tahoe.cfg")
     os.mkdir(n.basedir)
     os.mkdir(os.path.join(n.basedir, "private"))
     port1 = iputil.allocate_tcp_port()
     port2 = iputil.allocate_tcp_port()
     port = ("tcp:%d:interface=127.0.0.1,tcp:%d:interface=127.0.0.1" %
             (port1, port2))
     location = "tcp:localhost:%d,tcp:localhost:%d" % (port1, port2)
     with open(n.config_fname, "w") as f:
         f.write(BASE_CONFIG)
         f.write("tub.port = %s\n" % port)
         f.write("tub.location = %s\n" % location)
     # we're doing a lot of calling-into-setup-methods here, it might be
     # better to just create a real Node instance, I'm not sure.
     n.config = read_config(n.basedir, "client.port")
     n.check_privacy()
     n.services = []
     n.create_i2p_provider()
     n.create_tor_provider()
     n.init_connections()
     n.set_tub_options()
     t = FakeTub()
     with mock.patch("allmydata.node.Tub", return_value=t):
         n.create_main_tub()
     self.assertEqual(t.listening_ports, [
         "tcp:%d:interface=127.0.0.1" % port1,
         "tcp:%d:interface=127.0.0.1" % port2
     ])
Example no. 2
 def test_multiple_ports(self):
     n = EmptyNode()
     n.basedir = self.mktemp()
     n.config_fname = os.path.join(n.basedir, "tahoe.cfg")
     os.mkdir(n.basedir)
     os.mkdir(os.path.join(n.basedir, "private"))
     port1 = iputil.allocate_tcp_port()
     port2 = iputil.allocate_tcp_port()
     port = ("tcp:%d:interface=127.0.0.1,tcp:%d:interface=127.0.0.1" %
             (port1, port2))
     location = "tcp:localhost:%d,tcp:localhost:%d" % (port1, port2)
     with open(n.config_fname, "w") as f:
         f.write(BASE_CONFIG)
         f.write("tub.port = %s\n" % port)
         f.write("tub.location = %s\n" % location)
     # we're doing a lot of calling-into-setup-methods here, it might be
     # better to just create a real Node instance, I'm not sure.
     n.read_config()
     n.check_privacy()
     n.services = []
     n.create_i2p_provider()
     n.create_tor_provider()
     n.init_connections()
     n.set_tub_options()
     t = FakeTub()
     with mock.patch("allmydata.node.Tub", return_value=t):
         n.create_main_tub()
     self.assertEqual(t.listening_ports,
                      ["tcp:%d:interface=127.0.0.1" % port1,
                       "tcp:%d:interface=127.0.0.1" % port2])
Example no. 3
    def test_multiple_ports(self):
        basedir = self.mktemp()
        create_node_dir(basedir, "testing")
        port1 = iputil.allocate_tcp_port()
        port2 = iputil.allocate_tcp_port()
        port = ("tcp:%d:interface=127.0.0.1,tcp:%d:interface=127.0.0.1" %
                (port1, port2))
        location = "tcp:localhost:%d,tcp:localhost:%d" % (port1, port2)
        with open(os.path.join(basedir, "tahoe.cfg"), "w") as f:
            f.write(BASE_CONFIG)
            f.write("[node]\n")
            f.write("tub.port = %s\n" % port)
            f.write("tub.location = %s\n" % location)

        config = client.read_config(basedir, "client.port")
        i2p_provider = mock.Mock()
        tor_provider = mock.Mock()
        dfh, fch = create_connection_handlers(None, config, i2p_provider, tor_provider)
        tub_options = create_tub_options(config)
        t = FakeTub()

        with mock.patch("allmydata.node.Tub", return_value=t):
            create_main_tub(config, tub_options, dfh, fch, i2p_provider, tor_provider)
        self.assertEqual(t.listening_ports,
                         ["tcp:%d:interface=127.0.0.1" % port1,
                          "tcp:%d:interface=127.0.0.1" % port2])
Example no. 4
    def test_multiple_ports(self):
        basedir = self.mktemp()
        create_node_dir(basedir, "testing")
        port1 = iputil.allocate_tcp_port()
        port2 = iputil.allocate_tcp_port()
        port = ("tcp:%d:interface=127.0.0.1,tcp:%d:interface=127.0.0.1" %
                (port1, port2))
        location = "tcp:localhost:%d,tcp:localhost:%d" % (port1, port2)
        with open(os.path.join(basedir, "tahoe.cfg"), "w") as f:
            f.write(BASE_CONFIG)
            f.write("tub.port = %s\n" % port)
            f.write("tub.location = %s\n" % location)

        config = client.read_config(basedir, "client.port")
        i2p_provider = mock.Mock()
        tor_provider = mock.Mock()
        dfh, fch = create_connection_handlers(None, config, i2p_provider, tor_provider)
        tub_options = create_tub_options(config)
        t = FakeTub()

        with mock.patch("allmydata.node.Tub", return_value=t):
            create_main_tub(config, tub_options, dfh, fch, i2p_provider, tor_provider)
        self.assertEqual(t.listening_ports,
                         ["tcp:%d:interface=127.0.0.1" % port1,
                          "tcp:%d:interface=127.0.0.1" % port2])
Example no. 5
def create_stats_gatherer(config):
    err = config.stderr
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print("The base directory %s is not empty." %
                  quote_output(basedir),
                  file=err)
            print("To avoid clobbering anything, I am going to quit now.",
                  file=err)
            print("Please use a different directory, or empty this one.",
                  file=err)
            return -1
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    write_tac(basedir, "stats-gatherer")
    if config["hostname"]:
        portnum = iputil.allocate_tcp_port()
        location = "tcp:%s:%d" % (config["hostname"], portnum)
        port = "tcp:%d" % portnum
    else:
        location = config["location"]
        port = config["port"]
    fileutil.write(os.path.join(basedir, "location"), location + "\n")
    fileutil.write(os.path.join(basedir, "port"), port + "\n")
    return 0
Example no. 6
def create_stats_gatherer(config):
    err = config.stderr
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print("The base directory %s is not empty." % quote_output(basedir), file=err)
            print("To avoid clobbering anything, I am going to quit now.", file=err)
            print("Please use a different directory, or empty this one.", file=err)
            return -1
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    write_tac(basedir, "stats-gatherer")
    if config["hostname"]:
        portnum = iputil.allocate_tcp_port()
        location = "tcp:%s:%d" % (config["hostname"], portnum)
        port = "tcp:%d" % portnum
    else:
        location = config["location"]
        port = config["port"]
    fileutil.write(os.path.join(basedir, "location"), location+"\n")
    fileutil.write(os.path.join(basedir, "port"), port+"\n")
    return 0
Example no. 7
def write_node_config(c, config):
    # this is shared between clients and introducers
    c.write("# -*- mode: conf; coding: utf-8 -*-\n")
    c.write("\n")
    c.write("# This file controls the configuration of the Tahoe node that\n")
    c.write("# lives in this directory. It is only read at node startup.\n")
    c.write("# For details about the keys that can be set here, please\n")
    c.write("# read the 'docs/configuration.rst' file that came with your\n")
    c.write("# Tahoe installation.\n")
    c.write("\n\n")

    if config["hide-ip"]:
        c.write("[connections]\n")
        c.write("tcp = tor\n")

    c.write("[node]\n")
    nickname = argv_to_unicode(config.get("nickname") or "")
    c.write("nickname = %s\n" % (nickname.encode("utf-8"),))
    if config["hide-ip"]:
        c.write("reveal-IP-address = false\n")
    else:
        c.write("reveal-IP-address = true\n")

    # TODO: validate webport
    webport = argv_to_unicode(config.get("webport") or "none")
    if webport.lower() == "none":
        webport = ""
    c.write("web.port = %s\n" % (webport.encode("utf-8"),))
    c.write("web.static = public_html\n")

    listeners = config["listen"].split(",")
    if listeners == ["none"]:
        c.write("tub.port = disabled\n")
        c.write("tub.location = disabled\n")
    else:
        if "tor" in listeners:
            raise NotImplementedError("--listen=tor is under development, " "see ticket #2490 for details")
        if "i2p" in listeners:
            raise NotImplementedError("--listen=i2p is under development, " "see ticket #2490 for details")
        if "tcp" in listeners:
            if config["port"]:  # --port/--location are a pair
                c.write("tub.port = %s\n" % config["port"].encode("utf-8"))
                c.write("tub.location = %s\n" % config["location"].encode("utf-8"))
            else:
                assert "hostname" in config
                hostname = config["hostname"]
                new_port = iputil.allocate_tcp_port()
                c.write("tub.port = tcp:%s\n" % new_port)
                c.write("tub.location = tcp:%s:%s\n" % (hostname.encode("utf-8"), new_port))

    c.write("#log_gatherer.furl =\n")
    c.write("#timeout.keepalive =\n")
    c.write("#timeout.disconnect =\n")
    c.write("#ssh.port = 8022\n")
    c.write("#ssh.authorized_keys_file = ~/.ssh/authorized_keys\n")
    c.write("\n")
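
A minimal invocation sketch (not taken from the source; the directory name and the literal option values are assumptions) showing that write_node_config() only needs a writable file object plus a mapping that supports the keys read above:

import os

basedir = "example-node"                      # assumed directory name
with open(os.path.join(basedir, "tahoe.cfg"), "w") as c:
    write_node_config(c, {
        "nickname": "mynode",                 # written under [node]
        "hide-ip": False,                     # True would also emit [connections] tcp = tor
        "webport": "tcp:3456",                # "none" would leave web.port empty
        "listen": "tcp",                      # comma-separated listener list
        "port": None, "location": None,       # unset, so the hostname branch allocates a port
        "hostname": "example.net",
    })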
Example no. 8
 def create_control_tub(self):
     # the control port uses a localhost-only ephemeral Tub, with no
     # control over the listening port or location
     self.control_tub = Tub()
     portnum = iputil.allocate_tcp_port()
     port = "tcp:%d:interface=127.0.0.1" % portnum
     location = "tcp:127.0.0.1:%d" % portnum
     self.control_tub.listenOn(port)
     self.control_tub.setLocation(location)
     self.log("Control Tub location set to %s" % (location, ))
     self.control_tub.setServiceParent(self)
Example no. 9
 def create_control_tub(self):
     # the control port uses a localhost-only ephemeral Tub, with no
     # control over the listening port or location
     self.control_tub = Tub()
     portnum = iputil.allocate_tcp_port()
     port = "tcp:%d:interface=127.0.0.1" % portnum
     location = "tcp:127.0.0.1:%d" % portnum
     self.control_tub.listenOn(port)
     self.control_tub.setLocation(location)
     self.log("Control Tub location set to %s" % (location,))
     self.control_tub.setServiceParent(self)
Example no. 10
 def create_tub(self, portnum=None):
     tubfile = os.path.join(self.basedir, "tub.pem")
     self.central_tub = tub = Tub(certFile=tubfile)
     #tub.setOption("logLocalFailures", True)
     #tub.setOption("logRemoteFailures", True)
     tub.setOption("expose-remote-exception-types", False)
     tub.setServiceParent(self.parent)
     if portnum is None:
         portnum = iputil.allocate_tcp_port()
     tub.listenOn("tcp:%d" % portnum)
     self.central_portnum = portnum
     tub.setLocation("localhost:%d" % self.central_portnum)
Example no. 11
 def create_tub(self, portnum=None):
     tubfile = os.path.join(self.basedir, "tub.pem")
     self.central_tub = tub = Tub(certFile=tubfile)
     #tub.setOption("logLocalFailures", True)
     #tub.setOption("logRemoteFailures", True)
     tub.setOption("expose-remote-exception-types", False)
     tub.setServiceParent(self.parent)
     if portnum is None:
         portnum = iputil.allocate_tcp_port()
     tub.listenOn("tcp:%d" % portnum)
     self.central_portnum = portnum
     tub.setLocation("localhost:%d" % self.central_portnum)
Example no. 12
 def get_tub_port(self):
     # return a descriptor string
     cfg_tubport = self.get_config("node", "tub.port", "")
     if cfg_tubport:
         return self._convert_tub_port(cfg_tubport)
     # For 'tub.port', tahoe.cfg overrides the individual file on disk. So
     # only read self._portnumfile if tahoe.cfg doesn't provide a value.
     if os.path.exists(self._portnumfile):
         file_tubport = fileutil.read(self._portnumfile).strip()
         return self._convert_tub_port(file_tubport)
     tubport = "tcp:%d" % iputil.allocate_tcp_port()
     fileutil.write_atomically(self._portnumfile, tubport + "\n", mode="")
     return tubport
Example no. 13
 def get_tub_port(self):
     # return a descriptor string
     cfg_tubport = self.get_config("node", "tub.port", "")
     if cfg_tubport:
         return self._convert_tub_port(cfg_tubport)
     # For 'tub.port', tahoe.cfg overrides the individual file on disk. So
     # only read self._portnumfile if tahoe.cfg doesn't provide a value.
     if os.path.exists(self._portnumfile):
         file_tubport = fileutil.read(self._portnumfile).strip()
         return self._convert_tub_port(file_tubport)
     tubport = "tcp:%d" % iputil.allocate_tcp_port()
     fileutil.write_atomically(self._portnumfile, tubport + "\n", mode="")
     return tubport
Example no. 14
 def create_log_tub(self):
     # The logport uses a localhost-only ephemeral Tub, with no control
     # over the listening port or location. This might change if we
     # discover a compelling reason for it in the future (e.g. being able
     # to use "flogtool tail" against a remote server), but for now I
     # think we can live without it.
     self.log_tub = Tub()
     portnum = iputil.allocate_tcp_port()
     port = "tcp:%d:interface=127.0.0.1" % portnum
     location = "tcp:127.0.0.1:%d" % portnum
     self.log_tub.listenOn(port)
     self.log_tub.setLocation(location)
     self.log("Log Tub location set to %s" % (location,))
     self.log_tub.setServiceParent(self)
Example no. 15
def create_control_tub():
    """
    Creates a Foolscap Tub for use by the control port. This is a
    localhost-only ephemeral Tub, with no control over the listening
    port or location
    """
    control_tub = Tub()
    portnum = iputil.allocate_tcp_port()
    port = "tcp:%d:interface=127.0.0.1" % portnum
    location = "tcp:127.0.0.1:%d" % portnum
    control_tub.listenOn(port)
    control_tub.setLocation(location)
    log.msg("Control Tub location set to %s" % (location,))
    return control_tub
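
A hypothetical caller (the node parent object below is an assumption, not from the source) would attach the returned Tub to its service hierarchy, just as the method-based variants above do with self.control_tub.setServiceParent(self):

control_tub = create_control_tub()
control_tub.setServiceParent(node)   # 'node' assumed to be a MultiService-style parent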
Example no. 16
 def create_log_tub(self):
     # The logport uses a localhost-only ephemeral Tub, with no control
     # over the listening port or location. This might change if we
     # discover a compelling reason for it in the future (e.g. being able
     # to use "flogtool tail" against a remote server), but for now I
     # think we can live without it.
     self.log_tub = Tub()
     portnum = iputil.allocate_tcp_port()
     port = "tcp:%d:interface=127.0.0.1" % portnum
     location = "tcp:127.0.0.1:%d" % portnum
     self.log_tub.listenOn(port)
     self.log_tub.setLocation(location)
     self.log("Log Tub location set to %s" % (location, ))
     self.log_tub.setServiceParent(self)
Example no. 17
    def assign(self, reactor):
        """
        Make a new streaming server endpoint and return its string description.

        This is intended to help write config files that will then be read and
        used in this process.

        :param reactor: The reactor which will be used to listen with the
            resulting endpoint.  If it provides ``IReactorSocket`` then
            resulting reliability will be extremely high.  If it doesn't,
            resulting reliability will be pretty alright.

        :return: A two-tuple of (location hint, port endpoint description) as
            strings.
        """
        if IReactorSocket.providedBy(reactor):
            # On this platform, we can reliably pre-allocate a listening port.
            # Once it is bound we know it will not fail later with EADDRINUSE.
            s = socket(AF_INET, SOCK_STREAM)
            # We need to keep ``s`` alive as long as the file descriptor we put in
            # this string might still be used.  We could dup() the descriptor
            # instead but then we've only inverted the cleanup problem: gone from
            # don't-close-too-soon to close-just-late-enough.  So we'll leave
            # ``s`` alive and use it as the cleanup mechanism.
            self._cleanups.append(s.close)
            s.setblocking(False)
            really_bind(s, ("127.0.0.1", 0))
            s.listen(SOMAXCONN)
            host, port = s.getsockname()
            location_hint = "tcp:%s:%d" % (host, port)
            port_endpoint = "adopt-socket:fd=%d" % (s.fileno(), )
            # Make sure `adopt-socket` is recognized.  We do this instead of
            # providing a dropin because we don't want to make this endpoint
            # available to random other applications.
            self._patch_plugins()
        else:
            # On other platforms, we blindly guess and hope we get lucky.
            portnum = iputil.allocate_tcp_port()
            location_hint = "tcp:127.0.0.1:%d" % (portnum, )
            port_endpoint = "tcp:%d:interface=127.0.0.1" % (portnum, )

        return location_hint, port_endpoint
Example no. 18
    def test_failure(self):
        self.basedir = "introducer/NonV1Server/failure"
        os.makedirs(self.basedir)
        self.create_tub()
        i = TooNewServer()
        i.setServiceParent(self.parent)
        self.introducer_furl = self.central_tub.registerReference(i)

        tub = Tub()
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        portnum = iputil.allocate_tcp_port()
        tub.listenOn("tcp:%d" % portnum)
        tub.setLocation("localhost:%d" % portnum)

        c = IntroducerClient(tub, self.introducer_furl, u"nickname-client",
                             "version", "oldest", {}, fakeseq,
                             FilePath(self.mktemp()))
        announcements = {}

        def got(key_s, ann):
            announcements[key_s] = ann

        c.subscribe_to("storage", got)

        c.setServiceParent(self.parent)

        # now we wait for it to connect and notice the bad version

        def _got_bad():
            return bool(c._introducer_error) or bool(c._publisher)

        d = self.poll(_got_bad)

        def _done(res):
            self.failUnless(c._introducer_error)
            self.failUnless(
                c._introducer_error.check(InsufficientVersionError),
                c._introducer_error)

        d.addCallback(_done)
        return d
Example no. 19
    def assign(self, reactor):
        """
        Make a new streaming server endpoint and return its string description.

        This is intended to help write config files that will then be read and
        used in this process.

        :param reactor: The reactor which will be used to listen with the
            resulting endpoint.  If it provides ``IReactorSocket`` then
            resulting reliability will be extremely high.  If it doesn't,
            resulting reliability will be pretty alright.

        :return: A two-tuple of (location hint, port endpoint description) as
            strings.
        """
        if IReactorSocket.providedBy(reactor):
            # On this platform, we can reliably pre-allocate a listening port.
            # Once it is bound we know it will not fail later with EADDRINUSE.
            s = socket(AF_INET, SOCK_STREAM)
            # We need to keep ``s`` alive as long as the file descriptor we put in
            # this string might still be used.  We could dup() the descriptor
            # instead but then we've only inverted the cleanup problem: gone from
            # don't-close-too-soon to close-just-late-enough.  So we'll leave
            # ``s`` alive and use it as the cleanup mechanism.
            self._cleanups.append(s.close)
            s.setblocking(False)
            really_bind(s, ("127.0.0.1", 0))
            s.listen(SOMAXCONN)
            host, port = s.getsockname()
            location_hint = "tcp:%s:%d" % (host, port)
            port_endpoint = "adopt-socket:fd=%d" % (s.fileno(),)
            # Make sure `adopt-socket` is recognized.  We do this instead of
            # providing a dropin because we don't want to make this endpoint
            # available to random other applications.
            self._patch_plugins()
        else:
            # On other platforms, we blindly guess and hope we get lucky.
            portnum = iputil.allocate_tcp_port()
            location_hint = "tcp:127.0.0.1:%d" % (portnum,)
            port_endpoint = "tcp:%d:interface=127.0.0.1" % (portnum,)

        return location_hint, port_endpoint
Example no. 20
def foolscapEndpointForPortNumber(portnum):
    """
    Create an endpoint that can be passed to ``Tub.listen``.

    :param portnum: Either an integer port number indicating which TCP/IPv4
        port number the endpoint should bind or ``None`` to automatically
        allocate such a port number.

    :return: A two-tuple of the integer port number allocated and a
        Foolscap-compatible endpoint object.
    """
    if portnum is None:
        # Bury this reactor import here to minimize the chances of it having
        # the effect of installing the default reactor.
        from twisted.internet import reactor
        if fcntl is not None and IReactorSocket.providedBy(reactor):
            # On POSIX we can take this very safe approach of binding the
            # actual socket to an address.  Once the bind succeeds here, we're
            # no longer subject to any future EADDRINUSE problems.
            s = socket()
            try:
                s.bind(('', 0))
                portnum = s.getsockname()[1]
                s.listen(1)
                fd = os.dup(s.fileno())
                flags = fcntl.fcntl(fd, fcntl.F_GETFD)
                flags = flags | os.O_NONBLOCK | fcntl.FD_CLOEXEC
                fcntl.fcntl(fd, fcntl.F_SETFD, flags)
                return (
                    portnum,
                    AdoptedStreamServerEndpoint(reactor, fd, AF_INET),
                )
            finally:
                s.close()
        else:
            # Get a random port number and fall through.  This is necessary on
            # Windows where Twisted doesn't offer IReactorSocket.  This
            # approach is error prone for the reasons described on
            # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2787
            portnum = iputil.allocate_tcp_port()
    return (portnum, "tcp:%d" % (portnum, ))
Example no. 21
    def test_failure(self):
        self.basedir = "introducer/NonV1Server/failure"
        os.makedirs(self.basedir)
        self.create_tub()
        i = TooNewServer()
        i.setServiceParent(self.parent)
        self.introducer_furl = self.central_tub.registerReference(i)

        tub = Tub()
        tub.setOption("expose-remote-exception-types", False)
        tub.setServiceParent(self.parent)
        portnum = iputil.allocate_tcp_port()
        tub.listenOn("tcp:%d" % portnum)
        tub.setLocation("localhost:%d" % portnum)

        c = IntroducerClient(tub, self.introducer_furl,
                             u"nickname-client", "version", "oldest", {},
                             fakeseq, FilePath(self.mktemp()))
        announcements = {}
        def got(key_s, ann):
            announcements[key_s] = ann
        c.subscribe_to("storage", got)

        c.setServiceParent(self.parent)

        # now we wait for it to connect and notice the bad version

        def _got_bad():
            return bool(c._introducer_error) or bool(c._publisher)
        d = self.poll(_got_bad)
        def _done(res):
            self.failUnless(c._introducer_error)
            self.failUnless(c._introducer_error.check(InsufficientVersionError),
                            c._introducer_error)
        d.addCallback(_done)
        return d
Example no. 22
    def do_system_test(self):
        self.create_tub()
        introducer = IntroducerService()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        # we have 5 clients who publish themselves as storage servers, and a
        # sixth which does not. All 6 clients subscribe to hear about
        # storage. When the connections are fully established, all six nodes
        # should have 5 connections each.
        NUM_STORAGE = 5
        NUM_CLIENTS = 6

        clients = []
        tubs = {}
        received_announcements = {}
        subscribing_clients = []
        publishing_clients = []
        printable_serverids = {}
        self.the_introducer = introducer
        privkeys = {}
        pubkeys = {}
        expected_announcements = [0 for c in range(NUM_CLIENTS)]

        for i in range(NUM_CLIENTS):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            portnum = iputil.allocate_tcp_port()
            tub.listenOn("tcp:%d" % portnum)
            tub.setLocation("localhost:%d" % portnum)

            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, self.introducer_furl,
                                 NICKNAME % str(i),
                                 "version", "oldest",
                                 {"component": "component-v1"}, fakeseq,
                                 FilePath(self.mktemp()))
            received_announcements[c] = {}
            def got(key_s_or_tubid, ann, announcements):
                index = key_s_or_tubid or get_tubid_string_from_ann(ann)
                announcements[index] = ann
            c.subscribe_to("storage", got, received_announcements[c])
            subscribing_clients.append(c)
            expected_announcements[i] += 1 # all expect a 'storage' announcement

            node_furl = tub.registerReference(Referenceable())
            privkey_s, pubkey_s = keyutil.make_keypair()
            privkey, _ignored = keyutil.parse_privkey(privkey_s)
            privkeys[i] = privkey
            pubkeys[i] = pubkey_s

            if i < NUM_STORAGE:
                # sign all announcements
                c.publish("storage", make_ann(node_furl), privkey)
                assert pubkey_s.startswith("pub-")
                printable_serverids[i] = pubkey_s[len("pub-"):]
                publishing_clients.append(c)
            else:
                # the last one does not publish anything
                pass

            if i == 2:
                # also publish something that nobody cares about
                boring_furl = tub.registerReference(Referenceable())
                c.publish("boring", make_ann(boring_furl), privkey)

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub


        def _wait_for_connected(ign):
            def _connected():
                for c in clients:
                    if not c.connected_to_introducer():
                        return False
                return True
            return self.poll(_connected)

        # we watch the clients to determine when the system has settled down.
        # Then we can look inside the server to assert things about its
        # state.

        def _wait_for_expected_announcements(ign):
            def _got_expected_announcements():
                for i,c in enumerate(subscribing_clients):
                    if len(received_announcements[c]) < expected_announcements[i]:
                        return False
                return True
            return self.poll(_got_expected_announcements)

        # before shutting down any Tub, we'd like to know that there are no
        # messages outstanding

        def _wait_until_idle(ign):
            def _idle():
                for c in subscribing_clients + publishing_clients:
                    if c._debug_outstanding:
                        return False
                if self.the_introducer._debug_outstanding:
                    return False
                return True
            return self.poll(_idle)

        d = defer.succeed(None)
        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check1(res):
            log.msg("doing _check1")
            dc = self.the_introducer._debug_counts
            # each storage server publishes a record. There is also one
            # "boring"
            self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE+1)
            self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            # the number of outbound messages is tricky.. I think it depends
            # upon a race between the publish and the subscribe messages.
            self.failUnless(dc["outbound_message"] > 0)
            # each client subscribes to "storage", and each server publishes
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)

            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"],
                                     NUM_STORAGE)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"],
                                     NUM_STORAGE)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_STORAGE)

                serverid0 = printable_serverids[0]
                ann = anns[serverid0]
                nick = ann["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, NICKNAME % "0")
            for c in publishing_clients:
                cdc = c._debug_counts
                expected = 1
                if c in [clients[2], # boring
                         ]:
                    expected = 2
                self.failUnlessEqual(cdc["outbound_message"], expected)
            # now check the web status, make sure it renders without error
            ir = introweb.IntroducerRoot(self.parent)
            self.parent.nodeid = "NODEID"
            text = ir.renderSynchronously().decode("utf-8")
            self.failUnlessIn(NICKNAME % "0", text) # a v2 client
            self.failUnlessIn(NICKNAME % "1", text) # another v2 client
            for i in range(NUM_STORAGE):
                self.failUnlessIn(printable_serverids[i], text,
                                  (i,printable_serverids[i],text))
                # make sure there isn't a double-base32ed string too
                self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                              (i,printable_serverids[i],text))
            log.msg("_check1 done")
        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss(ign):
            def _introducer_lost():
                for c in clients:
                    if c.connected_to_introducer():
                        return False
                return True
            return self.poll(_introducer_lost)
        d.addCallback(_wait_for_introducer_loss)

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer_tub)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)
        d.addCallback(lambda _ign: log.msg(" reconnected"))

        # TODO: publish something while the introducer is offline, then
        # confirm it gets delivered when the connection is reestablished
        def _check2(res):
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["inbound_message"], 1)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)
        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(_wait_for_introducer_loss)
        d.addCallback(lambda _ign: log.msg("introducer lost"))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            expected_announcements[i] += 1 # new 'storage' for everyone
            introducer = IntroducerService()
            self.the_introducer = introducer
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl
        d.addCallback(_restart_introducer)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check3(res):
            log.msg("doing _check3")
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE*NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"] > 0)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE)

        d.addCallback(_check3)
        return d
Example no. 23
def _tub_portlocation(config):
    """
    :returns: None or tuple of (port, location) for the main tub based
        on the given configuration. May raise ValueError or PrivacyError
        if there are problems with the config
    """
    cfg_tubport = config.get_config("node", "tub.port", None)
    cfg_location = config.get_config("node", "tub.location", None)
    reveal_ip = config.get_config("node", "reveal-IP-address", True, boolean=True)
    tubport_disabled = False

    if cfg_tubport is not None:
        cfg_tubport = cfg_tubport.strip()
        if cfg_tubport == "":
            raise ValueError("tub.port must not be empty")
        if cfg_tubport == "disabled":
            tubport_disabled = True

    location_disabled = False
    if cfg_location is not None:
        cfg_location = cfg_location.strip()
        if cfg_location == "":
            raise ValueError("tub.location must not be empty")
        if cfg_location == "disabled":
            location_disabled = True

    if tubport_disabled and location_disabled:
        return None
    if tubport_disabled and not location_disabled:
        raise ValueError("tub.port is disabled, but not tub.location")
    if location_disabled and not tubport_disabled:
        raise ValueError("tub.location is disabled, but not tub.port")

    if cfg_tubport is None:
        # For 'tub.port', tahoe.cfg overrides the individual file on
        # disk. So only read config.portnum_fname if tahoe.cfg doesn't
        # provide a value.
        if os.path.exists(config.portnum_fname):
            file_tubport = fileutil.read(config.portnum_fname).strip()
            tubport = _convert_tub_port(file_tubport)
        else:
            tubport = "tcp:%d" % iputil.allocate_tcp_port()
            fileutil.write_atomically(config.portnum_fname, tubport + "\n",
                                      mode="")
    else:
        tubport = _convert_tub_port(cfg_tubport)

    for port in tubport.split(","):
        if port in ("0", "tcp:0"):
            raise ValueError("tub.port cannot be 0: you must choose")

    if cfg_location is None:
        cfg_location = "AUTO"

    local_portnum = None # needed to hush lgtm.com static analyzer
    # Replace the location "AUTO", if present, with the detected local
    # addresses. Don't probe for local addresses unless necessary.
    split_location = cfg_location.split(",")
    if "AUTO" in split_location:
        if not reveal_ip:
            raise PrivacyError("tub.location uses AUTO")
        local_addresses = iputil.get_local_addresses_sync()
        # tubport must be like "tcp:12345" or "tcp:12345:morestuff"
        local_portnum = int(tubport.split(":")[1])
    new_locations = []
    for loc in split_location:
        if loc == "AUTO":
            new_locations.extend(["tcp:%s:%d" % (ip, local_portnum)
                                  for ip in local_addresses])
        else:
            if not reveal_ip:
                # Legacy hints are "host:port". We use Foolscap's utility
                # function to convert all hints into the modern format
                # ("tcp:host:port") because that's what the receiving
                # client will probably do. We test the converted hint for
                # TCP-ness, but publish the original hint because that
                # was the user's intent.
                from foolscap.connections.tcp import convert_legacy_hint
                converted_hint = convert_legacy_hint(loc)
                hint_type = converted_hint.split(":")[0]
                if hint_type == "tcp":
                    raise PrivacyError("tub.location includes tcp: hint")
            new_locations.append(loc)
    location = ",".join(new_locations)

    return tubport, location
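
A rough sketch of how the return value is consumed (the helper below is illustrative, not from the source): each comma-separated descriptor in the returned port string is passed to Tub.listenOn() and the combined location string to Tub.setLocation(), matching the listening_ports assertions in the test_multiple_ports examples above.

def _sketch_tub_listen(tub, config):
    # _tub_portlocation() returns None when both tub.port and tub.location
    # are "disabled"; otherwise a (tubport, location) pair of strings.
    portlocation = _tub_portlocation(config)
    if portlocation is None:
        return
    tubport, location = portlocation
    for port in tubport.split(","):
        tub.listenOn(port)        # e.g. "tcp:12345:interface=127.0.0.1"
    tub.setLocation(location)     # e.g. "tcp:localhost:12345"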
Example no. 24
 def setUp(self):
     testutil.SignalMixin.setUp(self)
     self.parent = LoggingMultiService()
     self.parent.startService()
     self._available_port = yield iputil.allocate_tcp_port()
Example no. 25
    def get_tub_portlocation(self, cfg_tubport, cfg_location):
        # return None, or tuple of (port, location)

        tubport_disabled = False
        if cfg_tubport is not None:
            cfg_tubport = cfg_tubport.strip()
            if cfg_tubport == "":
                raise ValueError("tub.port must not be empty")
            if cfg_tubport == "disabled":
                tubport_disabled = True

        location_disabled = False
        if cfg_location is not None:
            cfg_location = cfg_location.strip()
            if cfg_location == "":
                raise ValueError("tub.location must not be empty")
            if cfg_location == "disabled":
                location_disabled = True

        if tubport_disabled and location_disabled:
            return None
        if tubport_disabled and not location_disabled:
            raise ValueError("tub.port is disabled, but not tub.location")
        if location_disabled and not tubport_disabled:
            raise ValueError("tub.location is disabled, but not tub.port")

        if cfg_tubport is None:
            # For 'tub.port', tahoe.cfg overrides the individual file on
            # disk. So only read self._portnumfile if tahoe.cfg doesn't
            # provide a value.
            if os.path.exists(self.config.portnum_fname):
                file_tubport = fileutil.read(self.config.portnum_fname).strip()
                tubport = self._convert_tub_port(file_tubport)
            else:
                tubport = "tcp:%d" % iputil.allocate_tcp_port()
                fileutil.write_atomically(self.config.portnum_fname,
                                          tubport + "\n",
                                          mode="")
        else:
            tubport = self._convert_tub_port(cfg_tubport)

        if cfg_location is None:
            cfg_location = "AUTO"

        local_portnum = None  # needed to hush lgtm.com static analyzer
        # Replace the location "AUTO", if present, with the detected local
        # addresses. Don't probe for local addresses unless necessary.
        split_location = cfg_location.split(",")
        if "AUTO" in split_location:
            if not self._reveal_ip:
                raise PrivacyError("tub.location uses AUTO")
            local_addresses = iputil.get_local_addresses_sync()
            # tubport must be like "tcp:12345" or "tcp:12345:morestuff"
            local_portnum = int(tubport.split(":")[1])
        new_locations = []
        for loc in split_location:
            if loc == "AUTO":
                new_locations.extend([
                    "tcp:%s:%d" % (ip, local_portnum) for ip in local_addresses
                ])
            else:
                if not self._reveal_ip:
                    # Legacy hints are "host:port". We use Foolscap's utility
                    # function to convert all hints into the modern format
                    # ("tcp:host:port") because that's what the receiving
                    # client will probably do. We test the converted hint for
                    # TCP-ness, but publish the original hint because that
                    # was the user's intent.
                    from foolscap.connections.tcp import convert_legacy_hint
                    converted_hint = convert_legacy_hint(loc)
                    hint_type = converted_hint.split(":")[0]
                    if hint_type == "tcp":
                        raise PrivacyError("tub.location includes tcp: hint")
                new_locations.append(loc)
        location = ",".join(new_locations)

        return tubport, location
Example no. 26
 def setUp(self):
     testutil.SignalMixin.setUp(self)
     self.parent = LoggingMultiService()
     self.parent.startService()
     self._available_port = yield iputil.allocate_tcp_port()
Example no. 27
    def do_system_test(self):
        self.create_tub()
        introducer = IntroducerService()
        introducer.setServiceParent(self.parent)
        iff = os.path.join(self.basedir, "introducer.furl")
        tub = self.central_tub
        ifurl = self.central_tub.registerReference(introducer, furlFile=iff)
        self.introducer_furl = ifurl

        # we have 5 clients who publish themselves as storage servers, and a
        # sixth which does not. All 6 clients subscribe to hear about
        # storage. When the connections are fully established, all six nodes
        # should have 5 connections each.
        NUM_STORAGE = 5
        NUM_CLIENTS = 6

        clients = []
        tubs = {}
        received_announcements = {}
        subscribing_clients = []
        publishing_clients = []
        printable_serverids = {}
        self.the_introducer = introducer
        privkeys = {}
        pubkeys = {}
        expected_announcements = [0 for c in range(NUM_CLIENTS)]

        for i in range(NUM_CLIENTS):
            tub = Tub()
            #tub.setOption("logLocalFailures", True)
            #tub.setOption("logRemoteFailures", True)
            tub.setOption("expose-remote-exception-types", False)
            tub.setServiceParent(self.parent)
            portnum = iputil.allocate_tcp_port()
            tub.listenOn("tcp:%d" % portnum)
            tub.setLocation("localhost:%d" % portnum)

            log.msg("creating client %d: %s" % (i, tub.getShortTubID()))
            c = IntroducerClient(tub, self.introducer_furl, NICKNAME % str(i),
                                 "version", "oldest",
                                 {"component": "component-v1"}, fakeseq,
                                 FilePath(self.mktemp()))
            received_announcements[c] = {}

            def got(key_s_or_tubid, ann, announcements):
                index = key_s_or_tubid or get_tubid_string_from_ann(ann)
                announcements[index] = ann

            c.subscribe_to("storage", got, received_announcements[c])
            subscribing_clients.append(c)
            expected_announcements[
                i] += 1  # all expect a 'storage' announcement

            node_furl = tub.registerReference(Referenceable())
            privkey_s, pubkey_s = keyutil.make_keypair()
            privkey, _ignored = keyutil.parse_privkey(privkey_s)
            privkeys[i] = privkey
            pubkeys[i] = pubkey_s

            if i < NUM_STORAGE:
                # sign all announcements
                c.publish("storage", make_ann(node_furl), privkey)
                assert pubkey_s.startswith("pub-")
                printable_serverids[i] = pubkey_s[len("pub-"):]
                publishing_clients.append(c)
            else:
                # the last one does not publish anything
                pass

            if i == 2:
                # also publish something that nobody cares about
                boring_furl = tub.registerReference(Referenceable())
                c.publish("boring", make_ann(boring_furl), privkey)

            c.setServiceParent(self.parent)
            clients.append(c)
            tubs[c] = tub

        def _wait_for_connected(ign):
            def _connected():
                for c in clients:
                    if not c.connected_to_introducer():
                        return False
                return True

            return self.poll(_connected)

        # we watch the clients to determine when the system has settled down.
        # Then we can look inside the server to assert things about its
        # state.

        def _wait_for_expected_announcements(ign):
            def _got_expected_announcements():
                for i, c in enumerate(subscribing_clients):
                    if len(received_announcements[c]
                           ) < expected_announcements[i]:
                        return False
                return True

            return self.poll(_got_expected_announcements)

        # before shutting down any Tub, we'd like to know that there are no
        # messages outstanding

        def _wait_until_idle(ign):
            def _idle():
                for c in subscribing_clients + publishing_clients:
                    if c._debug_outstanding:
                        return False
                if self.the_introducer._debug_outstanding:
                    return False
                return True

            return self.poll(_idle)

        d = defer.succeed(None)
        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check1(res):
            log.msg("doing _check1")
            dc = self.the_introducer._debug_counts
            # each storage server publishes a record. There is also one
            # "boring"
            self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE + 1)
            self.failUnlessEqual(dc["inbound_duplicate"], 0)
            self.failUnlessEqual(dc["inbound_update"], 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            # the number of outbound messages is tricky.. I think it depends
            # upon a race between the publish and the subscribe messages.
            self.failUnless(dc["outbound_message"] > 0)
            # each client subscribes to "storage", and each server publishes
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)

            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"])
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"], 0)
                self.failUnlessEqual(cdc["update"], 0)
                self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE)
                anns = received_announcements[c]
                self.failUnlessEqual(len(anns), NUM_STORAGE)

                serverid0 = printable_serverids[0]
                ann = anns[serverid0]
                nick = ann["nickname"]
                self.failUnlessEqual(type(nick), unicode)
                self.failUnlessEqual(nick, NICKNAME % "0")
            for c in publishing_clients:
                cdc = c._debug_counts
                expected = 1
                if c in [
                        clients[2],  # boring
                ]:
                    expected = 2
                self.failUnlessEqual(cdc["outbound_message"], expected)
            # now check the web status, make sure it renders without error
            ir = introweb.IntroducerRoot(self.parent)
            self.parent.nodeid = "NODEID"
            text = ir.renderSynchronously().decode("utf-8")
            self.failUnlessIn(NICKNAME % "0", text)  # a v2 client
            self.failUnlessIn(NICKNAME % "1", text)  # another v2 client
            for i in range(NUM_STORAGE):
                self.failUnlessIn(printable_serverids[i], text,
                                  (i, printable_serverids[i], text))
                # make sure there isn't a double-base32ed string too
                self.failIfIn(idlib.nodeid_b2a(printable_serverids[i]), text,
                              (i, printable_serverids[i], text))
            log.msg("_check1 done")

        d.addCallback(_check1)

        # force an introducer reconnect, by shutting down the Tub it's using
        # and starting a new Tub (with the old introducer). Everybody should
        # reconnect and republish, but the introducer should ignore the
        # republishes as duplicates. However, because the server doesn't know
        # what each client does and does not know, it will send them a copy
        # of the current announcement table anyway.

        d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())

        def _wait_for_introducer_loss(ign):
            def _introducer_lost():
                for c in clients:
                    if c.connected_to_introducer():
                        return False
                return True

            return self.poll(_introducer_lost)

        d.addCallback(_wait_for_introducer_loss)

        def _restart_introducer_tub(_ign):
            log.msg("restarting introducer's Tub")
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            for k in self.the_introducer._debug_counts:
                self.the_introducer._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
            self.create_tub(self.central_portnum)
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer_tub)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)
        d.addCallback(lambda _ign: log.msg(" reconnected"))

        # TODO: publish something while the introducer is offline, then
        # confirm it gets delivered when the connection is reestablished
        def _check2(res):
            log.msg("doing _check2")
            # assert that the introducer sent out new messages, one per
            # subscriber
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnlessEqual(cdc["inbound_message"], 1)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     NUM_STORAGE)

        d.addCallback(_check2)

        # Then force an introducer restart, by shutting down the Tub,
        # destroying the old introducer, and starting a new Tub+Introducer.
        # Everybody should reconnect and republish, and the (new) introducer
        # will distribute the new announcements, but the clients should
        # ignore the republishes as duplicates.

        d.addCallback(lambda _ign: log.msg("shutting down introducer"))
        d.addCallback(lambda _ign: self.central_tub.disownServiceParent())
        d.addCallback(_wait_for_introducer_loss)
        d.addCallback(lambda _ign: log.msg("introducer lost"))

        def _restart_introducer(_ign):
            log.msg("restarting introducer")
            self.create_tub(self.central_portnum)
            # reset counters
            for i in range(NUM_CLIENTS):
                c = subscribing_clients[i]
                for k in c._debug_counts:
                    c._debug_counts[k] = 0
            expected_announcements[i] += 1  # new 'storage' for everyone
            introducer = IntroducerService()
            self.the_introducer = introducer
            newfurl = self.central_tub.registerReference(self.the_introducer,
                                                         furlFile=iff)
            assert newfurl == self.introducer_furl

        d.addCallback(_restart_introducer)

        d.addCallback(_wait_for_connected)
        d.addCallback(_wait_for_expected_announcements)
        d.addCallback(_wait_until_idle)

        def _check3(res):
            log.msg("doing _check3")
            dc = self.the_introducer._debug_counts
            self.failUnlessEqual(dc["outbound_announcements"],
                                 NUM_STORAGE * NUM_CLIENTS)
            self.failUnless(dc["outbound_message"] > 0)
            self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS)
            for c in subscribing_clients:
                cdc = c._debug_counts
                self.failUnless(cdc["inbound_message"] > 0)
                self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE)
                self.failUnlessEqual(cdc["new_announcement"], 0)
                self.failUnlessEqual(cdc["wrong_service"], 0)
                self.failUnlessEqual(cdc["duplicate_announcement"],
                                     NUM_STORAGE)

        d.addCallback(_check3)
        return d
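
The point of _check3 is that after a full introducer restart the clients' debug counters start from zero, but each client still remembers every announcement it accepted before the restart, so everything the new introducer republishes is counted as a duplicate rather than a new announcement. A minimal, self-contained sketch of that bookkeeping (illustrative only; the real logic lives in the IntroducerClient):

# Announcements accepted before the restart (hypothetical keys).
seen = {("server-%d" % i, "storage") for i in range(3)}
# Counters reset by _restart_introducer, as in the test above.
counts = {"inbound_announcement": 0, "new_announcement": 0,
          "duplicate_announcement": 0}
# The restarted introducer republishes every announcement it receives.
for key in sorted(seen):
    counts["inbound_announcement"] += 1
    if key in seen:
        counts["duplicate_announcement"] += 1   # already known: ignored
    else:
        counts["new_announcement"] += 1         # would be delivered to subscribers
assert counts == {"inbound_announcement": 3,
                  "new_announcement": 0,
                  "duplicate_announcement": 3}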
Example n. 28
def _tub_portlocation(config):
    """
    :returns: None or tuple of (port, location) for the main tub based
        on the given configuration. May raise ValueError or PrivacyError
        if there are problems with the config
    """
    cfg_tubport = config.get_config("node", "tub.port", None)
    cfg_location = config.get_config("node", "tub.location", None)
    reveal_ip = config.get_config("node",
                                  "reveal-IP-address",
                                  True,
                                  boolean=True)
    tubport_disabled = False

    if cfg_tubport is not None:
        cfg_tubport = cfg_tubport.strip()
        if cfg_tubport == "":
            raise ValueError("tub.port must not be empty")
        if cfg_tubport == "disabled":
            tubport_disabled = True

    location_disabled = False
    if cfg_location is not None:
        cfg_location = cfg_location.strip()
        if cfg_location == "":
            raise ValueError("tub.location must not be empty")
        if cfg_location == "disabled":
            location_disabled = True

    if tubport_disabled and location_disabled:
        return None
    if tubport_disabled and not location_disabled:
        raise ValueError("tub.port is disabled, but not tub.location")
    if location_disabled and not tubport_disabled:
        raise ValueError("tub.location is disabled, but not tub.port")

    if cfg_tubport is None:
        # For 'tub.port', tahoe.cfg overrides the individual file on
        # disk. So only read config.portnum_fname if tahoe.cfg doesn't
        # provide a value.
        if os.path.exists(config.portnum_fname):
            file_tubport = fileutil.read(config.portnum_fname).strip()
            tubport = _convert_tub_port(file_tubport)
        else:
            tubport = "tcp:%d" % iputil.allocate_tcp_port()
            fileutil.write_atomically(config.portnum_fname,
                                      tubport + "\n",
                                      mode="")
    else:
        tubport = _convert_tub_port(cfg_tubport)

    for port in tubport.split(","):
        if port in ("0", "tcp:0"):
            raise ValueError("tub.port cannot be 0: you must choose")

    if cfg_location is None:
        cfg_location = "AUTO"

    local_portnum = None  # needed to hush lgtm.com static analyzer
    # Replace the location "AUTO", if present, with the detected local
    # addresses. Don't probe for local addresses unless necessary.
    split_location = cfg_location.split(",")
    if "AUTO" in split_location:
        if not reveal_ip:
            raise PrivacyError("tub.location uses AUTO")
        local_addresses = iputil.get_local_addresses_sync()
        # tubport must be like "tcp:12345" or "tcp:12345:morestuff"
        local_portnum = int(tubport.split(":")[1])
    new_locations = []
    for loc in split_location:
        if loc == "AUTO":
            new_locations.extend(
                ["tcp:%s:%d" % (ip, local_portnum) for ip in local_addresses])
        else:
            if not reveal_ip:
                # Legacy hints are "host:port". We use Foolscap's utility
                # function to convert all hints into the modern format
                # ("tcp:host:port") because that's what the receiving
                # client will probably do. We test the converted hint for
                # TCP-ness, but publish the original hint because that
                # was the user's intent.
                from foolscap.connections.tcp import convert_legacy_hint
                converted_hint = convert_legacy_hint(loc)
                hint_type = converted_hint.split(":")[0]
                if hint_type == "tcp":
                    raise PrivacyError("tub.location includes tcp: hint")
            new_locations.append(loc)
    location = ",".join(new_locations)

    # Lacking this, Python 2 blows up in Foolscap when it is confused by a
    # Unicode FURL.
    location = location.encode("utf-8")

    return tubport, location
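
The "disabled" handling at the top of this function amounts to a small decision table: disabling both settings means the node runs no listener at all and the function returns None, while disabling only one of them is rejected as a configuration error. A hypothetical stand-alone helper (not part of the module) restating just that rule:

def check_disabled(cfg_tubport, cfg_location):
    # Sketch of the disabled/enabled cross-check only; empty-string and
    # normal-value handling are omitted here.
    port_off = (cfg_tubport == "disabled")
    loc_off = (cfg_location == "disabled")
    if port_off and loc_off:
        return None  # no listener at all
    if port_off and not loc_off:
        raise ValueError("tub.port is disabled, but not tub.location")
    if loc_off and not port_off:
        raise ValueError("tub.location is disabled, but not tub.port")
    return cfg_tubport, cfg_location

assert check_disabled("disabled", "disabled") is None
assert check_disabled("tcp:3456", "tcp:example.org:3456") == \
    ("tcp:3456", "tcp:example.org:3456")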
Example n. 29
    def get_tub_portlocation(self, cfg_tubport, cfg_location):
        # return None, or tuple of (port, location)

        tubport_disabled = False
        if cfg_tubport is not None:
            cfg_tubport = cfg_tubport.strip()
            if cfg_tubport == "":
                raise ValueError("tub.port must not be empty")
            if cfg_tubport == "disabled":
                tubport_disabled = True

        location_disabled = False
        if cfg_location is not None:
            cfg_location = cfg_location.strip()
            if cfg_location == "":
                raise ValueError("tub.location must not be empty")
            if cfg_location == "disabled":
                location_disabled = True

        if tubport_disabled and location_disabled:
            return None
        if tubport_disabled and not location_disabled:
            raise ValueError("tub.port is disabled, but not tub.location")
        if location_disabled and not tubport_disabled:
            raise ValueError("tub.location is disabled, but not tub.port")

        if cfg_tubport is None:
            # For 'tub.port', tahoe.cfg overrides the individual file on
            # disk. So only read self._portnumfile if tahoe.cfg doesn't
            # provide a value.
            if os.path.exists(self._portnumfile):
                file_tubport = fileutil.read(self._portnumfile).strip()
                tubport = self._convert_tub_port(file_tubport)
            else:
                tubport = "tcp:%d" % iputil.allocate_tcp_port()
                fileutil.write_atomically(self._portnumfile, tubport + "\n",
                                          mode="")
        else:
            tubport = self._convert_tub_port(cfg_tubport)

        if cfg_location is None:
            cfg_location = "AUTO"

        # Replace the location "AUTO", if present, with the detected local
        # addresses. Don't probe for local addresses unless necessary.
        split_location = cfg_location.split(",")
        if "AUTO" in split_location:
            if not self._reveal_ip:
                raise PrivacyError("tub.location uses AUTO")
            local_addresses = iputil.get_local_addresses_sync()
            # tubport must be like "tcp:12345" or "tcp:12345:morestuff"
            local_portnum = int(tubport.split(":")[1])
        new_locations = []
        for loc in split_location:
            if loc == "AUTO":
                new_locations.extend(["tcp:%s:%d" % (ip, local_portnum)
                                      for ip in local_addresses])
            else:
                if not self._reveal_ip:
                    # Legacy hints are "host:port". We use Foolscap's utility
                    # function to convert all hints into the modern format
                    # ("tcp:host:port") because that's what the receiving
                    # client will probably do. We test the converted hint for
                    # TCP-ness, but publish the original hint because that
                    # was the user's intent.
                    from foolscap.connections.tcp import convert_legacy_hint
                    converted_hint = convert_legacy_hint(loc)
                    hint_type = converted_hint.split(":")[0]
                    if hint_type == "tcp":
                        raise PrivacyError("tub.location includes tcp: hint")
                new_locations.append(loc)
        location = ",".join(new_locations)

        return tubport, location
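
To make the AUTO handling concrete, here is the expansion in isolation, with made-up addresses standing in for whatever iputil.get_local_addresses_sync() would report: each detected address becomes a tcp hint using the tub.port port number, and non-AUTO hints pass through unchanged.

# Hypothetical inputs; not taken from a real node.
tubport = "tcp:12345"
cfg_location = "AUTO,tor:abcdefghijklmnop.onion:80"
local_addresses = ["10.0.0.2", "192.168.1.5"]   # assumed detection result
local_portnum = int(tubport.split(":")[1])
new_locations = []
for loc in cfg_location.split(","):
    if loc == "AUTO":
        new_locations.extend("tcp:%s:%d" % (ip, local_portnum)
                             for ip in local_addresses)
    else:
        new_locations.append(loc)
assert ",".join(new_locations) == (
    "tcp:10.0.0.2:12345,tcp:192.168.1.5:12345,tor:abcdefghijklmnop.onion:80")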
Example n. 30
def write_node_config(c, config):
    # this is shared between clients and introducers
    c.write("# -*- mode: conf; coding: utf-8 -*-\n")
    c.write("\n")
    c.write("# This file controls the configuration of the Tahoe node that\n")
    c.write("# lives in this directory. It is only read at node startup.\n")
    c.write("# For details about the keys that can be set here, please\n")
    c.write("# read the 'docs/configuration.rst' file that came with your\n")
    c.write("# Tahoe installation.\n")
    c.write("\n\n")

    if config["hide-ip"]:
        c.write("[connections]\n")
        if tor_provider._import_txtorcon():
            c.write("tcp = tor\n")
        else:
            c.write("tcp = disabled\n")
        c.write("\n")

    c.write("[node]\n")
    nickname = argv_to_unicode(config.get("nickname") or "")
    c.write("nickname = %s\n" % (nickname.encode('utf-8'), ))
    if config["hide-ip"]:
        c.write("reveal-IP-address = false\n")
    else:
        c.write("reveal-IP-address = true\n")

    # TODO: validate webport
    webport = argv_to_unicode(config.get("webport") or "none")
    if webport.lower() == "none":
        webport = ""
    c.write("web.port = %s\n" % (webport.encode('utf-8'), ))
    c.write("web.static = public_html\n")

    listeners = config['listen'].split(",")

    tor_config = {}
    i2p_config = {}
    tub_ports = []
    tub_locations = []
    if listeners == ["none"]:
        c.write("tub.port = disabled\n")
        c.write("tub.location = disabled\n")
    else:
        if "tor" in listeners:
            (tor_config, tor_port, tor_location) = \
                         yield tor_provider.create_onion(reactor, config)
            tub_ports.append(tor_port)
            tub_locations.append(tor_location)
        if "i2p" in listeners:
            (i2p_config, i2p_port, i2p_location) = \
                         yield i2p_provider.create_dest(reactor, config)
            tub_ports.append(i2p_port)
            tub_locations.append(i2p_location)
        if "tcp" in listeners:
            if config["port"]:  # --port/--location are a pair
                tub_ports.append(config["port"].encode('utf-8'))
                tub_locations.append(config["location"].encode('utf-8'))
            else:
                assert "hostname" in config
                hostname = config["hostname"]
                new_port = iputil.allocate_tcp_port()
                tub_ports.append("tcp:%s" % new_port)
                tub_locations.append("tcp:%s:%s" %
                                     (hostname.encode('utf-8'), new_port))
        c.write("tub.port = %s\n" % ",".join(tub_ports))
        c.write("tub.location = %s\n" % ",".join(tub_locations))
    c.write("\n")

    c.write("#log_gatherer.furl =\n")
    c.write("#timeout.keepalive =\n")
    c.write("#timeout.disconnect =\n")
    c.write("#ssh.port = 8022\n")
    c.write("#ssh.authorized_keys_file = ~/.ssh/authorized_keys\n")
    c.write("\n")

    if tor_config:
        c.write("[tor]\n")
        for key, value in tor_config.items():
            c.write("%s = %s\n" % (key, value))
        c.write("\n")

    if i2p_config:
        c.write("[i2p]\n")
        for key, value in i2p_config.items():
            c.write("%s = %s\n" % (key, value))
        c.write("\n")
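For the common case of --listen=tcp with a --hostname and no explicit --port, the tcp branch above allocates a fresh port and pairs it with the hostname. A short sketch with hypothetical values shows the two tahoe.cfg lines that result:

# Illustration only: hostname and port number are made up.
hostname = "example.org"
new_port = 44801                      # stands in for iputil.allocate_tcp_port()
tub_ports = ["tcp:%s" % new_port]
tub_locations = ["tcp:%s:%s" % (hostname, new_port)]
print("tub.port = %s" % ",".join(tub_ports))         # tub.port = tcp:44801
print("tub.location = %s" % ",".join(tub_locations))  # tub.location = tcp:example.org:44801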
Example n. 31
def write_node_config(c, config):
    # this is shared between clients and introducers
    c.write("# -*- mode: conf; coding: utf-8 -*-\n")
    c.write("\n")
    c.write("# This file controls the configuration of the Tahoe node that\n")
    c.write("# lives in this directory. It is only read at node startup.\n")
    c.write("# For details about the keys that can be set here, please\n")
    c.write("# read the 'docs/configuration.rst' file that came with your\n")
    c.write("# Tahoe installation.\n")
    c.write("\n\n")

    if config["hide-ip"]:
        c.write("[connections]\n")
        c.write("tcp = tor\n")
        c.write("\n")

    c.write("[node]\n")
    nickname = argv_to_unicode(config.get("nickname") or "")
    c.write("nickname = %s\n" % (nickname.encode('utf-8'),))
    if config["hide-ip"]:
        c.write("reveal-IP-address = false\n")
    else:
        c.write("reveal-IP-address = true\n")

    # TODO: validate webport
    webport = argv_to_unicode(config.get("webport") or "none")
    if webport.lower() == "none":
        webport = ""
    c.write("web.port = %s\n" % (webport.encode('utf-8'),))
    c.write("web.static = public_html\n")

    listeners = config['listen'].split(",")

    tor_config = {}
    i2p_config = {}
    tub_ports = []
    tub_locations = []
    if listeners == ["none"]:
        c.write("tub.port = disabled\n")
        c.write("tub.location = disabled\n")
    else:
        if "tor" in listeners:
            (tor_config, tor_port, tor_location) = \
                         yield tor_provider.create_onion(reactor, config)
            tub_ports.append(tor_port)
            tub_locations.append(tor_location)
        if "i2p" in listeners:
            (i2p_config, i2p_port, i2p_location) = \
                         yield i2p_provider.create_dest(reactor, config)
            tub_ports.append(i2p_port)
            tub_locations.append(i2p_location)
        if "tcp" in listeners:
            if config["port"]: # --port/--location are a pair
                tub_ports.append(config["port"].encode('utf-8'))
                tub_locations.append(config["location"].encode('utf-8'))
            else:
                assert "hostname" in config
                hostname = config["hostname"]
                new_port = iputil.allocate_tcp_port()
                tub_ports.append("tcp:%s" % new_port)
                tub_locations.append("tcp:%s:%s" % (hostname.encode('utf-8'),
                                                    new_port))
        c.write("tub.port = %s\n" % ",".join(tub_ports))
        c.write("tub.location = %s\n" % ",".join(tub_locations))
    c.write("\n")

    c.write("#log_gatherer.furl =\n")
    c.write("#timeout.keepalive =\n")
    c.write("#timeout.disconnect =\n")
    c.write("#ssh.port = 8022\n")
    c.write("#ssh.authorized_keys_file = ~/.ssh/authorized_keys\n")
    c.write("\n")

    if tor_config:
        c.write("[tor]\n")
        for key, value in tor_config.items():
            c.write("%s = %s\n" % (key, value))
        c.write("\n")

    if i2p_config:
        c.write("[i2p]\n")
        for key, value in i2p_config.items():
            c.write("%s = %s\n" % (key, value))
        c.write("\n")