def test_multiple_ports(self, ports):
    """
    When there are multiple listen addresses suggested by the ``tub.port`` and
    ``tub.location`` configuration, the node's *main* port listens on all of
    them.
    """
    node_dir = self.mktemp()
    os.mkdir(node_dir)
    os.mkdir(os.path.join(node_dir, "private"))
    port1, port2 = iter(ports)
    tub_port = "tcp:%d:interface=127.0.0.1,tcp:%d:interface=127.0.0.1" % (port1, port2)
    tub_location = "tcp:localhost:%d,tcp:localhost:%d" % (port1, port2)
    # Write a config naming both listen endpoints under [node].
    with open(os.path.join(node_dir, "tahoe.cfg"), "w") as cfg_file:
        cfg_file.write(BASE_CONFIG)
        cfg_file.write("[node]\n")
        cfg_file.write("tub.port = %s\n" % tub_port)
        cfg_file.write("tub.location = %s\n" % tub_location)
    config = client.read_config(node_dir, "client.port")
    i2p_provider = mock.Mock()
    tor_provider = mock.Mock()
    default_handler, foolscap_handlers = create_connection_handlers(
        None, config, i2p_provider, tor_provider)
    tub_options = create_tub_options(config)
    fake_tub = FakeTub()
    with mock.patch("allmydata.node.Tub", return_value=fake_tub):
        create_main_tub(
            config, tub_options, default_handler, foolscap_handlers,
            i2p_provider, tor_provider,
        )
    self.assertEqual(
        fake_tub.listening_ports,
        ["tcp:%d:interface=127.0.0.1" % port1,
         "tcp:%d:interface=127.0.0.1" % port2],
    )
def test_listen_on_zero(self):
    """
    Trying to listen on port 0 should be an error
    """
    node_dir = self.mktemp()
    create_node_dir(node_dir, "testing")
    # Configure an (invalid) tub.port of 0 under [node].
    with open(os.path.join(node_dir, "tahoe.cfg"), "w") as cfg_file:
        cfg_file.write(BASE_CONFIG)
        cfg_file.write("[node]\n")
        cfg_file.write("tub.port = tcp:0\n")
        cfg_file.write("tub.location = AUTO\n")
    config = client.read_config(node_dir, "client.port")
    i2p_provider = mock.Mock()
    tor_provider = mock.Mock()
    default_handler, foolscap_handlers = create_connection_handlers(
        None, config, i2p_provider, tor_provider)
    tub_options = create_tub_options(config)
    fake_tub = FakeTub()
    with mock.patch("allmydata.node.Tub", return_value=fake_tub):
        with self.assertRaises(ValueError) as ctx:
            create_main_tub(
                config, tub_options, default_handler, foolscap_handlers,
                i2p_provider, tor_provider,
            )
    self.assertIn("you must choose", str(ctx.exception))
def _when_connected_enough(self):
    """
    Poll the Tahoe node's welcome page until we are connected to at least
    ``shares.needed`` storage servers (read from the node's own config);
    upload-queue processing can start once the returned Deferred fires.
    """
    # The connection threshold is the node's own shares.needed setting.
    tahoe_config = read_config(self.config.tahoe_node_directory.path, "portnum")
    threshold = int(tahoe_config.get_config("client", "shares.needed"))

    @inline_callbacks
    def enough():
        try:
            welcome = yield self.tahoe_client.get_welcome()
        except Exception:
            returnValue((False, "Failed to get welcome page"))

        servers = welcome[u"servers"]
        connected = [
            srv for srv in servers
            if srv["connection_status"].startswith("Connected ")
        ]
        status = "Found {} of {} connected servers (want {})".format(
            len(connected),
            len(servers),
            threshold,
        )
        # Done exactly when the connected count reaches the threshold.
        returnValue((len(connected) >= threshold, status))

    return poll("connected enough", enough, self.reactor)
def invite(options):
    """
    Gather this node's sharing configuration (shares.* and introducer FURL,
    with command-line overrides taking precedence) and deliver it to the
    invitee via the wormhole helper.
    """
    if options.parent['node-directory']:
        basedir = argv_to_abspath(options.parent['node-directory'])
    else:
        basedir = get_default_nodedir()
    config = read_config(basedir, u"")

    try:
        introducer_furl = get_introducer_furl(basedir, config)
    except Exception as e:
        print("Can't find introducer FURL for node '{}': {}".format(basedir, str(e)),
              file=options.stderr)
        raise SystemExit(1)

    # Command-line values win; otherwise fall back to the node's config.
    remote_config = {
        "shares-needed": options["shares-needed"] or config.get_config('client', 'shares.needed'),
        "shares-total": options["shares-total"] or config.get_config('client', 'shares.total'),
        "shares-happy": options["shares-happy"] or config.get_config('client', 'shares.happy'),
        "nickname": options['nick'],
        "introducer": introducer_furl,
    }

    yield _send_config_via_wormhole(options, remote_config)
    print("Completed successfully", file=options.stdout)
def test_listen_on_zero(self):
    """
    Trying to listen on port 0 should be an error
    """
    basedir = self.mktemp()
    create_node_dir(basedir, "testing")
    with open(os.path.join(basedir, "tahoe.cfg"), "w") as f:
        f.write(BASE_CONFIG)
        # The tub.* options belong in the [node] section; without this
        # header (which the parallel version of this test writes) they
        # land in whatever section BASE_CONFIG ends with and the node
        # config would not see them.
        f.write("[node]\n")
        f.write("tub.port = tcp:0\n")
        f.write("tub.location = AUTO\n")
    config = client.read_config(basedir, "client.port")
    i2p_provider = mock.Mock()
    tor_provider = mock.Mock()
    # Build the handlers/options that create_main_tub requires.
    dfh, fch = create_connection_handlers(None, config, i2p_provider, tor_provider)
    tub_options = create_tub_options(config)
    t = FakeTub()
    with mock.patch("allmydata.node.Tub", return_value=t):
        # Port 0 ("pick any port") is rejected with a ValueError.
        with self.assertRaises(ValueError) as ctx:
            create_main_tub(config, tub_options, dfh, fch, i2p_provider, tor_provider)
    self.assertIn(
        "you must choose",
        str(ctx.exception),
    )
def test_tor_i2p_listeners(self):
    """
    ``listen:i2p,listen:tor`` in ``tub.port`` makes the Tub listen on the
    listener objects obtained from the i2p and tor providers.
    """
    basedir = self.mktemp()
    config_fname = os.path.join(basedir, "tahoe.cfg")
    os.mkdir(basedir)
    os.mkdir(os.path.join(basedir, "private"))
    with open(config_fname, "w") as f:
        f.write(BASE_CONFIG)
        # tub.* settings must appear under [node] (the sibling version of
        # this test writes the header); otherwise they land in whatever
        # section BASE_CONFIG ends with and would not be read.
        f.write("[node]\n")
        f.write("tub.port = listen:i2p,listen:tor\n")
        f.write("tub.location = tcp:example.org:1234\n")
    config = client.read_config(basedir, "client.port")
    tub_options = create_tub_options(config)
    t = FakeTub()
    i2p_provider = mock.Mock()
    tor_provider = mock.Mock()
    dfh, fch = create_connection_handlers(None, config, i2p_provider, tor_provider)
    with mock.patch("allmydata.node.Tub", return_value=t):
        create_main_tub(config, tub_options, dfh, fch, i2p_provider, tor_provider)
    # Each provider's get_listener() was called exactly once during tub
    # creation; assert this before calling it again below.
    self.assertEqual(i2p_provider.get_listener.mock_calls, [mock.call()])
    self.assertEqual(tor_provider.get_listener.mock_calls, [mock.call()])
    self.assertEqual(
        t.listening_ports,
        [
            i2p_provider.get_listener(),
            tor_provider.get_listener(),
        ]
    )
def test_tor_i2p_listeners(self):
    """
    ``listen:i2p,listen:tor`` in ``tub.port`` makes the Tub listen on the
    listener objects produced by the i2p and tor providers.
    """
    node_dir = self.mktemp()
    os.mkdir(node_dir)
    os.mkdir(os.path.join(node_dir, "private"))
    with open(os.path.join(node_dir, "tahoe.cfg"), "w") as cfg_file:
        cfg_file.write(BASE_CONFIG)
        cfg_file.write("[node]\n")
        cfg_file.write("tub.port = listen:i2p,listen:tor\n")
        cfg_file.write("tub.location = tcp:example.org:1234\n")
    config = client.read_config(node_dir, "client.port")
    tub_options = create_tub_options(config)
    fake_tub = FakeTub()
    i2p_provider = mock.Mock()
    tor_provider = mock.Mock()
    default_handler, foolscap_handlers = create_connection_handlers(
        None, config, i2p_provider, tor_provider)
    with mock.patch("allmydata.node.Tub", return_value=fake_tub):
        create_main_tub(
            config, tub_options, default_handler, foolscap_handlers,
            i2p_provider, tor_provider,
        )
    # get_listener() was invoked exactly once per provider during tub
    # creation; check this before calling it again to build the
    # expected listening_ports value.
    self.assertEqual(i2p_provider.get_listener.mock_calls, [mock.call()])
    self.assertEqual(tor_provider.get_listener.mock_calls, [mock.call()])
    self.assertEqual(
        fake_tub.listening_ports,
        [
            i2p_provider.get_listener(),
            tor_provider.get_listener(),
        ]
    )
def test_multiple_ports(self):
    """
    Multiple comma-separated endpoints in ``tub.port`` all become
    listening ports on the main Tub.
    """
    basedir = self.mktemp()
    create_node_dir(basedir, "testing")
    port1 = iputil.allocate_tcp_port()
    port2 = iputil.allocate_tcp_port()
    port = ("tcp:%d:interface=127.0.0.1,tcp:%d:interface=127.0.0.1"
            % (port1, port2))
    location = "tcp:localhost:%d,tcp:localhost:%d" % (port1, port2)
    with open(os.path.join(basedir, "tahoe.cfg"), "w") as f:
        f.write(BASE_CONFIG)
        # tub.* settings belong in the [node] section (the sibling
        # versions of this test write the header); without it they land
        # in whatever section BASE_CONFIG ends with and would not be read.
        f.write("[node]\n")
        f.write("tub.port = %s\n" % port)
        f.write("tub.location = %s\n" % location)
    config = client.read_config(basedir, "client.port")
    i2p_provider = mock.Mock()
    tor_provider = mock.Mock()
    dfh, fch = create_connection_handlers(None, config, i2p_provider, tor_provider)
    tub_options = create_tub_options(config)
    t = FakeTub()
    with mock.patch("allmydata.node.Tub", return_value=t):
        create_main_tub(config, tub_options, dfh, fch, i2p_provider, tor_provider)
    self.assertEqual(t.listening_ports,
                     ["tcp:%d:interface=127.0.0.1" % port1,
                      "tcp:%d:interface=127.0.0.1" % port2])
def test_multiple_ports(self):
    """
    Multiple comma-separated endpoints in ``tub.port`` all become
    listening ports on the main Tub.
    """
    node_dir = self.mktemp()
    create_node_dir(node_dir, "testing")
    port1 = iputil.allocate_tcp_port()
    port2 = iputil.allocate_tcp_port()
    tub_port = "tcp:%d:interface=127.0.0.1,tcp:%d:interface=127.0.0.1" % (port1, port2)
    tub_location = "tcp:localhost:%d,tcp:localhost:%d" % (port1, port2)
    with open(os.path.join(node_dir, "tahoe.cfg"), "w") as cfg_file:
        cfg_file.write(BASE_CONFIG)
        cfg_file.write("[node]\n")
        cfg_file.write("tub.port = %s\n" % tub_port)
        cfg_file.write("tub.location = %s\n" % tub_location)
    config = client.read_config(node_dir, "client.port")
    i2p_provider = mock.Mock()
    tor_provider = mock.Mock()
    default_handler, foolscap_handlers = create_connection_handlers(
        None, config, i2p_provider, tor_provider)
    tub_options = create_tub_options(config)
    fake_tub = FakeTub()
    with mock.patch("allmydata.node.Tub", return_value=fake_tub):
        create_main_tub(
            config, tub_options, default_handler, foolscap_handlers,
            i2p_provider, tor_provider,
        )
    self.assertEqual(
        fake_tub.listening_ports,
        ["tcp:%d:interface=127.0.0.1" % port1,
         "tcp:%d:interface=127.0.0.1" % port2],
    )
def magic_folder_join(invite_code, node_directory, local_dir, name, poll_interval, author_name):
    """
    Join a magic-folder specified by the ``name`` and create the config files.

    :param unicode invite_code: The code used to join a magic folder.

    :param unicode node_directory: The path to the Tahoe-LAFS node directory
        which owns the magic folder in question.

    :param unicode local_dir: The directory in the local filesystem that holds
        the files to be synchronized across computers.

    :param unicode name: The magic-folder name.

    :param integer poll_interval: Periodic time interval after which the
        client polls for updates.

    :param unicode author_name: Our own name for Snapshot authorship

    :return integer: If the function succeeds, returns 0, else an exception
        is raised.
    """
    # An invite code is two capabilities joined by INVITE_SEPARATOR.
    parts = invite_code.split(INVITE_SEPARATOR)
    if len(parts) != 2:
        raise usage.UsageError("Invalid invite code.")
    magic_readonly_cap, dmd_write_cap = parts

    maybe_upgrade_magic_folders(node_directory)
    existing_folders = load_magic_folders(node_directory)

    if name in existing_folders:
        raise Exception(
            "This client already has a magic-folder named '{}'".format(name))

    # Record our authorship identity in the node's private area.
    author = create_local_author(author_name)
    config = read_config(node_directory, "portnum")
    write_local_author(author, name, config)

    db_fname = os.path.join(
        node_directory,
        u"private",
        u"magicfolder_{}.sqlite".format(name),
    )
    if os.path.exists(db_fname):
        raise Exception(
            "Database '{}' already exists; not overwriting".format(db_fname))

    existing_folders[name] = {
        u"directory": local_dir.encode('utf-8'),
        u"collective_dircap": magic_readonly_cap,
        u"upload_dircap": dmd_write_cap,
        u"poll_interval": poll_interval,
    }
    save_magic_folders(node_directory, existing_folders)
    return 0
def setUp(self):
    """
    Create a fresh node directory holding one pre-configured magic-folder
    named "default", then load that node's configuration.
    """
    result = super(TestLocalAuthor, self).setUp()
    folder_path = FilePath(mktemp())
    self.node = self.useFixture(NodeDirectory(FilePath(mktemp())))
    # Dummy (but well-formed) collective and upload dircaps.
    self.node.create_magic_folder(
        u"default",
        u"URI:CHK2:{}:{}:1:1:256".format(u"a" * 16, u"a" * 32),
        u"URI:CHK2:{}:{}:1:1:256".format(u"b" * 16, u"b" * 32),
        folder_path,
        60,
    )
    self.config = read_config(self.node.path.path, "portnum")
    return result
def _test_streaming_logs(reactor, temp_dir, alice):
    """
    Connect to Alice's log-streaming websocket endpoint with the node's
    API auth token, provoke a log event, and check that a message (valid
    JSON) arrives before the connection closes.
    """
    cfg = read_config(join(temp_dir, "alice"), "portnum")
    node_url = cfg.get_config_from_file("node.url")
    api_auth_token = cfg.get_private_config("api_auth_token")

    # Derive the websocket URL from the HTTP node URL.
    log_url = node_url.replace("http://", "ws://") + "private/logs/v1"

    print("Connecting to {}".format(log_url))
    ws_client = yield _connect_client(reactor, api_auth_token, log_url)
    print("Connected.")
    ws_client.on_close = Deferred()
    ws_client.on_message = Deferred()

    # Provoke _some_ log event.
    yield treq.get(node_url)

    # A message must win the race against the connection closing.
    outcome = yield _race(ws_client.on_close, ws_client.on_message)
    assert isinstance(outcome, Right)
    json.loads(outcome.value)
def main(reactor):
    """
    Connect to a local Tahoe node's streaming-log websocket endpoint and
    report open/close events on stdout.
    """
    from twisted.python import log
    log.startLogging(sys.stdout)

    tahoe_dir = "testgrid/alice"
    cfg = read_config(tahoe_dir, "portnum")

    token = cfg.get_private_config("api_auth_token").strip()
    webport = cfg.get_config("node", "web.port")
    # web.port may be a full endpoint string ("tcp:1234") or a bare port.
    port_number = webport.split(':')[1] if webport.startswith("tcp:") else webport

    factory = WebSocketClientFactory(
        url=u"ws://127.0.0.1:{}/private/logs/v1".format(port_number),
        headers={
            "Authorization": "tahoe-lafs {}".format(token),
        }
    )
    factory.on_open = Deferred()
    factory.on_close = Deferred()
    factory.protocol = TahoeLogProtocol

    endpoint = HostnameEndpoint(reactor, "127.0.0.1", int(port_number))
    try:
        connected_port = yield endpoint.connect(factory)
    except ConnectError as e:
        print("Connection failed: {}".format(e))
        return
    print("port: {}".format(connected_port))

    yield factory.on_open
    print("opened")
    yield factory.on_close
    print("closed")
def create_no_network_client(basedir):
    """
    :return: a Deferred yielding an instance of _Client subclass which
        does no actual networking but has the same API.
    """
    from allmydata.client import read_config

    basedir = abspath_expanduser_unicode(unicode(basedir))
    fileutil.make_dirs(os.path.join(basedir, "private"), 0o700)

    broker = NoNetworkStorageBroker()
    no_network_client = _NoNetworkClient(
        read_config(basedir, u'client.port'),
        main_tub=None,
        i2p_provider=None,
        tor_provider=None,
        introducer_clients=[],
        storage_farm_broker=broker,
    )
    # this is a (pre-existing) reference-cycle and also a bad idea, see:
    # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2949
    broker.client = no_network_client
    return defer.succeed(no_network_client)
def create_no_network_client(basedir):
    """
    :return: a Deferred yielding an instance of _Client subclass which
        does no actual networking but has the same API.
    """
    from allmydata.client import read_config

    basedir = abspath_expanduser_unicode(unicode(basedir))
    fileutil.make_dirs(os.path.join(basedir, "private"), 0o700)

    broker = NoNetworkStorageBroker()
    no_network_client = _NoNetworkClient(
        read_config(basedir, u'client.port'),
        main_tub=None,
        control_tub=None,
        i2p_provider=None,
        tor_provider=None,
        introducer_clients=[],
        storage_farm_broker=broker,
    )
    # this is a (pre-existing) reference-cycle and also a bad idea, see:
    # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2949
    broker.client = no_network_client
    return defer.succeed(no_network_client)
def get_config(self):
    """Read and return this node's configuration from its directory."""
    return client.read_config(self._node_dir, u"portnum")
def from_node_directory(cls, reactor, nodedir, webport):
    """
    Alternate constructor: build an instance from an existing Tahoe node
    directory, loading its config and magic-folder definitions.
    """
    folders = load_magic_folders(nodedir)
    node_config = read_config(nodedir, u"client.port")
    return cls(reactor, node_config, webport, folders)