Code Example #1
File: client.py  Project: jsgf/tahoe-lafs
 def init_stats_provider(self):
     gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
     self.stats_provider = StatsProvider(self, gatherer_furl)
     self.add_service(self.stats_provider)
     self.stats_provider.register_producer(self)
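
Example #1 wires the Client itself into the StatsProvider as a producer: register_producer() takes any object exposing a get_stats() method that returns a dict of counters (the Client's own get_stats(), shown later in Example #3, reports node.uptime this way), and the provider is assumed to poll its producers periodically and forward the merged counters to the stats_gatherer.furl if one was configured. A minimal sketch of that producer contract; UptimeProducer is a name of my own choosing, not tahoe-lafs API:

    import time

    class UptimeProducer(object):
        # Hypothetical producer mirroring Client.get_stats() from Example #3.
        def __init__(self):
            self.started_timestamp = time.time()

        def get_stats(self):
            return {"node.uptime": time.time() - self.started_timestamp}

    # provider.register_producer(UptimeProducer()) would then include
    # node.uptime in whatever the provider reports.
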
Code Example #2
File: client.py  Project: tahoe-lafs/tahoe-lafs
 def init_stats_provider(self):
     gatherer_furl = self.config.get_config("client", "stats_gatherer.furl", None)
     self.stats_provider = StatsProvider(self, gatherer_furl)
     self.stats_provider.setServiceParent(self)
     self.stats_provider.register_producer(self)
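
Examples #1 and #2 differ in two ways: the newer code reads settings through a separate self.config object instead of config methods on the Node itself, and it attaches the StatsProvider with Twisted's standard setServiceParent() rather than the node's add_service() helper. In plain Twisted the two attachment styles end up in the same place; a small sketch using only twisted.application.service (not tahoe-lafs code), with add_service() assumed to be a thin wrapper around one of these calls:

    from twisted.application import service

    parent = service.MultiService()
    child = service.Service()

    child.setServiceParent(parent)    # the style used in Example #2
    # parent.addService(child)        # equivalent: setServiceParent() itself
    #                                 # calls addService() on the parent
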
Code Example #3
File: client.py  Project: jsgf/tahoe-lafs
class Client(node.Node, pollmixin.PollMixin):
    implements(IStatsProducer)

    PORTNUMFILE = "client.port"
    STOREDIR = 'storage'
    NODETYPE = "client"
    SUICIDE_PREVENTION_HOTLINE_FILE = "suicide_prevention_hotline"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # this is a dictionary of (needed, desired, total, max_segment_size). 'needed'
    # is the number of shares required to reconstruct a file. 'desired' means
    # that we will abort an upload unless we can allocate space for at least
    # this many. 'total' is the total number of shares created by encoding.
    # If everybody has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {"k": 3,
                                   "happy": 7,
                                   "n": 10,
                                   "max_segment_size": 128*KiB,
                                   }

    def __init__(self, basedir="."):
        node.Node.__init__(self, basedir)
        self.started_timestamp = time.time()
        self.logSource="Client"
        self.DEFAULT_ENCODING_PARAMETERS = self.DEFAULT_ENCODING_PARAMETERS.copy()
        self.init_introducer_client()
        self.init_stats_provider()
        self.init_secrets()
        self.init_storage()
        self.init_control()
        self.helper = None
        if self.get_config("helper", "enabled", False, boolean=True):
            self.init_helper()
        self._key_generator = KeyGenerator()
        key_gen_furl = self.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            self.init_key_gen(key_gen_furl)
        self.init_client()
        # ControlServer and Helper are attached after Tub startup
        self.init_ftp_server()
        self.init_sftp_server()
        self.init_drop_uploader()

        hotline_file = os.path.join(self.basedir,
                                    self.SUICIDE_PREVENTION_HOTLINE_FILE)
        if os.path.exists(hotline_file):
            age = time.time() - os.stat(hotline_file)[stat.ST_MTIME]
            self.log("hotline file noticed (%ds old), starting timer" % age)
            hotline = TimerService(1.0, self._check_hotline, hotline_file)
            hotline.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = self.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport) # strports string

    def init_introducer_client(self):
        self.introducer_furl = self.get_config("client", "introducer.furl")
        ic = IntroducerClient(self.tub, self.introducer_furl,
                              self.nickname,
                              str(allmydata.__full_version__),
                              str(self.OLDEST_SUPPORTED_VERSION))
        self.introducer_client = ic
        # hold off on starting the IntroducerClient until our tub has been
        # started, so we'll have a useful address on our RemoteReference, so
        # that the introducer's status page will show us.
        d = self.when_tub_ready()
        def _start_introducer_client(res):
            ic.setServiceParent(self)
        d.addCallback(_start_introducer_client)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="URyI5w")

    def init_stats_provider(self):
        gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.add_service(self.stats_provider)
        self.stats_provider.register_producer(self)

    def get_stats(self):
        return { 'node.uptime': time.time() - self.started_timestamp }

    def init_secrets(self):
        lease_s = self.get_or_create_private_config("secret", _make_secret)
        lease_secret = base32.a2b(lease_s)
        convergence_s = self.get_or_create_private_config('convergence',
                                                          _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._secret_holder = SecretHolder(lease_secret, self.convergence)

    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        if not self.get_config("storage", "enabled", True, boolean=True):
            return
        readonly = self.get_config("storage", "readonly", False, boolean=True)

        storedir = os.path.join(self.basedir, self.STOREDIR)

        data = self.get_config("storage", "reserved_space", None)
        reserved = None
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s"
                    % data)
        if reserved is None:
            reserved = 0
        discard = self.get_config("storage", "debug_discard", False,
                                  boolean=True)

        expire = self.get_config("storage", "expire.enabled", False, boolean=True)
        if expire:
            mode = self.get_config("storage", "expire.mode") # require a mode
        else:
            mode = self.get_config("storage", "expire.mode", "age")

        o_l_d = self.get_config("storage", "expire.override_lease_duration", None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.get_config("storage", "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.get_config("storage", "expire.immutable", True, boolean=True):
            sharetypes.append("immutable")
        if self.get_config("storage", "expire.mutable", True, boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(storedir, self.nodeid,
                           reserved_space=reserved,
                           discard_storage=discard,
                           readonly_storage=readonly,
                           stats_provider=self.stats_provider,
                           expiration_enabled=expire,
                           expiration_mode=mode,
                           expiration_override_lease_duration=o_l_d,
                           expiration_cutoff_date=cutoff_date,
                           expiration_sharetypes=expiration_sharetypes)
        self.add_service(ss)

        d = self.when_tub_ready()
        # we can't do registerReference until the Tub is ready
        def _publish(res):
            furl_file = os.path.join(self.basedir, "private", "storage.furl").encode(get_filesystem_encoding())
            furl = self.tub.registerReference(ss, furlFile=furl_file)
            ri_name = RIStorageServer.__remote_name__
            self.introducer_client.publish(furl, "storage", ri_name)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="aLGBKw")

    def init_client(self):
        helper_furl = self.get_config("client", "helper.furl", None)
        DEP = self.DEFAULT_ENCODING_PARAMETERS
        DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(self.get_config("client", "shares.happy", DEP["happy"]))

        self.init_client_storage_broker()
        self.history = History(self.stats_provider)
        self.terminator = Terminator()
        self.terminator.setServiceParent(self)
        self.add_service(Uploader(helper_furl, self.stats_provider,
                                  self.history))
        self.init_stub_client()
        self.init_blacklist()
        self.init_nodemaker()

    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        sb = storage_client.StorageFarmBroker(self.tub, permute_peers=True)
        self.storage_broker = sb

        # load static server specifications from tahoe.cfg, if any.
        # Not quite ready yet.
        #if self.config.has_section("client-server-selection"):
        #    server_params = {} # maps serverid to dict of parameters
        #    for (name, value) in self.config.items("client-server-selection"):
        #        pieces = name.split(".")
        #        if pieces[0] == "server":
        #            serverid = pieces[1]
        #            if serverid not in server_params:
        #                server_params[serverid] = {}
        #            server_params[serverid][pieces[2]] = value
        #    for serverid, params in server_params.items():
        #        server_type = params.pop("type")
        #        if server_type == "tahoe-foolscap":
        #            s = storage_client.NativeStorageClient(*params)
        #        else:
        #            msg = ("unrecognized server type '%s' in "
        #                   "tahoe.cfg [client-server-selection]server.%s.type"
        #                   % (server_type, serverid))
        #            raise storage_client.UnknownServerTypeError(msg)
        #        sb.add_server(s.serverid, s)

        # check to see if we're supposed to use the introducer too
        if self.get_config("client-server-selection", "use_introducer",
                           default=True, boolean=True):
            sb.use_introducer(self.introducer_client)

    def get_storage_broker(self):
        return self.storage_broker

    def init_stub_client(self):
        def _publish(res):
            # we publish an empty object so that the introducer can count how
            # many clients are connected and see what versions they're
            # running.
            sc = StubClient()
            furl = self.tub.registerReference(sc)
            ri_name = RIStubClient.__remote_name__
            self.introducer_client.publish(furl, "stub_client", ri_name)
        d = self.when_tub_ready()
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="OEHq3g")

    def init_blacklist(self):
        fn = os.path.join(self.basedir, "access.blacklist")
        self.blacklist = Blacklist(fn)

    def init_nodemaker(self):
        self.nodemaker = NodeMaker(self.storage_broker,
                                   self._secret_holder,
                                   self.get_history(),
                                   self.getServiceNamed("uploader"),
                                   self.terminator,
                                   self.get_encoding_parameters(),
                                   self._key_generator,
                                   self.blacklist)
        default = self.get_config("client", "mutable.format", default="sdmf")
        if default == "mdmf":
            self.mutable_file_default = MDMF_VERSION
        else:
            self.mutable_file_default = SDMF_VERSION

    def get_history(self):
        return self.history

    def init_control(self):
        d = self.when_tub_ready()
        def _publish(res):
            c = ControlServer()
            c.setServiceParent(self)
            control_url = self.tub.registerReference(c)
            self.write_private_config("control.furl", control_url + "\n")
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="d3tNXA")

    def init_helper(self):
        d = self.when_tub_ready()
        def _publish(self):
            self.helper = Helper(os.path.join(self.basedir, "helper"),
                                 self.storage_broker, self._secret_holder,
                                 self.stats_provider, self.history)
            # TODO: this is confusing. BASEDIR/private/helper.furl is created
            # by the helper. BASEDIR/helper.furl is consumed by the client
            # who wants to use the helper. I like having the filename be the
            # same, since that makes 'cp' work smoothly, but the difference
            # between config inputs and generated outputs is hard to see.
            helper_furlfile = os.path.join(self.basedir,
                                           "private", "helper.furl").encode(get_filesystem_encoding())
            self.tub.registerReference(self.helper, furlFile=helper_furlfile)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="K0mW5w")

    def init_key_gen(self, key_gen_furl):
        d = self.when_tub_ready()
        def _subscribe(self):
            self.tub.connectTo(key_gen_furl, self._got_key_generator)
        d.addCallback(_subscribe)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="z9DMzw")

    def _got_key_generator(self, key_generator):
        self._key_generator.set_remote_generator(key_generator)
        key_generator.notifyOnDisconnect(self._lost_key_generator)

    def _lost_key_generator(self):
        self._key_generator.set_remote_generator(None)

    def set_default_mutable_keysize(self, keysize):
        self._key_generator.set_default_keysize(keysize)

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport,))

        from allmydata.webish import WebishServer
        nodeurl_path = os.path.join(self.basedir, "node.url")
        staticdir = self.get_config("node", "web.static", "public_html")
        staticdir = os.path.expanduser(staticdir)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        self.add_service(ws)

    def init_ftp_server(self):
        if self.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("ftpd", "accounts.file", None)
            accounturl = self.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd
            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("sftpd", "accounts.file", None)
            accounturl = self.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.get_config("sftpd", "port", "8022")
            pubkey_file = self.get_config("sftpd", "host_pubkey_file")
            privkey_file = self.get_config("sftpd", "host_privkey_file")

            from allmydata.frontends import sftpd
            s = sftpd.SFTPServer(self, accountfile, accounturl,
                                 sftp_portstr, pubkey_file, privkey_file)
            s.setServiceParent(self)

    def init_drop_uploader(self):
        if self.get_config("drop_upload", "enabled", False, boolean=True):
            upload_dircap = self.get_config("drop_upload", "upload.dircap", None)
            local_dir_utf8 = self.get_config("drop_upload", "local.directory", None)

            if upload_dircap and local_dir_utf8:
                try:
                    from allmydata.frontends import drop_upload
                    s = drop_upload.DropUploader(self, upload_dircap, local_dir_utf8)
                    s.setServiceParent(self)
                    s.startService()
                except Exception, e:
                    self.log("couldn't start drop-uploader: %r", args=(e,))
            else:
                self.log("couldn't start drop-uploader: upload.dircap or local.directory not specified")
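
Example #3's init_client() shows how the [client] shares.* options override DEFAULT_ENCODING_PARAMETERS. A standalone sketch of that mapping (not tahoe-lafs API; the get_config callable stands in for Client.get_config, and KiB is assumed to be 1024):

    DEFAULTS = {"k": 3, "happy": 7, "n": 10, "max_segment_size": 128 * 1024}

    def encoding_params_from_cfg(get_config, defaults=DEFAULTS):
        # get_config(section, key, default) is assumed to behave like
        # Client.get_config in the excerpt above.
        params = defaults.copy()
        params["k"] = int(get_config("client", "shares.needed", params["k"]))
        params["n"] = int(get_config("client", "shares.total", params["n"]))
        params["happy"] = int(get_config("client", "shares.happy", params["happy"]))
        return params

    # With a stub that just returns the supplied default, the defaults survive
    # unchanged: k=3 and n=10 mean a file is encoded into 10 shares, any 3 of
    # which can reconstruct it.
    print(encoding_params_from_cfg(lambda section, key, default: default))
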
Code Example #4
File: client.py  Project: pombredanne/tahoe-lafs
class Client(node.Node, pollmixin.PollMixin):
    implements(IStatsProducer)

    PORTNUMFILE = "client.port"
    STOREDIR = "storage"
    NODETYPE = "client"
    EXIT_TRIGGER_FILE = "exit_trigger"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # This is a dictionary of (needed, desired, total, max_segment_size). 'needed'
    # is the number of shares required to reconstruct a file. 'desired' means
    # that we will abort an upload unless we can allocate space for at least
    # this many. 'total' is the total number of shares created by encoding.
    # If everybody has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {"k": 3, "happy": 7, "n": 10, "max_segment_size": 128 * KiB}

    def __init__(self, basedir="."):
        node.Node.__init__(self, basedir)
        # All tub.registerReference must happen *after* we upcall, since
        # that's what does tub.setLocation()
        self.started_timestamp = time.time()
        self.logSource = "Client"
        self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
        self.init_introducer_client()
        self.init_stats_provider()
        self.init_secrets()
        self.init_node_key()
        self.init_storage()
        self.init_control()
        self._key_generator = KeyGenerator()
        key_gen_furl = self.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            log.msg("[client]key_generator.furl= is now ignored, see #2783")
        self.init_client()
        self.helper = None
        if self.get_config("helper", "enabled", False, boolean=True):
            self.init_helper()
        self.init_ftp_server()
        self.init_sftp_server()
        self.init_drop_uploader()

        # If the node sees an exit_trigger file, it will poll every second to see
        # whether the file still exists, and what its mtime is. If the file does not
        # exist or has not been modified for a given timeout, the node will exit.
        exit_trigger_file = os.path.join(self.basedir, self.EXIT_TRIGGER_FILE)
        if os.path.exists(exit_trigger_file):
            age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME]
            self.log("%s file noticed (%ds old), starting timer" % (self.EXIT_TRIGGER_FILE, age))
            exit_trigger = TimerService(1.0, self._check_exit_trigger, exit_trigger_file)
            exit_trigger.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = self.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport)  # strports string

    def _sequencer(self):
        seqnum_s = self.get_config_from_file("announcement-seqnum")
        if not seqnum_s:
            seqnum_s = "0"
        seqnum = int(seqnum_s.strip())
        seqnum += 1  # increment
        self.write_config("announcement-seqnum", "%d\n" % seqnum)
        nonce = _make_secret().strip()
        return seqnum, nonce

    def init_introducer_client(self):
        self.introducer_furl = self.get_config("client", "introducer.furl")
        introducer_cache_filepath = FilePath(os.path.join(self.basedir, "private", "introducer_cache.yaml"))
        ic = IntroducerClient(
            self.tub,
            self.introducer_furl,
            self.nickname,
            str(allmydata.__full_version__),
            str(self.OLDEST_SUPPORTED_VERSION),
            self.get_app_versions(),
            self._sequencer,
            introducer_cache_filepath,
        )
        self.introducer_client = ic
        ic.setServiceParent(self)

    def init_stats_provider(self):
        gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.add_service(self.stats_provider)
        self.stats_provider.register_producer(self)

    def get_stats(self):
        return {"node.uptime": time.time() - self.started_timestamp}

    def init_secrets(self):
        lease_s = self.get_or_create_private_config("secret", _make_secret)
        lease_secret = base32.a2b(lease_s)
        convergence_s = self.get_or_create_private_config("convergence", _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._secret_holder = SecretHolder(lease_secret, self.convergence)

    def init_node_key(self):
        # we only create the key once. On all subsequent runs, we re-use the
        # existing key
        def _make_key():
            sk_vs, vk_vs = keyutil.make_keypair()
            return sk_vs + "\n"

        sk_vs = self.get_or_create_private_config("node.privkey", _make_key)
        sk, vk_vs = keyutil.parse_privkey(sk_vs.strip())
        self.write_config("node.pubkey", vk_vs + "\n")
        self._node_key = sk

    def get_long_nodeid(self):
        # this matches what IServer.get_longname() says about us elsewhere
        vk_bytes = self._node_key.get_verifying_key_bytes()
        return "v0-" + base32.b2a(vk_bytes)

    def get_long_tubid(self):
        return idlib.nodeid_b2a(self.nodeid)

    def _init_permutation_seed(self, ss):
        seed = self.get_config_from_file("permutation-seed")
        if not seed:
            have_shares = ss.have_shares()
            if have_shares:
                # if the server has shares but not a recorded
                # permutation-seed, then it has been around since pre-#466
                # days, and the clients who uploaded those shares used our
                # TubID as a permutation-seed. We should keep using that same
                # seed to keep the shares in the same place in the permuted
                # ring, so those clients don't have to perform excessive
                # searches.
                seed = base32.b2a(self.nodeid)
            else:
                # otherwise, we're free to use the more natural seed of our
                # pubkey-based serverid
                vk_bytes = self._node_key.get_verifying_key_bytes()
                seed = base32.b2a(vk_bytes)
            self.write_config("permutation-seed", seed + "\n")
        return seed.strip()

    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        if not self.get_config("storage", "enabled", True, boolean=True):
            return
        readonly = self.get_config("storage", "readonly", False, boolean=True)

        storedir = os.path.join(self.basedir, self.STOREDIR)

        data = self.get_config("storage", "reserved_space", None)
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s" % data)
            raise
        if reserved is None:
            reserved = 0
        discard = self.get_config("storage", "debug_discard", False, boolean=True)

        expire = self.get_config("storage", "expire.enabled", False, boolean=True)
        if expire:
            mode = self.get_config("storage", "expire.mode")  # require a mode
        else:
            mode = self.get_config("storage", "expire.mode", "age")

        o_l_d = self.get_config("storage", "expire.override_lease_duration", None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.get_config("storage", "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.get_config("storage", "expire.immutable", True, boolean=True):
            sharetypes.append("immutable")
        if self.get_config("storage", "expire.mutable", True, boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(
            storedir,
            self.nodeid,
            reserved_space=reserved,
            discard_storage=discard,
            readonly_storage=readonly,
            stats_provider=self.stats_provider,
            expiration_enabled=expire,
            expiration_mode=mode,
            expiration_override_lease_duration=o_l_d,
            expiration_cutoff_date=cutoff_date,
            expiration_sharetypes=expiration_sharetypes,
        )
        self.add_service(ss)

        furl_file = os.path.join(self.basedir, "private", "storage.furl").encode(get_filesystem_encoding())
        furl = self.tub.registerReference(ss, furlFile=furl_file)
        ann = {"anonymous-storage-FURL": furl, "permutation-seed-base32": self._init_permutation_seed(ss)}
        self.introducer_client.publish("storage", ann, self._node_key)

    def init_client(self):
        helper_furl = self.get_config("client", "helper.furl", None)
        if helper_furl in ("None", ""):
            helper_furl = None

        DEP = self.encoding_params
        DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(self.get_config("client", "shares.happy", DEP["happy"]))

        # for the CLI to authenticate to local JSON endpoints
        self._create_auth_token()

        self.init_client_storage_broker()
        self.history = History(self.stats_provider)
        self.terminator = Terminator()
        self.terminator.setServiceParent(self)
        self.add_service(Uploader(helper_furl, self.stats_provider, self.history))
        self.init_blacklist()
        self.init_nodemaker()

    def get_auth_token(self):
        """
        This returns a local authentication token, which is just some
        random data in "api_auth_token" which must be echoed to API
        calls.

        Currently only the '/magic' URI (magic-folder status) uses this;
        other endpoints are invited to include it as well, as appropriate.
        """
        return self.get_private_config("api_auth_token")

    def _create_auth_token(self):
        """
        Creates new auth-token data written to 'private/api_auth_token'.

        This is intentionally re-created every time the node starts.
        """
        self.write_private_config("api_auth_token", urlsafe_b64encode(os.urandom(32)) + "\n")

    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        ps = self.get_config("client", "peers.preferred", "").split(",")
        preferred_peers = tuple([p.strip() for p in ps if p != ""])
        sb = storage_client.StorageFarmBroker(
            permute_peers=True, preferred_peers=preferred_peers, tub_options=self.tub_options
        )
        self.storage_broker = sb
        sb.setServiceParent(self)

        connection_threshold = min(self.encoding_params["k"], self.encoding_params["happy"] + 1)
        helper = storage_client.ConnectedEnough(sb, connection_threshold)
        self.upload_ready_d = helper.when_connected_enough()

        # load static server specifications from tahoe.cfg, if any.
        # Not quite ready yet.
        # if self.config.has_section("client-server-selection"):
        #    server_params = {} # maps serverid to dict of parameters
        #    for (name, value) in self.config.items("client-server-selection"):
        #        pieces = name.split(".")
        #        if pieces[0] == "server":
        #            serverid = pieces[1]
        #            if serverid not in server_params:
        #                server_params[serverid] = {}
        #            server_params[serverid][pieces[2]] = value
        #    for serverid, params in server_params.items():
        #        server_type = params.pop("type")
        #        if server_type == "tahoe-foolscap":
        #            s = storage_client.NativeStorageClient(*params)
        #        else:
        #            msg = ("unrecognized server type '%s' in "
        #                   "tahoe.cfg [client-server-selection]server.%s.type"
        #                   % (server_type, serverid))
        #            raise storage_client.UnknownServerTypeError(msg)
        #        sb.add_server(s.serverid, s)

        # check to see if we're supposed to use the introducer too
        if self.get_config("client-server-selection", "use_introducer", default=True, boolean=True):
            sb.use_introducer(self.introducer_client)

    def get_storage_broker(self):
        return self.storage_broker

    def init_blacklist(self):
        fn = os.path.join(self.basedir, "access.blacklist")
        self.blacklist = Blacklist(fn)

    def init_nodemaker(self):
        default = self.get_config("client", "mutable.format", default="SDMF")
        if default.upper() == "MDMF":
            self.mutable_file_default = MDMF_VERSION
        else:
            self.mutable_file_default = SDMF_VERSION
        self.nodemaker = NodeMaker(
            self.storage_broker,
            self._secret_holder,
            self.get_history(),
            self.getServiceNamed("uploader"),
            self.terminator,
            self.get_encoding_parameters(),
            self.mutable_file_default,
            self._key_generator,
            self.blacklist,
        )

    def get_history(self):
        return self.history

    def init_control(self):
        c = ControlServer()
        c.setServiceParent(self)
        control_url = self.tub.registerReference(c)
        self.write_private_config("control.furl", control_url + "\n")

    def init_helper(self):
        self.helper = Helper(
            os.path.join(self.basedir, "helper"),
            self.storage_broker,
            self._secret_holder,
            self.stats_provider,
            self.history,
        )
        # TODO: this is confusing. BASEDIR/private/helper.furl is created by
        # the helper. BASEDIR/helper.furl is consumed by the client who wants
        # to use the helper. I like having the filename be the same, since
        # that makes 'cp' work smoothly, but the difference between config
        # inputs and generated outputs is hard to see.
        helper_furlfile = os.path.join(self.basedir, "private", "helper.furl").encode(get_filesystem_encoding())
        self.tub.registerReference(self.helper, furlFile=helper_furlfile)

    def set_default_mutable_keysize(self, keysize):
        self._key_generator.set_default_keysize(keysize)

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport,))

        from allmydata.webish import WebishServer

        nodeurl_path = os.path.join(self.basedir, "node.url")
        staticdir_config = self.get_config("node", "web.static", "public_html").decode("utf-8")
        staticdir = abspath_expanduser_unicode(staticdir_config, base=self.basedir)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        self.add_service(ws)

    def init_ftp_server(self):
        if self.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = from_utf8_or_none(self.get_config("ftpd", "accounts.file", None))
            if accountfile:
                accountfile = abspath_expanduser_unicode(accountfile, base=self.basedir)
            accounturl = self.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd

            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = from_utf8_or_none(self.get_config("sftpd", "accounts.file", None))
            if accountfile:
                accountfile = abspath_expanduser_unicode(accountfile, base=self.basedir)
            accounturl = self.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.get_config("sftpd", "port", "8022")
            pubkey_file = from_utf8_or_none(self.get_config("sftpd", "host_pubkey_file"))
            privkey_file = from_utf8_or_none(self.get_config("sftpd", "host_privkey_file"))

            from allmydata.frontends import sftpd

            s = sftpd.SFTPServer(self, accountfile, accounturl, sftp_portstr, pubkey_file, privkey_file)
            s.setServiceParent(self)

    def init_drop_uploader(self):
        if self.get_config("drop_upload", "enabled", False, boolean=True):
            if self.get_config("drop_upload", "upload.dircap", None):
                raise OldConfigOptionError(
                    "The [drop_upload]upload.dircap option is no longer supported; please "
                    "put the cap in a 'private/drop_upload_dircap' file, and delete this option."
                )

            upload_dircap = self.get_or_create_private_config("drop_upload_dircap")
            local_dir_utf8 = self.get_config("drop_upload", "local.directory")

            try:
                from allmydata.frontends import drop_upload

                s = drop_upload.DropUploader(self, upload_dircap, local_dir_utf8)
                s.setServiceParent(self)
                s.startService()

                # start processing the upload queue when we've connected to enough servers
                self.upload_ready_d.addCallback(s.upload_ready)
            except Exception, e:
                self.log("couldn't start drop-uploader: %r", args=(e,))
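
Example #4 also adds an "upload ready" gate: init_client_storage_broker() wraps the broker in storage_client.ConnectedEnough with a threshold of min(k, happy + 1), and init_drop_uploader() chains the uploader's queue onto the resulting Deferred. With the default encoding parameters the arithmetic works out as follows:

    k, happy = 3, 7                        # DEFAULT_ENCODING_PARAMETERS
    connection_threshold = min(k, happy + 1)
    assert connection_threshold == 3       # min(3, 8)
    # upload_ready_d fires, and the drop-uploader starts draining its queue,
    # once at least this many storage servers are connected.
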
Code Example #5
File: client.py  Project: tahoe-lafs/tahoe-lafs
class _Client(node.Node, pollmixin.PollMixin):

    STOREDIR = 'storage'
    NODETYPE = "client"
    EXIT_TRIGGER_FILE = "exit_trigger"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # This is a dictionary of (needed, desired, total, max_segment_size). 'needed'
    # is the number of shares required to reconstruct a file. 'desired' means
    # that we will abort an upload unless we can allocate space for at least
    # this many. 'total' is the total number of shares created by encoding.
    # If everybody has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {"k": 3,
                                   "happy": 7,
                                   "n": 10,
                                   "max_segment_size": DEFAULT_MAX_SEGMENT_SIZE,
                                   }

    def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider, introducer_clients,
                 storage_farm_broker):
        """
        Use :func:`allmydata.client.create_client` to instantiate one of these.
        """
        node.Node.__init__(self, config, main_tub, control_tub, i2p_provider, tor_provider)

        self._magic_folders = dict()
        self.started_timestamp = time.time()
        self.logSource = "Client"
        self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()

        self.introducer_clients = introducer_clients
        self.storage_broker = storage_farm_broker

        self.init_stats_provider()
        self.init_secrets()
        self.init_node_key()
        self.init_storage()
        self.init_control()
        self._key_generator = KeyGenerator()
        key_gen_furl = config.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            log.msg("[client]key_generator.furl= is now ignored, see #2783")
        self.init_client()
        self.load_static_servers()
        self.helper = None
        if config.get_config("helper", "enabled", False, boolean=True):
            if not self._is_tub_listening():
                raise ValueError("config error: helper is enabled, but tub "
                                 "is not listening ('tub.port=' is empty)")
            self.init_helper()
        self.init_ftp_server()
        self.init_sftp_server()
        self.init_magic_folder()

        # If the node sees an exit_trigger file, it will poll every second to see
        # whether the file still exists, and what its mtime is. If the file does not
        # exist or has not been modified for a given timeout, the node will exit.
        exit_trigger_file = config.get_config_path(self.EXIT_TRIGGER_FILE)
        if os.path.exists(exit_trigger_file):
            age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME]
            self.log("%s file noticed (%ds old), starting timer" % (self.EXIT_TRIGGER_FILE, age))
            exit_trigger = TimerService(1.0, self._check_exit_trigger, exit_trigger_file)
            exit_trigger.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = config.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport) # strports string

    def init_stats_provider(self):
        gatherer_furl = self.config.get_config("client", "stats_gatherer.furl", None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.stats_provider.setServiceParent(self)
        self.stats_provider.register_producer(self)

    def get_stats(self):
        return { 'node.uptime': time.time() - self.started_timestamp }

    def init_secrets(self):
        lease_s = self.config.get_or_create_private_config("secret", _make_secret)
        lease_secret = base32.a2b(lease_s)
        convergence_s = self.config.get_or_create_private_config('convergence',
                                                                 _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._secret_holder = SecretHolder(lease_secret, self.convergence)

    def init_node_key(self):
        # we only create the key once. On all subsequent runs, we re-use the
        # existing key
        def _make_key():
            sk_vs,vk_vs = keyutil.make_keypair()
            return sk_vs+"\n"
        sk_vs = self.config.get_or_create_private_config("node.privkey", _make_key)
        sk,vk_vs = keyutil.parse_privkey(sk_vs.strip())
        self.config.write_config_file("node.pubkey", vk_vs+"\n")
        self._node_key = sk

    def get_long_nodeid(self):
        # this matches what IServer.get_longname() says about us elsewhere
        vk_bytes = self._node_key.get_verifying_key_bytes()
        return "v0-"+base32.b2a(vk_bytes)

    def get_long_tubid(self):
        return idlib.nodeid_b2a(self.nodeid)

    def _init_permutation_seed(self, ss):
        seed = self.config.get_config_from_file("permutation-seed")
        if not seed:
            have_shares = ss.have_shares()
            if have_shares:
                # if the server has shares but not a recorded
                # permutation-seed, then it has been around since pre-#466
                # days, and the clients who uploaded those shares used our
                # TubID as a permutation-seed. We should keep using that same
                # seed to keep the shares in the same place in the permuted
                # ring, so those clients don't have to perform excessive
                # searches.
                seed = base32.b2a(self.nodeid)
            else:
                # otherwise, we're free to use the more natural seed of our
                # pubkey-based serverid
                vk_bytes = self._node_key.get_verifying_key_bytes()
                seed = base32.b2a(vk_bytes)
            self.config.write_config_file("permutation-seed", seed+"\n")
        return seed.strip()

    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        if not self.config.get_config("storage", "enabled", True, boolean=True):
            return
        if not self._is_tub_listening():
            raise ValueError("config error: storage is enabled, but tub "
                             "is not listening ('tub.port=' is empty)")
        readonly = self.config.get_config("storage", "readonly", False, boolean=True)

        config_storedir = self.get_config(
            "storage", "storage_dir", self.STOREDIR,
        ).decode('utf-8')
        storedir = self.config.get_config_path(config_storedir)

        data = self.config.get_config("storage", "reserved_space", None)
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s"
                    % data)
            raise
        if reserved is None:
            reserved = 0
        discard = self.config.get_config("storage", "debug_discard", False,
                                         boolean=True)

        expire = self.config.get_config("storage", "expire.enabled", False, boolean=True)
        if expire:
            mode = self.config.get_config("storage", "expire.mode") # require a mode
        else:
            mode = self.config.get_config("storage", "expire.mode", "age")

        o_l_d = self.config.get_config("storage", "expire.override_lease_duration", None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.config.get_config("storage", "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.config.get_config("storage", "expire.immutable", True, boolean=True):
            sharetypes.append("immutable")
        if self.config.get_config("storage", "expire.mutable", True, boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(storedir, self.nodeid,
                           reserved_space=reserved,
                           discard_storage=discard,
                           readonly_storage=readonly,
                           stats_provider=self.stats_provider,
                           expiration_enabled=expire,
                           expiration_mode=mode,
                           expiration_override_lease_duration=o_l_d,
                           expiration_cutoff_date=cutoff_date,
                           expiration_sharetypes=expiration_sharetypes)
        ss.setServiceParent(self)

        furl_file = self.config.get_private_path("storage.furl").encode(get_filesystem_encoding())
        furl = self.tub.registerReference(ss, furlFile=furl_file)
        ann = {"anonymous-storage-FURL": furl,
               "permutation-seed-base32": self._init_permutation_seed(ss),
               }
        for ic in self.introducer_clients:
            ic.publish("storage", ann, self._node_key)

    def init_client(self):
        helper_furl = self.config.get_config("client", "helper.furl", None)
        if helper_furl in ("None", ""):
            helper_furl = None

        DEP = self.encoding_params
        DEP["k"] = int(self.config.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(self.config.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(self.config.get_config("client", "shares.happy", DEP["happy"]))

        # for the CLI to authenticate to local JSON endpoints
        self._create_auth_token()

        self.history = History(self.stats_provider)
        self.terminator = Terminator()
        self.terminator.setServiceParent(self)
        uploader = Uploader(
            helper_furl,
            self.stats_provider,
            self.history,
        )
        uploader.setServiceParent(self)
        self.init_blacklist()
        self.init_nodemaker()

    def get_auth_token(self):
        """
        This returns a local authentication token, which is just some
        random data in "api_auth_token" which must be echoed to API
        calls.

        Currently only the '/magic' URI (magic-folder status) uses this;
        other endpoints are invited to include it as well, as appropriate.
        """
        return self.config.get_private_config('api_auth_token')

    def _create_auth_token(self):
        """
        Creates new auth-token data written to 'private/api_auth_token'.

        This is intentionally re-created every time the node starts.
        """
        self.config.write_private_config(
            'api_auth_token',
            urlsafe_b64encode(os.urandom(32)) + '\n',
        )

    def get_storage_broker(self):
        return self.storage_broker

    def load_static_servers(self):
        """
        Load the servers.yaml file if it exists, and provide the static
        server data to the StorageFarmBroker.
        """
        fn = self.config.get_private_path("servers.yaml")
        servers_filepath = FilePath(fn)
        try:
            with servers_filepath.open() as f:
                servers_yaml = yamlutil.safe_load(f)
            static_servers = servers_yaml.get("storage", {})
            log.msg("found %d static servers in private/servers.yaml" %
                    len(static_servers))
            self.storage_broker.set_static_servers(static_servers)
        except EnvironmentError:
            pass

    def init_blacklist(self):
        fn = self.config.get_config_path("access.blacklist")
        self.blacklist = Blacklist(fn)

    def init_nodemaker(self):
        default = self.config.get_config("client", "mutable.format", default="SDMF")
        if default.upper() == "MDMF":
            self.mutable_file_default = MDMF_VERSION
        else:
            self.mutable_file_default = SDMF_VERSION
        self.nodemaker = NodeMaker(self.storage_broker,
                                   self._secret_holder,
                                   self.get_history(),
                                   self.getServiceNamed("uploader"),
                                   self.terminator,
                                   self.get_encoding_parameters(),
                                   self.mutable_file_default,
                                   self._key_generator,
                                   self.blacklist)

    def get_history(self):
        return self.history

    def init_control(self):
        c = ControlServer()
        c.setServiceParent(self)
        control_url = self.control_tub.registerReference(c)
        self.config.write_private_config("control.furl", control_url + "\n")

    def init_helper(self):
        self.helper = Helper(self.config.get_config_path("helper"),
                             self.storage_broker, self._secret_holder,
                             self.stats_provider, self.history)
        # TODO: this is confusing. BASEDIR/private/helper.furl is created by
        # the helper. BASEDIR/helper.furl is consumed by the client who wants
        # to use the helper. I like having the filename be the same, since
        # that makes 'cp' work smoothly, but the difference between config
        # inputs and generated outputs is hard to see.
        helper_furlfile = self.config.get_private_path("helper.furl").encode(get_filesystem_encoding())
        self.tub.registerReference(self.helper, furlFile=helper_furlfile)

    def set_default_mutable_keysize(self, keysize):
        self._key_generator.set_default_keysize(keysize)

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport,))

        from allmydata.webish import WebishServer
        nodeurl_path = self.config.get_config_path("node.url")
        staticdir_config = self.config.get_config("node", "web.static", "public_html").decode("utf-8")
        staticdir = self.config.get_config_path(staticdir_config)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        ws.setServiceParent(self)

    def init_ftp_server(self):
        if self.config.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = from_utf8_or_none(
                self.config.get_config("ftpd", "accounts.file", None))
            if accountfile:
                accountfile = self.config.get_config_path(accountfile)
            accounturl = self.config.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.config.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd
            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.config.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = from_utf8_or_none(
                self.config.get_config("sftpd", "accounts.file", None))
            if accountfile:
                accountfile = self.config.get_config_path(accountfile)
            accounturl = self.config.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.config.get_config("sftpd", "port", "8022")
            pubkey_file = from_utf8_or_none(self.config.get_config("sftpd", "host_pubkey_file"))
            privkey_file = from_utf8_or_none(self.config.get_config("sftpd", "host_privkey_file"))

            from allmydata.frontends import sftpd
            s = sftpd.SFTPServer(self, accountfile, accounturl,
                                 sftp_portstr, pubkey_file, privkey_file)
            s.setServiceParent(self)

    def init_magic_folder(self):
        #print "init_magic_folder"
        if self.config.get_config("drop_upload", "enabled", False, boolean=True):
            raise node.OldConfigOptionError(
                "The [drop_upload] section must be renamed to [magic_folder].\n"
                "See docs/frontends/magic-folder.rst for more information."
            )

        if self.config.get_config("magic_folder", "enabled", False, boolean=True):
            from allmydata.frontends import magic_folder

            try:
                magic_folders = magic_folder.load_magic_folders(self.config._basedir)
            except Exception as e:
                log.msg("Error loading magic-folder config: {}".format(e))
                raise

            # start processing the upload queue when we've connected to
            # enough servers
            threshold = min(self.encoding_params["k"],
                            self.encoding_params["happy"] + 1)

            for (name, mf_config) in magic_folders.items():
                self.log("Starting magic_folder '{}'".format(name))
                s = magic_folder.MagicFolder.from_config(self, name, mf_config)
                self._magic_folders[name] = s
                s.setServiceParent(self)

                connected_d = self.storage_broker.when_connected_enough(threshold)
                def connected_enough(ign, mf):
                    mf.ready()  # returns a Deferred we ignore
                    return None
                connected_d.addCallback(connected_enough, s)

    def _check_exit_trigger(self, exit_trigger_file):
        if os.path.exists(exit_trigger_file):
            mtime = os.stat(exit_trigger_file)[stat.ST_MTIME]
            if mtime > time.time() - 120.0:
                return
            else:
                self.log("%s file too old, shutting down" % (self.EXIT_TRIGGER_FILE,))
        else:
            self.log("%s file missing, shutting down" % (self.EXIT_TRIGGER_FILE,))
        reactor.stop()

    def get_encoding_parameters(self):
        return self.encoding_params

    def introducer_connection_statuses(self):
        return [ic.connection_status() for ic in self.introducer_clients]

    def connected_to_introducer(self):
        return any([ic.connected_to_introducer() for ic in self.introducer_clients])

    def get_renewal_secret(self): # this will go away
        return self._secret_holder.get_renewal_secret()

    def get_cancel_secret(self):
        return self._secret_holder.get_cancel_secret()

    def debug_wait_for_client_connections(self, num_clients):
        """Return a Deferred that fires (with None) when we have connections
        to the given number of peers. Useful for tests that set up a
        temporary test network and need to know when it is safe to proceed
        with an upload or download."""
        def _check():
            return len(self.storage_broker.get_connected_servers()) >= num_clients
        d = self.poll(_check, 0.5)
        d.addCallback(lambda res: None)
        return d


    # these four methods are the primitives for creating filenodes and
    # dirnodes. The first takes a URI and produces a filenode or (new-style)
    # dirnode. The other three create brand-new filenodes/dirnodes.

    def create_node_from_uri(self, write_uri, read_uri=None, deep_immutable=False, name="<unknown name>"):
        # This returns synchronously.
        # Note that it does *not* validate the write_uri and read_uri; instead we
        # may get an opaque node if there were any problems.
        return self.nodemaker.create_from_cap(write_uri, read_uri, deep_immutable=deep_immutable, name=name)

    def create_dirnode(self, initial_children={}, version=None):
        d = self.nodemaker.create_new_mutable_directory(initial_children, version=version)
        return d

    def create_immutable_dirnode(self, children, convergence=None):
        return self.nodemaker.create_immutable_directory(children, convergence)

    def create_mutable_file(self, contents=None, keysize=None, version=None):
        return self.nodemaker.create_mutable_file(contents, keysize,
                                                  version=version)

    def upload(self, uploadable, reactor=None):
        uploader = self.getServiceNamed("uploader")
        return uploader.upload(uploadable, reactor=reactor)
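
The exit_trigger mechanism used in Examples #4 through #6 (see _check_exit_trigger() above) polls the file once per second and stops the reactor when the file is missing or its mtime is older than roughly 120 seconds. A hedged sketch of how a test harness might keep such a node alive; basedir and test_is_running() are hypothetical stand-ins, not tahoe-lafs API:

    import os
    import time

    basedir = "."                              # assumed node base directory
    trigger = os.path.join(basedir, "exit_trigger")

    def test_is_running():                     # hypothetical predicate
        return False

    open(trigger, "w").close()                 # must exist when the node starts
    while test_is_running():
        os.utime(trigger, None)                # refresh mtime well inside the 120s window
        time.sleep(30)
    os.remove(trigger)                         # the node's poller will now shut it down
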
Code Example #6
class _Client(node.Node, pollmixin.PollMixin):

    STOREDIR = 'storage'
    NODETYPE = "client"
    EXIT_TRIGGER_FILE = "exit_trigger"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # This is a dictionary of (needed, desired, total, max_segment_size). 'needed'
    # is the number of shares required to reconstruct a file. 'desired' means
    # that we will abort an upload unless we can allocate space for at least
    # this many. 'total' is the total number of shares created by encoding.
        # If everybody has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {
        "k": 3,
        "happy": 7,
        "n": 10,
        "max_segment_size": DEFAULT_MAX_SEGMENT_SIZE,
    }

    def __init__(self, config, main_tub, control_tub, i2p_provider,
                 tor_provider, introducer_clients, storage_farm_broker):
        """
        Use :func:`allmydata.client.create_client` to instantiate one of these.
        """
        node.Node.__init__(self, config, main_tub, control_tub, i2p_provider,
                           tor_provider)

        self.started_timestamp = time.time()
        self.logSource = "Client"
        self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()

        self.introducer_clients = introducer_clients
        self.storage_broker = storage_farm_broker

        self.init_stats_provider()
        self.init_secrets()
        self.init_node_key()
        self.init_control()
        self._key_generator = KeyGenerator()
        key_gen_furl = config.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            log.msg("[client]key_generator.furl= is now ignored, see #2783")
        self.init_client()
        self.load_static_servers()
        self.helper = None
        if config.get_config("helper", "enabled", False, boolean=True):
            if not self._is_tub_listening():
                raise ValueError("config error: helper is enabled, but tub "
                                 "is not listening ('tub.port=' is empty)")
            self.init_helper()
        self.init_ftp_server()
        self.init_sftp_server()

        # If the node sees an exit_trigger file, it will poll every second to see
        # whether the file still exists, and what its mtime is. If the file does not
        # exist or has not been modified for a given timeout, the node will exit.
        exit_trigger_file = config.get_config_path(self.EXIT_TRIGGER_FILE)
        if os.path.exists(exit_trigger_file):
            age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME]
            self.log("%s file noticed (%ds old), starting timer" %
                     (self.EXIT_TRIGGER_FILE, age))
            exit_trigger = TimerService(1.0, self._check_exit_trigger,
                                        exit_trigger_file)
            exit_trigger.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = config.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport)  # strports string

    def init_stats_provider(self):
        gatherer_furl = self.config.get_config("client", "stats_gatherer.furl",
                                               None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.stats_provider.setServiceParent(self)
        self.stats_provider.register_producer(self)

    def get_stats(self):
        return {'node.uptime': time.time() - self.started_timestamp}

    def init_secrets(self):
        lease_s = self.config.get_or_create_private_config(
            "secret", _make_secret)
        lease_secret = base32.a2b(lease_s)
        convergence_s = self.config.get_or_create_private_config(
            'convergence', _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._secret_holder = SecretHolder(lease_secret, self.convergence)

    def init_node_key(self):
        # we only create the key once. On all subsequent runs, we re-use the
        # existing key
        def _make_key():
            private_key, _ = ed25519.create_signing_keypair()
            return ed25519.string_from_signing_key(private_key) + "\n"

        private_key_str = self.config.get_or_create_private_config(
            "node.privkey", _make_key)
        private_key, public_key = ed25519.signing_keypair_from_string(
            private_key_str)
        public_key_str = ed25519.string_from_verifying_key(public_key)
        self.config.write_config_file("node.pubkey", public_key_str + "\n",
                                      "w")
        self._node_private_key = private_key
        self._node_public_key = public_key

    def get_long_nodeid(self):
        # this matches what IServer.get_longname() says about us elsewhere
        vk_string = ed25519.string_from_verifying_key(self._node_public_key)
        return remove_prefix(vk_string, "pub-")

    def get_long_tubid(self):
        return idlib.nodeid_b2a(self.nodeid)

    def get_web_service(self):
        """
        :return: a reference to our web server
        """
        return self.getServiceNamed("webish")

    def _init_permutation_seed(self, ss):
        seed = self.config.get_config_from_file("permutation-seed")
        if not seed:
            have_shares = ss.have_shares()
            if have_shares:
                # if the server has shares but not a recorded
                # permutation-seed, then it has been around since pre-#466
                # days, and the clients who uploaded those shares used our
                # TubID as a permutation-seed. We should keep using that same
                # seed to keep the shares in the same place in the permuted
                # ring, so those clients don't have to perform excessive
                # searches.
                seed = base32.b2a(self.nodeid)
            else:
                # otherwise, we're free to use the more natural seed of our
                # pubkey-based serverid
                vk_string = ed25519.string_from_verifying_key(
                    self._node_public_key)
                vk_bytes = remove_prefix(vk_string, ed25519.PUBLIC_KEY_PREFIX)
                seed = base32.b2a(vk_bytes)
            self.config.write_config_file("permutation-seed", seed + "\n")
        return seed.strip()

    def get_anonymous_storage_server(self):
        """
        Get the anonymous ``IStorageServer`` implementation for this node.

        Note this will return an object even if storage is disabled on this
        node (but the object will not be exposed, peers will not be able to
        access it, and storage will remain disabled).

        The one and only instance for this node is always returned.  It is
        created first if necessary.
        """
        try:
            ss = self.getServiceNamed(StorageServer.name)
        except KeyError:
            pass
        else:
            return ss

        readonly = self.config.get_config("storage",
                                          "readonly",
                                          False,
                                          boolean=True)

        config_storedir = self.config.get_config(
            "storage",
            "storage_dir",
            self.STOREDIR,
        ).decode('utf-8')
        storedir = self.config.get_config_path(config_storedir)

        data = self.config.get_config("storage", "reserved_space", None)
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s" %
                    data)
            raise
        if reserved is None:
            reserved = 0
        discard = self.config.get_config("storage",
                                         "debug_discard",
                                         False,
                                         boolean=True)

        expire = self.config.get_config("storage",
                                        "expire.enabled",
                                        False,
                                        boolean=True)
        if expire:
            mode = self.config.get_config("storage",
                                          "expire.mode")  # require a mode
        else:
            mode = self.config.get_config("storage", "expire.mode", "age")

        o_l_d = self.config.get_config("storage",
                                       "expire.override_lease_duration", None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.config.get_config("storage",
                                                 "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.config.get_config("storage",
                                  "expire.immutable",
                                  True,
                                  boolean=True):
            sharetypes.append("immutable")
        if self.config.get_config("storage",
                                  "expire.mutable",
                                  True,
                                  boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(storedir,
                           self.nodeid,
                           reserved_space=reserved,
                           discard_storage=discard,
                           readonly_storage=readonly,
                           stats_provider=self.stats_provider,
                           expiration_enabled=expire,
                           expiration_mode=mode,
                           expiration_override_lease_duration=o_l_d,
                           expiration_cutoff_date=cutoff_date,
                           expiration_sharetypes=expiration_sharetypes)
        ss.setServiceParent(self)
        return ss

    def init_storage(self, announceable_storage_servers):
        # should we run a storage server (and publish it for others to use)?
        if not storage_enabled(self.config):
            return
        if not self._is_tub_listening():
            raise ValueError("config error: storage is enabled, but tub "
                             "is not listening ('tub.port=' is empty)")

        ss = self.get_anonymous_storage_server()
        announcement = {
            "permutation-seed-base32": self._init_permutation_seed(ss),
        }

        if anonymous_storage_enabled(self.config):
            furl_file = self.config.get_private_path("storage.furl").encode(
                get_filesystem_encoding())
            furl = self.tub.registerReference(ss, furlFile=furl_file)
            announcement["anonymous-storage-FURL"] = furl

        enabled_storage_servers = self._enable_storage_servers(
            announceable_storage_servers, )
        storage_options = list(storage_server.announcement
                               for storage_server in enabled_storage_servers)
        plugins_announcement = {}
        if storage_options:
            # Only add the new key if there are any plugins enabled.
            plugins_announcement[u"storage-options"] = storage_options

        announcement.update(plugins_announcement)

        for ic in self.introducer_clients:
            ic.publish("storage", announcement, self._node_private_key)

    def get_client_storage_plugin_web_resources(self):
        """
        Get all of the client-side ``IResource`` implementations provided by
        enabled storage plugins.

        :return dict[bytes, IResource provider]: The implementations.
        """
        return self.storage_broker.get_client_storage_plugin_web_resources(
            self.config, )

    def _enable_storage_servers(self, announceable_storage_servers):
        """
        Register and announce the given storage servers.
        """
        for announceable in announceable_storage_servers:
            yield self._enable_storage_server(announceable)

    def _enable_storage_server(self, announceable_storage_server):
        """
        Register a storage server.
        """
        config_key = b"storage-plugin.{}.furl".format(
            # Oops, why don't I have a better handle on this value?
            announceable_storage_server.announcement[u"name"], )
        furl = _register_reference(
            config_key,
            self.config,
            self.tub,
            announceable_storage_server.storage_server,
        )
        announceable_storage_server = _add_to_announcement(
            {u"storage-server-FURL": furl},
            announceable_storage_server,
        )
        return announceable_storage_server

    def init_client(self):
        helper_furl = self.config.get_config("client", "helper.furl", None)
        if helper_furl in ("None", ""):
            helper_furl = None

        DEP = self.encoding_params
        DEP["k"] = int(
            self.config.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(
            self.config.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(
            self.config.get_config("client", "shares.happy", DEP["happy"]))

        # for the CLI to authenticate to local JSON endpoints
        self._create_auth_token()

        self.history = History(self.stats_provider)
        self.terminator = Terminator()
        self.terminator.setServiceParent(self)
        uploader = Uploader(
            helper_furl,
            self.stats_provider,
            self.history,
        )
        uploader.setServiceParent(self)
        self.init_blacklist()
        self.init_nodemaker()

    def get_auth_token(self):
        """
        This returns a local authentication token, which is just some
        random data in "api_auth_token" which must be echoed to API
        calls.
        """
        return self.config.get_private_config('api_auth_token')

    def _create_auth_token(self):
        """
        Creates new auth-token data written to 'private/api_auth_token'.

        This is intentionally re-created every time the node starts.
        """
        self.config.write_private_config(
            'api_auth_token',
            urlsafe_b64encode(os.urandom(32)) + '\n',
        )

    def get_storage_broker(self):
        return self.storage_broker

    def load_static_servers(self):
        """
        Load the servers.yaml file if it exists, and provide the static
        server data to the StorageFarmBroker.
        """
        fn = self.config.get_private_path("servers.yaml")
        servers_filepath = FilePath(fn)
        try:
            with servers_filepath.open() as f:
                servers_yaml = yamlutil.safe_load(f)
            static_servers = servers_yaml.get("storage", {})
            log.msg("found %d static servers in private/servers.yaml" %
                    len(static_servers))
            self.storage_broker.set_static_servers(static_servers)
        except EnvironmentError:
            pass

    def init_blacklist(self):
        fn = self.config.get_config_path("access.blacklist")
        self.blacklist = Blacklist(fn)

    def init_nodemaker(self):
        default = self.config.get_config("client",
                                         "mutable.format",
                                         default="SDMF")
        if default.upper() == "MDMF":
            self.mutable_file_default = MDMF_VERSION
        else:
            self.mutable_file_default = SDMF_VERSION
        self.nodemaker = NodeMaker(self.storage_broker, self._secret_holder,
                                   self.get_history(),
                                   self.getServiceNamed("uploader"),
                                   self.terminator,
                                   self.get_encoding_parameters(),
                                   self.mutable_file_default,
                                   self._key_generator, self.blacklist)

    def get_history(self):
        return self.history

    def init_control(self):
        c = ControlServer()
        c.setServiceParent(self)
        control_url = self.control_tub.registerReference(c)
        self.config.write_private_config("control.furl", control_url + "\n")

    def init_helper(self):
        self.helper = Helper(self.config.get_config_path("helper"),
                             self.storage_broker, self._secret_holder,
                             self.stats_provider, self.history)
        # TODO: this is confusing. BASEDIR/private/helper.furl is created by
        # the helper. BASEDIR/helper.furl is consumed by the client who wants
        # to use the helper. I like having the filename be the same, since
        # that makes 'cp' work smoothly, but the difference between config
        # inputs and generated outputs is hard to see.
        helper_furlfile = self.config.get_private_path("helper.furl").encode(
            get_filesystem_encoding())
        self.tub.registerReference(self.helper, furlFile=helper_furlfile)

    def set_default_mutable_keysize(self, keysize):
        self._key_generator.set_default_keysize(keysize)

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport, ))

        from allmydata.webish import WebishServer
        nodeurl_path = self.config.get_config_path("node.url")
        staticdir_config = self.config.get_config(
            "node", "web.static", "public_html").decode("utf-8")
        staticdir = self.config.get_config_path(staticdir_config)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        ws.setServiceParent(self)

    def init_ftp_server(self):
        if self.config.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = from_utf8_or_none(
                self.config.get_config("ftpd", "accounts.file", None))
            if accountfile:
                accountfile = self.config.get_config_path(accountfile)
            accounturl = self.config.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.config.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd
            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.config.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = from_utf8_or_none(
                self.config.get_config("sftpd", "accounts.file", None))
            if accountfile:
                accountfile = self.config.get_config_path(accountfile)
            accounturl = self.config.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.config.get_config("sftpd", "port", "8022")
            pubkey_file = from_utf8_or_none(
                self.config.get_config("sftpd", "host_pubkey_file"))
            privkey_file = from_utf8_or_none(
                self.config.get_config("sftpd", "host_privkey_file"))

            from allmydata.frontends import sftpd
            s = sftpd.SFTPServer(self, accountfile, accounturl, sftp_portstr,
                                 pubkey_file, privkey_file)
            s.setServiceParent(self)

    def _check_exit_trigger(self, exit_trigger_file):
        if os.path.exists(exit_trigger_file):
            mtime = os.stat(exit_trigger_file)[stat.ST_MTIME]
            if mtime > time.time() - 120.0:
                return
            else:
                self.log("%s file too old, shutting down" %
                         (self.EXIT_TRIGGER_FILE, ))
        else:
            self.log("%s file missing, shutting down" %
                     (self.EXIT_TRIGGER_FILE, ))
        reactor.stop()

    def get_encoding_parameters(self):
        return self.encoding_params

    def introducer_connection_statuses(self):
        return [ic.connection_status() for ic in self.introducer_clients]

    def connected_to_introducer(self):
        return any(
            [ic.connected_to_introducer() for ic in self.introducer_clients])

    def get_renewal_secret(self):  # this will go away
        return self._secret_holder.get_renewal_secret()

    def get_cancel_secret(self):
        return self._secret_holder.get_cancel_secret()

    def debug_wait_for_client_connections(self, num_clients):
        """Return a Deferred that fires (with None) when we have connections
        to the given number of peers. Useful for tests that set up a
        temporary test network and need to know when it is safe to proceed
        with an upload or download."""
        def _check():
            return len(
                self.storage_broker.get_connected_servers()) >= num_clients

        d = self.poll(_check, 0.5)
        d.addCallback(lambda res: None)
        return d

    # these four methods are the primitives for creating filenodes and
    # dirnodes. The first takes a URI and produces a filenode or (new-style)
    # dirnode. The other three create brand-new filenodes/dirnodes.

    def create_node_from_uri(self,
                             write_uri,
                             read_uri=None,
                             deep_immutable=False,
                             name="<unknown name>"):
        # This returns synchronously.
        # Note that it does *not* validate the write_uri and read_uri; instead we
        # may get an opaque node if there were any problems.
        return self.nodemaker.create_from_cap(write_uri,
                                              read_uri,
                                              deep_immutable=deep_immutable,
                                              name=name)

    def create_dirnode(self, initial_children={}, version=None):
        d = self.nodemaker.create_new_mutable_directory(initial_children,
                                                        version=version)
        return d

    def create_immutable_dirnode(self, children, convergence=None):
        return self.nodemaker.create_immutable_directory(children, convergence)

    def create_mutable_file(self, contents=None, keysize=None, version=None):
        return self.nodemaker.create_mutable_file(contents,
                                                  keysize,
                                                  version=version)

    def upload(self, uploadable, reactor=None):
        uploader = self.getServiceNamed("uploader")
        return uploader.upload(uploadable, reactor=reactor)
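The exit-trigger machinery is easy to miss inside that long constructor, so here is the same idea restated on its own. Only the one-second poll and the 120-second staleness window come from the code above; the function and variable names are illustrative.

import os, stat, time
from twisted.application.internet import TimerService
from twisted.internet import reactor

EXIT_TRIGGER_TIMEOUT = 120.0  # matches the window used by _check_exit_trigger

def check_exit_trigger(exit_trigger_file):
    # Keep running only while the file exists and was touched recently;
    # otherwise stop the reactor, which shuts the node down.
    if os.path.exists(exit_trigger_file):
        mtime = os.stat(exit_trigger_file)[stat.ST_MTIME]
        if mtime > time.time() - EXIT_TRIGGER_TIMEOUT:
            return
    reactor.stop()

def watch_exit_trigger(parent_service, exit_trigger_file):
    # Poll once per second, exactly as _Client.__init__ does.
    if os.path.exists(exit_trigger_file):
        timer = TimerService(1.0, check_exit_trigger, exit_trigger_file)
        timer.setServiceParent(parent_service)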
Code Example #7
0
 def init_stats_provider(self):
     gatherer_furl = self.config.get_config("client", "stats_gatherer.furl",
                                            None)
     self.stats_provider = StatsProvider(self, gatherer_furl)
     self.stats_provider.setServiceParent(self)
     self.stats_provider.register_producer(self)
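register_producer() simply records an object whose get_stats() will be polled whenever statistics are gathered; the client itself only reports its uptime. A minimal producer might look like the sketch below (the class name and the request counter are made up for illustration, not part of tahoe-lafs).

import time

class ExampleStatsProducer(object):
    """Illustrative stats producer: anything with a get_stats() -> dict works."""
    def __init__(self):
        self.started = time.time()
        self.requests = 0

    def get_stats(self):
        return {
            "example.uptime": time.time() - self.started,
            "example.requests": self.requests,
        }

# stats_provider.register_producer(ExampleStatsProducer())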
Code Example #8
0
File: client.py Project: trel/tahoe-lafs
 def init_stats_provider(self):
     gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
     self.stats_provider = StatsProvider(self, gatherer_furl)
     self.add_service(self.stats_provider)
     self.stats_provider.register_producer(self)
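The only difference from the newer snippet above is `self.add_service(...)` versus `setServiceParent(self)`. Both end up parenting the StatsProvider under the node's Twisted `MultiService`; `add_service` was just a small convenience wrapper on the old `Node` class, roughly as sketched below (an approximation, not the verbatim helper).

from twisted.application import service

class OldStyleNode(service.MultiService):
    def add_service(self, s):
        # Equivalent to s.setServiceParent(self); returning the child lets
        # callers write e.g. `ws = self.add_service(WebishServer(...))`.
        s.setServiceParent(self)
        return s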
Code Example #9
0
File: client.py Project: trel/tahoe-lafs
class Client(node.Node, pollmixin.PollMixin):
    implements(IStatsProducer)

    PORTNUMFILE = "client.port"
    STOREDIR = 'storage'
    NODETYPE = "client"
    SUICIDE_PREVENTION_HOTLINE_FILE = "suicide_prevention_hotline"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # this is a dictionary of (needed, desired, total, max_segment_size). 'needed'
    # is the number of shares required to reconstruct a file. 'desired' means
    # that we will abort an upload unless we can allocate space for at least
    # this many. 'total' is the total number of shares created by encoding.
    # If everybody has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {
        "k": 3,
        "happy": 7,
        "n": 10,
        "max_segment_size": 128 * KiB,
    }

    def __init__(self, basedir="."):
        node.Node.__init__(self, basedir)
        self.started_timestamp = time.time()
        self.logSource = "Client"
        self.DEFAULT_ENCODING_PARAMETERS = self.DEFAULT_ENCODING_PARAMETERS.copy()
        self.init_introducer_client()
        self.init_stats_provider()
        self.init_secrets()
        self.init_storage()
        self.init_control()
        self.helper = None
        if self.get_config("helper", "enabled", False, boolean=True):
            self.init_helper()
        self._key_generator = KeyGenerator()
        key_gen_furl = self.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            self.init_key_gen(key_gen_furl)
        self.init_client()
        # ControlServer and Helper are attached after Tub startup
        self.init_ftp_server()
        self.init_sftp_server()
        self.init_drop_uploader()

        hotline_file = os.path.join(self.basedir,
                                    self.SUICIDE_PREVENTION_HOTLINE_FILE)
        if os.path.exists(hotline_file):
            age = time.time() - os.stat(hotline_file)[stat.ST_MTIME]
            self.log("hotline file noticed (%ds old), starting timer" % age)
            hotline = TimerService(1.0, self._check_hotline, hotline_file)
            hotline.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = self.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport)  # strports string

    def init_introducer_client(self):
        self.introducer_furl = self.get_config("client", "introducer.furl")
        ic = IntroducerClient(self.tub, self.introducer_furl, self.nickname,
                              str(allmydata.__full_version__),
                              str(self.OLDEST_SUPPORTED_VERSION))
        self.introducer_client = ic
        # hold off on starting the IntroducerClient until our tub has been
        # started, so we'll have a useful address on our RemoteReference, so
        # that the introducer's status page will show us.
        d = self.when_tub_ready()

        def _start_introducer_client(res):
            ic.setServiceParent(self)

        d.addCallback(_start_introducer_client)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="URyI5w")

    def init_stats_provider(self):
        gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.add_service(self.stats_provider)
        self.stats_provider.register_producer(self)

    def get_stats(self):
        return {'node.uptime': time.time() - self.started_timestamp}

    def init_secrets(self):
        lease_s = self.get_or_create_private_config("secret", _make_secret)
        lease_secret = base32.a2b(lease_s)
        convergence_s = self.get_or_create_private_config(
            'convergence', _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._secret_holder = SecretHolder(lease_secret, self.convergence)

    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        if not self.get_config("storage", "enabled", True, boolean=True):
            return
        readonly = self.get_config("storage", "readonly", False, boolean=True)

        storedir = os.path.join(self.basedir, self.STOREDIR)

        data = self.get_config("storage", "reserved_space", None)
        reserved = None
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s" %
                    data)
        if reserved is None:
            reserved = 0
        discard = self.get_config("storage",
                                  "debug_discard",
                                  False,
                                  boolean=True)

        expire = self.get_config("storage",
                                 "expire.enabled",
                                 False,
                                 boolean=True)
        if expire:
            mode = self.get_config("storage", "expire.mode")  # require a mode
        else:
            mode = self.get_config("storage", "expire.mode", "age")

        o_l_d = self.get_config("storage", "expire.override_lease_duration",
                                None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.get_config("storage", "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.get_config("storage", "expire.immutable", True, boolean=True):
            sharetypes.append("immutable")
        if self.get_config("storage", "expire.mutable", True, boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(storedir,
                           self.nodeid,
                           reserved_space=reserved,
                           discard_storage=discard,
                           readonly_storage=readonly,
                           stats_provider=self.stats_provider,
                           expiration_enabled=expire,
                           expiration_mode=mode,
                           expiration_override_lease_duration=o_l_d,
                           expiration_cutoff_date=cutoff_date,
                           expiration_sharetypes=expiration_sharetypes)
        self.add_service(ss)

        d = self.when_tub_ready()

        # we can't do registerReference until the Tub is ready
        def _publish(res):
            furl_file = os.path.join(self.basedir, "private",
                                     "storage.furl").encode(
                                         get_filesystem_encoding())
            furl = self.tub.registerReference(ss, furlFile=furl_file)
            ri_name = RIStorageServer.__remote_name__
            self.introducer_client.publish(furl, "storage", ri_name)

        d.addCallback(_publish)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="aLGBKw")

    def init_client(self):
        helper_furl = self.get_config("client", "helper.furl", None)
        DEP = self.DEFAULT_ENCODING_PARAMETERS
        DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(
            self.get_config("client", "shares.happy", DEP["happy"]))

        self.init_client_storage_broker()
        self.history = History(self.stats_provider)
        self.terminator = Terminator()
        self.terminator.setServiceParent(self)
        self.add_service(
            Uploader(helper_furl, self.stats_provider, self.history))
        self.init_stub_client()
        self.init_blacklist()
        self.init_nodemaker()

    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        sb = storage_client.StorageFarmBroker(self.tub, permute_peers=True)
        self.storage_broker = sb

        # load static server specifications from tahoe.cfg, if any.
        # Not quite ready yet.
        #if self.config.has_section("client-server-selection"):
        #    server_params = {} # maps serverid to dict of parameters
        #    for (name, value) in self.config.items("client-server-selection"):
        #        pieces = name.split(".")
        #        if pieces[0] == "server":
        #            serverid = pieces[1]
        #            if serverid not in server_params:
        #                server_params[serverid] = {}
        #            server_params[serverid][pieces[2]] = value
        #    for serverid, params in server_params.items():
        #        server_type = params.pop("type")
        #        if server_type == "tahoe-foolscap":
        #            s = storage_client.NativeStorageClient(*params)
        #        else:
        #            msg = ("unrecognized server type '%s' in "
        #                   "tahoe.cfg [client-server-selection]server.%s.type"
        #                   % (server_type, serverid))
        #            raise storage_client.UnknownServerTypeError(msg)
        #        sb.add_server(s.serverid, s)

        # check to see if we're supposed to use the introducer too
        if self.get_config("client-server-selection",
                           "use_introducer",
                           default=True,
                           boolean=True):
            sb.use_introducer(self.introducer_client)

    def get_storage_broker(self):
        return self.storage_broker

    def init_stub_client(self):
        def _publish(res):
            # we publish an empty object so that the introducer can count how
            # many clients are connected and see what versions they're
            # running.
            sc = StubClient()
            furl = self.tub.registerReference(sc)
            ri_name = RIStubClient.__remote_name__
            self.introducer_client.publish(furl, "stub_client", ri_name)

        d = self.when_tub_ready()
        d.addCallback(_publish)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="OEHq3g")

    def init_blacklist(self):
        fn = os.path.join(self.basedir, "access.blacklist")
        self.blacklist = Blacklist(fn)

    def init_nodemaker(self):
        default = self.get_config("client", "mutable.format", default="SDMF")
        if default.upper() == "MDMF":
            self.mutable_file_default = MDMF_VERSION
        else:
            self.mutable_file_default = SDMF_VERSION
        self.nodemaker = NodeMaker(self.storage_broker, self._secret_holder,
                                   self.get_history(),
                                   self.getServiceNamed("uploader"),
                                   self.terminator,
                                   self.get_encoding_parameters(),
                                   self.mutable_file_default,
                                   self._key_generator, self.blacklist)

    def get_history(self):
        return self.history

    def init_control(self):
        d = self.when_tub_ready()

        def _publish(res):
            c = ControlServer()
            c.setServiceParent(self)
            control_url = self.tub.registerReference(c)
            self.write_private_config("control.furl", control_url + "\n")

        d.addCallback(_publish)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="d3tNXA")

    def init_helper(self):
        d = self.when_tub_ready()

        def _publish(self):
            self.helper = Helper(os.path.join(self.basedir, "helper"),
                                 self.storage_broker, self._secret_holder,
                                 self.stats_provider, self.history)
            # TODO: this is confusing. BASEDIR/private/helper.furl is created
            # by the helper. BASEDIR/helper.furl is consumed by the client
            # who wants to use the helper. I like having the filename be the
            # same, since that makes 'cp' work smoothly, but the difference
            # between config inputs and generated outputs is hard to see.
            helper_furlfile = os.path.join(self.basedir, "private",
                                           "helper.furl").encode(
                                               get_filesystem_encoding())
            self.tub.registerReference(self.helper, furlFile=helper_furlfile)

        d.addCallback(_publish)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="K0mW5w")

    def init_key_gen(self, key_gen_furl):
        d = self.when_tub_ready()

        def _subscribe(self):
            self.tub.connectTo(key_gen_furl, self._got_key_generator)

        d.addCallback(_subscribe)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="z9DMzw")

    def _got_key_generator(self, key_generator):
        self._key_generator.set_remote_generator(key_generator)
        key_generator.notifyOnDisconnect(self._lost_key_generator)

    def _lost_key_generator(self):
        self._key_generator.set_remote_generator(None)

    def set_default_mutable_keysize(self, keysize):
        self._key_generator.set_default_keysize(keysize)

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport, ))

        from allmydata.webish import WebishServer
        nodeurl_path = os.path.join(self.basedir, "node.url")
        staticdir = self.get_config("node", "web.static", "public_html")
        staticdir = os.path.expanduser(staticdir)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        self.add_service(ws)

    def init_ftp_server(self):
        if self.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("ftpd", "accounts.file", None)
            accounturl = self.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd
            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("sftpd", "accounts.file", None)
            accounturl = self.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.get_config("sftpd", "port", "8022")
            pubkey_file = self.get_config("sftpd", "host_pubkey_file")
            privkey_file = self.get_config("sftpd", "host_privkey_file")

            from allmydata.frontends import sftpd
            s = sftpd.SFTPServer(self, accountfile, accounturl, sftp_portstr,
                                 pubkey_file, privkey_file)
            s.setServiceParent(self)

    def init_drop_uploader(self):
        if self.get_config("drop_upload", "enabled", False, boolean=True):
            upload_dircap = self.get_config("drop_upload", "upload.dircap",
                                            None)
            local_dir_utf8 = self.get_config("drop_upload", "local.directory",
                                             None)

            if upload_dircap and local_dir_utf8:
                try:
                    from allmydata.frontends import drop_upload
                    s = drop_upload.DropUploader(self, upload_dircap,
                                                 local_dir_utf8)
                    s.setServiceParent(self)
                    s.startService()
                except Exception as e:
                    self.log("couldn't start drop-uploader: %r", args=(e, ))
            else:
                self.log(
                    "couldn't start drop-uploader: upload.dircap or local.directory not specified"
                )
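Every version of the class above ships the same default encoding parameters (k=3, happy=7, n=10, 128 KiB segments). As a rough sanity check on what those numbers imply, the back-of-the-envelope sketch below estimates the raw share payload for a file; it ignores hashes, the URI extension block, and all other per-share overhead, so it is an approximation, not the real encoder's accounting.

KiB = 1024

def approx_share_bytes(filesize, k=3, max_segment_size=128 * KiB):
    """Rough share size: each segment is split into k fragments and expanded
    to n shares, so every share carries about 1/k of the ciphertext."""
    per_share = 0
    remaining = filesize
    while remaining > 0:
        seg = min(remaining, max_segment_size)
        per_share += (seg + k - 1) // k   # ceil(seg / k) bytes of this segment
        remaining -= seg
    return per_share

# A 1 MiB file with the defaults: any 3 of the 10 shares reconstruct it.
print(approx_share_bytes(1024 * KiB))   # -> 349528 bytes per share, before overhead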
Code Example #10
0
class Client(node.Node, pollmixin.PollMixin):

    PORTNUMFILE = "client.port"
    STOREDIR = 'storage'
    NODETYPE = "client"
    EXIT_TRIGGER_FILE = "exit_trigger"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # This is a dictionary of (needed, desired, total, max_segment_size). 'needed'
    # is the number of shares required to reconstruct a file. 'desired' means
    # that we will abort an upload unless we can allocate space for at least
    # this many. 'total' is the total number of shares created by encoding.
    # If everybody has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {
        "k": 3,
        "happy": 7,
        "n": 10,
        "max_segment_size": 128 * KiB,
    }

    def __init__(self, basedir="."):
        node.Node.__init__(self, basedir)
        # All tub.registerReference must happen *after* we upcall, since
        # that's what does tub.setLocation()
        configutil.validate_config(self.config_fname, self.config,
                                   _valid_config_sections())
        self._magic_folder = None
        self.started_timestamp = time.time()
        self.logSource = "Client"
        self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
        self.init_introducer_clients()
        self.init_stats_provider()
        self.init_secrets()
        self.init_node_key()
        self.init_storage()
        self.init_control()
        self._key_generator = KeyGenerator()
        key_gen_furl = self.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            log.msg("[client]key_generator.furl= is now ignored, see #2783")
        self.init_client()
        self.load_static_servers()
        self.helper = None
        if self.get_config("helper", "enabled", False, boolean=True):
            if not self._tub_is_listening:
                raise ValueError("config error: helper is enabled, but tub "
                                 "is not listening ('tub.port=' is empty)")
            self.init_helper()
        self.init_ftp_server()
        self.init_sftp_server()
        self.init_magic_folder()

        # If the node sees an exit_trigger file, it will poll every second to see
        # whether the file still exists, and what its mtime is. If the file does not
        # exist or has not been modified for a given timeout, the node will exit.
        exit_trigger_file = os.path.join(self.basedir, self.EXIT_TRIGGER_FILE)
        if os.path.exists(exit_trigger_file):
            age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME]
            self.log("%s file noticed (%ds old), starting timer" %
                     (self.EXIT_TRIGGER_FILE, age))
            exit_trigger = TimerService(1.0, self._check_exit_trigger,
                                        exit_trigger_file)
            exit_trigger.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = self.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport)  # strports string

    def _sequencer(self):
        seqnum_s = self.get_config_from_file("announcement-seqnum")
        if not seqnum_s:
            seqnum_s = "0"
        seqnum = int(seqnum_s.strip())
        seqnum += 1  # increment
        self.write_config("announcement-seqnum", "%d\n" % seqnum)
        nonce = _make_secret().strip()
        return seqnum, nonce

    def init_introducer_clients(self):
        self.introducer_clients = []
        self.introducer_furls = []

        introducers_yaml_filename = os.path.join(self.basedir, "private",
                                                 "introducers.yaml")
        introducers_filepath = FilePath(introducers_yaml_filename)

        try:
            with introducers_filepath.open() as f:
                introducers_yaml = yamlutil.safe_load(f)
                introducers = introducers_yaml.get("introducers", {})
                log.msg("found %d introducers in private/introducers.yaml" %
                        len(introducers))
        except EnvironmentError:
            introducers = {}

        if "default" in introducers.keys():
            raise ValueError(
                "'default' introducer furl cannot be specified in introducers.yaml; please fix impossible configuration."
            )

        # read furl from tahoe.cfg
        tahoe_cfg_introducer_furl = self.get_config("client",
                                                    "introducer.furl", None)
        if tahoe_cfg_introducer_furl == "None":
            raise ValueError("tahoe.cfg has invalid 'introducer.furl = None':"
                             " to disable it, use 'introducer.furl ='"
                             " or omit the key entirely")
        if tahoe_cfg_introducer_furl:
            introducers[u'default'] = {'furl': tahoe_cfg_introducer_furl}

        for petname, introducer in introducers.items():
            introducer_cache_filepath = FilePath(
                os.path.join(self.basedir, "private",
                             "introducer_{}_cache.yaml".format(petname)))
            ic = IntroducerClient(self.tub, introducer['furl'].encode("ascii"),
                                  self.nickname,
                                  str(allmydata.__full_version__),
                                  str(self.OLDEST_SUPPORTED_VERSION),
                                  self.get_app_versions(), self._sequencer,
                                  introducer_cache_filepath)
            self.introducer_clients.append(ic)
            self.introducer_furls.append(introducer['furl'])
            ic.setServiceParent(self)

    def init_stats_provider(self):
        gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.add_service(self.stats_provider)
        self.stats_provider.register_producer(self)

    def get_stats(self):
        return {'node.uptime': time.time() - self.started_timestamp}

    def init_secrets(self):
        lease_s = self.get_or_create_private_config("secret", _make_secret)
        lease_secret = base32.a2b(lease_s)
        convergence_s = self.get_or_create_private_config(
            'convergence', _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._secret_holder = SecretHolder(lease_secret, self.convergence)

    def init_node_key(self):
        # we only create the key once. On all subsequent runs, we re-use the
        # existing key
        def _make_key():
            sk_vs, vk_vs = keyutil.make_keypair()
            return sk_vs + "\n"

        sk_vs = self.get_or_create_private_config("node.privkey", _make_key)
        sk, vk_vs = keyutil.parse_privkey(sk_vs.strip())
        self.write_config("node.pubkey", vk_vs + "\n")
        self._node_key = sk

    def get_long_nodeid(self):
        # this matches what IServer.get_longname() says about us elsewhere
        vk_bytes = self._node_key.get_verifying_key_bytes()
        return "v0-" + base32.b2a(vk_bytes)

    def get_long_tubid(self):
        return idlib.nodeid_b2a(self.nodeid)

    def _init_permutation_seed(self, ss):
        seed = self.get_config_from_file("permutation-seed")
        if not seed:
            have_shares = ss.have_shares()
            if have_shares:
                # if the server has shares but not a recorded
                # permutation-seed, then it has been around since pre-#466
                # days, and the clients who uploaded those shares used our
                # TubID as a permutation-seed. We should keep using that same
                # seed to keep the shares in the same place in the permuted
                # ring, so those clients don't have to perform excessive
                # searches.
                seed = base32.b2a(self.nodeid)
            else:
                # otherwise, we're free to use the more natural seed of our
                # pubkey-based serverid
                vk_bytes = self._node_key.get_verifying_key_bytes()
                seed = base32.b2a(vk_bytes)
            self.write_config("permutation-seed", seed + "\n")
        return seed.strip()

    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        if not self.get_config("storage", "enabled", True, boolean=True):
            return
        if not self._tub_is_listening:
            raise ValueError("config error: storage is enabled, but tub "
                             "is not listening ('tub.port=' is empty)")
        readonly = self.get_config("storage", "readonly", False, boolean=True)

        storedir = os.path.join(self.basedir, self.STOREDIR)

        data = self.get_config("storage", "reserved_space", None)
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s" %
                    data)
            raise
        if reserved is None:
            reserved = 0
        discard = self.get_config("storage",
                                  "debug_discard",
                                  False,
                                  boolean=True)

        expire = self.get_config("storage",
                                 "expire.enabled",
                                 False,
                                 boolean=True)
        if expire:
            mode = self.get_config("storage", "expire.mode")  # require a mode
        else:
            mode = self.get_config("storage", "expire.mode", "age")

        o_l_d = self.get_config("storage", "expire.override_lease_duration",
                                None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.get_config("storage", "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.get_config("storage", "expire.immutable", True, boolean=True):
            sharetypes.append("immutable")
        if self.get_config("storage", "expire.mutable", True, boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(storedir,
                           self.nodeid,
                           reserved_space=reserved,
                           discard_storage=discard,
                           readonly_storage=readonly,
                           stats_provider=self.stats_provider,
                           expiration_enabled=expire,
                           expiration_mode=mode,
                           expiration_override_lease_duration=o_l_d,
                           expiration_cutoff_date=cutoff_date,
                           expiration_sharetypes=expiration_sharetypes)
        self.add_service(ss)

        furl_file = os.path.join(self.basedir, "private",
                                 "storage.furl").encode(
                                     get_filesystem_encoding())
        furl = self.tub.registerReference(ss, furlFile=furl_file)
        ann = {
            "anonymous-storage-FURL": furl,
            "permutation-seed-base32": self._init_permutation_seed(ss),
        }
        for ic in self.introducer_clients:
            ic.publish("storage", ann, self._node_key)

    def init_client(self):
        helper_furl = self.get_config("client", "helper.furl", None)
        if helper_furl in ("None", ""):
            helper_furl = None

        DEP = self.encoding_params
        DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(
            self.get_config("client", "shares.happy", DEP["happy"]))

        # for the CLI to authenticate to local JSON endpoints
        self._create_auth_token()

        self.init_client_storage_broker()
        self.history = History(self.stats_provider)
        self.terminator = Terminator()
        self.terminator.setServiceParent(self)
        self.add_service(
            Uploader(helper_furl, self.stats_provider, self.history))
        self.init_blacklist()
        self.init_nodemaker()

    def get_auth_token(self):
        """
        This returns a local authentication token, which is just some
        random data in "api_auth_token" which must be echoed to API
        calls.

        Currently only the URI '/magic' for magic-folder status; other
        endpoints are invited to include this as well, as appropriate.
        """
        return self.get_private_config('api_auth_token')

    def _create_auth_token(self):
        """
        Creates new auth-token data written to 'private/api_auth_token'.

        This is intentionally re-created every time the node starts.
        """
        self.write_private_config(
            'api_auth_token',
            urlsafe_b64encode(os.urandom(32)) + '\n',
        )

    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        ps = self.get_config("client", "peers.preferred", "").split(",")
        preferred_peers = tuple([p.strip() for p in ps if p != ""])
        sb = storage_client.StorageFarmBroker(
            permute_peers=True,
            tub_maker=self._create_tub,
            preferred_peers=preferred_peers,
        )
        self.storage_broker = sb
        sb.setServiceParent(self)
        for ic in self.introducer_clients:
            sb.use_introducer(ic)

    def get_storage_broker(self):
        return self.storage_broker

    def load_static_servers(self):
        """
        Load the servers.yaml file if it exists, and provide the static
        server data to the StorageFarmBroker.
        """
        fn = os.path.join(self.basedir, "private", "servers.yaml")
        servers_filepath = FilePath(fn)
        try:
            with servers_filepath.open() as f:
                servers_yaml = yamlutil.safe_load(f)
            static_servers = servers_yaml.get("storage", {})
            log.msg("found %d static servers in private/servers.yaml" %
                    len(static_servers))
            self.storage_broker.set_static_servers(static_servers)
        except EnvironmentError:
            pass

    def init_blacklist(self):
        fn = os.path.join(self.basedir, "access.blacklist")
        self.blacklist = Blacklist(fn)

    def init_nodemaker(self):
        default = self.get_config("client", "mutable.format", default="SDMF")
        if default.upper() == "MDMF":
            self.mutable_file_default = MDMF_VERSION
        else:
            self.mutable_file_default = SDMF_VERSION
        self.nodemaker = NodeMaker(self.storage_broker, self._secret_holder,
                                   self.get_history(),
                                   self.getServiceNamed("uploader"),
                                   self.terminator,
                                   self.get_encoding_parameters(),
                                   self.mutable_file_default,
                                   self._key_generator, self.blacklist)

    def get_history(self):
        return self.history

    def init_control(self):
        c = ControlServer()
        c.setServiceParent(self)
        control_url = self.control_tub.registerReference(c)
        self.write_private_config("control.furl", control_url + "\n")

    def init_helper(self):
        self.helper = Helper(os.path.join(self.basedir, "helper"),
                             self.storage_broker, self._secret_holder,
                             self.stats_provider, self.history)
        # TODO: this is confusing. BASEDIR/private/helper.furl is created by
        # the helper. BASEDIR/helper.furl is consumed by the client who wants
        # to use the helper. I like having the filename be the same, since
        # that makes 'cp' work smoothly, but the difference between config
        # inputs and generated outputs is hard to see.
        helper_furlfile = os.path.join(self.basedir, "private",
                                       "helper.furl").encode(
                                           get_filesystem_encoding())
        self.tub.registerReference(self.helper, furlFile=helper_furlfile)

    def set_default_mutable_keysize(self, keysize):
        self._key_generator.set_default_keysize(keysize)

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport, ))

        from allmydata.webish import WebishServer
        nodeurl_path = os.path.join(self.basedir, "node.url")
        staticdir_config = self.get_config("node", "web.static",
                                           "public_html").decode("utf-8")
        staticdir = abspath_expanduser_unicode(staticdir_config,
                                               base=self.basedir)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        self.add_service(ws)

    def init_ftp_server(self):
        if self.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = from_utf8_or_none(
                self.get_config("ftpd", "accounts.file", None))
            if accountfile:
                accountfile = abspath_expanduser_unicode(accountfile,
                                                         base=self.basedir)
            accounturl = self.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd
            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = from_utf8_or_none(
                self.get_config("sftpd", "accounts.file", None))
            if accountfile:
                accountfile = abspath_expanduser_unicode(accountfile,
                                                         base=self.basedir)
            accounturl = self.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.get_config("sftpd", "port", "8022")
            pubkey_file = from_utf8_or_none(
                self.get_config("sftpd", "host_pubkey_file"))
            privkey_file = from_utf8_or_none(
                self.get_config("sftpd", "host_privkey_file"))

            from allmydata.frontends import sftpd
            s = sftpd.SFTPServer(self, accountfile, accounturl, sftp_portstr,
                                 pubkey_file, privkey_file)
            s.setServiceParent(self)

    def init_magic_folder(self):
        #print "init_magic_folder"
        if self.get_config("drop_upload", "enabled", False, boolean=True):
            raise OldConfigOptionError(
                "The [drop_upload] section must be renamed to [magic_folder].\n"
                "See docs/frontends/magic-folder.rst for more information.")

        if self.get_config("magic_folder", "enabled", False, boolean=True):
            #print "magic folder enabled"
            from allmydata.frontends import magic_folder

            db_filename = os.path.join(self.basedir, "private",
                                       "magicfolderdb.sqlite")
            local_dir_config = self.get_config(
                "magic_folder", "local.directory").decode("utf-8")
            try:
                poll_interval = int(
                    self.get_config("magic_folder", "poll_interval", 3))
            except ValueError:
                raise ValueError("[magic_folder] poll_interval must be an int")

            s = magic_folder.MagicFolder(
                client=self,
                upload_dircap=self.get_private_config("magic_folder_dircap"),
                collective_dircap=self.get_private_config("collective_dircap"),
                local_path_u=abspath_expanduser_unicode(local_dir_config,
                                                        base=self.basedir),
                dbfile=abspath_expanduser_unicode(db_filename),
                umask=self.get_config("magic_folder", "download.umask", 0077),
                downloader_delay=poll_interval,
            )
            self._magic_folder = s
            s.setServiceParent(self)
            s.startService()

            # start processing the upload queue when we've connected to
            # enough servers
            threshold = min(self.encoding_params["k"],
                            self.encoding_params["happy"] + 1)
            d = self.storage_broker.when_connected_enough(threshold)
            d.addCallback(lambda ign: s.ready())

    def _check_exit_trigger(self, exit_trigger_file):
        if os.path.exists(exit_trigger_file):
            mtime = os.stat(exit_trigger_file)[stat.ST_MTIME]
            if mtime > time.time() - 120.0:
                return
            else:
                self.log("%s file too old, shutting down" %
                         (self.EXIT_TRIGGER_FILE, ))
        else:
            self.log("%s file missing, shutting down" %
                     (self.EXIT_TRIGGER_FILE, ))
        reactor.stop()

    def get_encoding_parameters(self):
        return self.encoding_params

    def introducer_connection_statuses(self):
        return [ic.connection_status() for ic in self.introducer_clients]

    def connected_to_introducer(self):
        return any(
            [ic.connected_to_introducer() for ic in self.introducer_clients])

    def get_renewal_secret(self):  # this will go away
        return self._secret_holder.get_renewal_secret()

    def get_cancel_secret(self):
        return self._secret_holder.get_cancel_secret()

    def debug_wait_for_client_connections(self, num_clients):
        """Return a Deferred that fires (with None) when we have connections
        to the given number of peers. Useful for tests that set up a
        temporary test network and need to know when it is safe to proceed
        with an upload or download."""
        def _check():
            return len(
                self.storage_broker.get_connected_servers()) >= num_clients

        d = self.poll(_check, 0.5)
        d.addCallback(lambda res: None)
        return d

    # these four methods are the primitives for creating filenodes and
    # dirnodes. The first takes a URI and produces a filenode or (new-style)
    # dirnode. The other three create brand-new filenodes/dirnodes.

    def create_node_from_uri(self,
                             write_uri,
                             read_uri=None,
                             deep_immutable=False,
                             name="<unknown name>"):
        # This returns synchronously.
        # Note that it does *not* validate the write_uri and read_uri; instead we
        # may get an opaque node if there were any problems.
        return self.nodemaker.create_from_cap(write_uri,
                                              read_uri,
                                              deep_immutable=deep_immutable,
                                              name=name)

    def create_dirnode(self, initial_children={}, version=None):
        d = self.nodemaker.create_new_mutable_directory(initial_children,
                                                        version=version)
        return d

    def create_immutable_dirnode(self, children, convergence=None):
        return self.nodemaker.create_immutable_directory(children, convergence)

    def create_mutable_file(self, contents=None, keysize=None, version=None):
        return self.nodemaker.create_mutable_file(contents,
                                                  keysize,
                                                  version=version)

    def upload(self, uploadable, reactor=None):
        uploader = self.getServiceNamed("uploader")
        return uploader.upload(uploadable, reactor=reactor)
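
The exit-trigger handling above (_check_exit_trigger together with the TimerService started in __init__) shuts the node down unless the trigger file keeps getting touched. The following is a minimal standalone sketch of that rule, using the 120-second timeout and one-second poll from the listing; the helper names are illustrative and not part of tahoe-lafs.

import os
import stat
import time

EXIT_TRIGGER_TIMEOUT = 120.0  # seconds, matching _check_exit_trigger above

def exit_trigger_still_fresh(path):
    # The node keeps running only while the trigger file exists and its
    # mtime falls within the timeout window; otherwise reactor.stop() is
    # called.
    if not os.path.exists(path):
        return False
    mtime = os.stat(path)[stat.ST_MTIME]
    return mtime > time.time() - EXIT_TRIGGER_TIMEOUT

# A test harness that wants to keep such a node alive would touch the file
# more often than the timeout, e.g. once every 30 seconds:
#     open("exit_trigger", "a").close()
#     os.utime("exit_trigger", None)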
Code Example #11
 def init_stats_provider(self):
     self.stats_provider = StatsProvider(self)
     self.stats_provider.setServiceParent(self)
     self.stats_provider.register_producer(self)
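
Code Example #11 registers the node itself as a stats producer and, unlike the variant in the next example, creates the StatsProvider without a gatherer furl. As get_stats() in the next example shows, a producer only needs to return a dict mapping stat names to numeric values. A minimal standalone sketch of that contract follows; the class name is illustrative and not part of allmydata.

import time

class UptimeStatsProducer(object):
    """Toy object following the get_stats() contract seen below."""

    def __init__(self):
        self.started_timestamp = time.time()

    def get_stats(self):
        # Objects handed to StatsProvider.register_producer() are expected
        # to report their statistics as a dict like this one.
        return {"node.uptime": time.time() - self.started_timestamp}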
Code Example #12
class Client(node.Node, pollmixin.PollMixin):
    implements(IStatsProducer)

    PORTNUMFILE = "client.port"
    STOREDIR = 'storage'
    NODETYPE = "client"
    EXIT_TRIGGER_FILE = "exit_trigger"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # This is a dictionary of (needed, desired, total, max_segment_size). 'needed'
    # is the number of shares required to reconstruct a file. 'desired' means
    # that we will abort an upload unless we can allocate space for at least
    # this many. 'total' is the total number of shares created by encoding.
    # If everybody has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {
        "k": 3,
        "happy": 7,
        "n": 10,
        "max_segment_size": 128 * KiB,
    }
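    # In other words, with these defaults a file is encoded into n=10
    # ("total") shares, any k=3 ("needed") of which suffice to reconstruct
    # it, and an upload is aborted unless space for at least happy=7
    # ("desired") shares can be allocated.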

    def __init__(self, basedir="."):
        node.Node.__init__(self, basedir)
        self.started_timestamp = time.time()
        self.logSource = "Client"
        self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
        self.init_introducer_client()
        self.init_stats_provider()
        self.init_secrets()
        self.init_node_key()
        self.init_storage()
        self.init_control()
        self.helper = None
        if self.get_config("helper", "enabled", False, boolean=True):
            self.init_helper()
        self._key_generator = KeyGenerator()
        key_gen_furl = self.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            self.init_key_gen(key_gen_furl)
        self.init_client()
        # ControlServer and Helper are attached after Tub startup
        self.init_ftp_server()
        self.init_sftp_server()
        self.init_drop_uploader()

        # If the node sees an exit_trigger file, it will poll every second to see
        # whether the file still exists, and what its mtime is. If the file does not
        # exist or has not been modified for a given timeout, the node will exit.
        exit_trigger_file = os.path.join(self.basedir, self.EXIT_TRIGGER_FILE)
        if os.path.exists(exit_trigger_file):
            age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME]
            self.log("%s file noticed (%ds old), starting timer" %
                     (self.EXIT_TRIGGER_FILE, age))
            exit_trigger = TimerService(1.0, self._check_exit_trigger,
                                        exit_trigger_file)
            exit_trigger.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = self.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport)  # strports string

    def _sequencer(self):
        seqnum_s = self.get_config_from_file("announcement-seqnum")
        if not seqnum_s:
            seqnum_s = "0"
        seqnum = int(seqnum_s.strip())
        seqnum += 1  # increment
        self.write_config("announcement-seqnum", "%d\n" % seqnum)
        nonce = _make_secret().strip()
        return seqnum, nonce

    def init_introducer_client(self):
        self.introducer_furl = self.get_config("client", "introducer.furl")
        ic = IntroducerClient(self.tub, self.introducer_furl, self.nickname,
                              str(allmydata.__full_version__),
                              str(self.OLDEST_SUPPORTED_VERSION),
                              self.get_app_versions(), self._sequencer)
        self.introducer_client = ic
        # hold off on starting the IntroducerClient until our tub has been
        # started, so we'll have a useful address on our RemoteReference, so
        # that the introducer's status page will show us.
        d = self.when_tub_ready()

        def _start_introducer_client(res):
            ic.setServiceParent(self)

        d.addCallback(_start_introducer_client)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="URyI5w")

    def init_stats_provider(self):
        gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.add_service(self.stats_provider)
        self.stats_provider.register_producer(self)

    def get_stats(self):
        return {'node.uptime': time.time() - self.started_timestamp}

    def init_secrets(self):
        lease_s = self.get_or_create_private_config("secret", _make_secret)
        lease_secret = base32.a2b(lease_s)
        convergence_s = self.get_or_create_private_config(
            'convergence', _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._secret_holder = SecretHolder(lease_secret, self.convergence)

    def init_node_key(self):
        # we only create the key once. On all subsequent runs, we re-use the
        # existing key
        def _make_key():
            sk_vs, vk_vs = keyutil.make_keypair()
            return sk_vs + "\n"

        sk_vs = self.get_or_create_private_config("node.privkey", _make_key)
        sk, vk_vs = keyutil.parse_privkey(sk_vs.strip())
        self.write_config("node.pubkey", vk_vs + "\n")
        self._node_key = sk

    def get_long_nodeid(self):
        # this matches what IServer.get_longname() says about us elsewhere
        vk_bytes = self._node_key.get_verifying_key_bytes()
        return "v0-" + base32.b2a(vk_bytes)

    def get_long_tubid(self):
        return idlib.nodeid_b2a(self.nodeid)

    def _init_permutation_seed(self, ss):
        seed = self.get_config_from_file("permutation-seed")
        if not seed:
            have_shares = ss.have_shares()
            if have_shares:
                # if the server has shares but not a recorded
                # permutation-seed, then it has been around since pre-#466
                # days, and the clients who uploaded those shares used our
                # TubID as a permutation-seed. We should keep using that same
                # seed to keep the shares in the same place in the permuted
                # ring, so those clients don't have to perform excessive
                # searches.
                seed = base32.b2a(self.nodeid)
            else:
                # otherwise, we're free to use the more natural seed of our
                # pubkey-based serverid
                vk_bytes = self._node_key.get_verifying_key_bytes()
                seed = base32.b2a(vk_bytes)
            self.write_config("permutation-seed", seed + "\n")
        return seed.strip()

    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        if not self.get_config("storage", "enabled", True, boolean=True):
            return
        readonly = self.get_config("storage", "readonly", False, boolean=True)

        storedir = os.path.join(self.basedir, self.STOREDIR)

        data = self.get_config("storage", "reserved_space", None)
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s" %
                    data)
            raise
        if reserved is None:
            reserved = 0
        discard = self.get_config("storage",
                                  "debug_discard",
                                  False,
                                  boolean=True)

        expire = self.get_config("storage",
                                 "expire.enabled",
                                 False,
                                 boolean=True)
        if expire:
            mode = self.get_config("storage", "expire.mode")  # require a mode
        else:
            mode = self.get_config("storage", "expire.mode", "age")

        o_l_d = self.get_config("storage", "expire.override_lease_duration",
                                None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.get_config("storage", "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.get_config("storage", "expire.immutable", True, boolean=True):
            sharetypes.append("immutable")
        if self.get_config("storage", "expire.mutable", True, boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(storedir,
                           self.nodeid,
                           reserved_space=reserved,
                           discard_storage=discard,
                           readonly_storage=readonly,
                           stats_provider=self.stats_provider,
                           expiration_enabled=expire,
                           expiration_mode=mode,
                           expiration_override_lease_duration=o_l_d,
                           expiration_cutoff_date=cutoff_date,
                           expiration_sharetypes=expiration_sharetypes)
        self.add_service(ss)

        d = self.when_tub_ready()

        # we can't do registerReference until the Tub is ready
        def _publish(res):
            furl_file = os.path.join(self.basedir, "private",
                                     "storage.furl").encode(
                                         get_filesystem_encoding())
            furl = self.tub.registerReference(ss, furlFile=furl_file)
            ann = {
                "anonymous-storage-FURL": furl,
                "permutation-seed-base32": self._init_permutation_seed(ss),
            }
            self.introducer_client.publish("storage", ann, self._node_key)

        d.addCallback(_publish)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="aLGBKw")

    def init_client(self):
        helper_furl = self.get_config("client", "helper.furl", None)
        if helper_furl in ("None", ""):
            helper_furl = None

        DEP = self.encoding_params
        DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(
            self.get_config("client", "shares.happy", DEP["happy"]))

        self.init_client_storage_broker()
        self.history = History(self.stats_provider)
        self.terminator = Terminator()
        self.terminator.setServiceParent(self)
        self.add_service(
            Uploader(helper_furl, self.stats_provider, self.history))
        self.init_blacklist()
        self.init_nodemaker()

    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        sb = storage_client.StorageFarmBroker(self.tub, permute_peers=True)
        self.storage_broker = sb

        # load static server specifications from tahoe.cfg, if any.
        # Not quite ready yet.
        #if self.config.has_section("client-server-selection"):
        #    server_params = {} # maps serverid to dict of parameters
        #    for (name, value) in self.config.items("client-server-selection"):
        #        pieces = name.split(".")
        #        if pieces[0] == "server":
        #            serverid = pieces[1]
        #            if serverid not in server_params:
        #                server_params[serverid] = {}
        #            server_params[serverid][pieces[2]] = value
        #    for serverid, params in server_params.items():
        #        server_type = params.pop("type")
        #        if server_type == "tahoe-foolscap":
        #            s = storage_client.NativeStorageClient(*params)
        #        else:
        #            msg = ("unrecognized server type '%s' in "
        #                   "tahoe.cfg [client-server-selection]server.%s.type"
        #                   % (server_type, serverid))
        #            raise storage_client.UnknownServerTypeError(msg)
        #        sb.add_server(s.serverid, s)

        # check to see if we're supposed to use the introducer too
        if self.get_config("client-server-selection",
                           "use_introducer",
                           default=True,
                           boolean=True):
            sb.use_introducer(self.introducer_client)

    def get_storage_broker(self):
        return self.storage_broker

    def init_blacklist(self):
        fn = os.path.join(self.basedir, "access.blacklist")
        self.blacklist = Blacklist(fn)

    def init_nodemaker(self):
        default = self.get_config("client", "mutable.format", default="SDMF")
        if default.upper() == "MDMF":
            self.mutable_file_default = MDMF_VERSION
        else:
            self.mutable_file_default = SDMF_VERSION
        self.nodemaker = NodeMaker(self.storage_broker, self._secret_holder,
                                   self.get_history(),
                                   self.getServiceNamed("uploader"),
                                   self.terminator,
                                   self.get_encoding_parameters(),
                                   self.mutable_file_default,
                                   self._key_generator, self.blacklist)

    def get_history(self):
        return self.history

    def init_control(self):
        d = self.when_tub_ready()

        def _publish(res):
            c = ControlServer()
            c.setServiceParent(self)
            control_url = self.tub.registerReference(c)
            self.write_private_config("control.furl", control_url + "\n")

        d.addCallback(_publish)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="d3tNXA")

    def init_helper(self):
        d = self.when_tub_ready()

        def _publish(self):
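            # Note: the Deferred returned by when_tub_ready() fires with the
            # node itself, so the 'self' parameter here shadows, and refers to
            # the same object as, the Client instance in the enclosing method.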
            self.helper = Helper(os.path.join(self.basedir, "helper"),
                                 self.storage_broker, self._secret_holder,
                                 self.stats_provider, self.history)
            # TODO: this is confusing. BASEDIR/private/helper.furl is created
            # by the helper. BASEDIR/helper.furl is consumed by the client
            # who wants to use the helper. I like having the filename be the
            # same, since that makes 'cp' work smoothly, but the difference
            # between config inputs and generated outputs is hard to see.
            helper_furlfile = os.path.join(self.basedir, "private",
                                           "helper.furl").encode(
                                               get_filesystem_encoding())
            self.tub.registerReference(self.helper, furlFile=helper_furlfile)

        d.addCallback(_publish)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="K0mW5w")

    def init_key_gen(self, key_gen_furl):
        d = self.when_tub_ready()

        def _subscribe(self):
            self.tub.connectTo(key_gen_furl, self._got_key_generator)

        d.addCallback(_subscribe)
        d.addErrback(log.err,
                     facility="tahoe.init",
                     level=log.BAD,
                     umid="z9DMzw")

    def _got_key_generator(self, key_generator):
        self._key_generator.set_remote_generator(key_generator)
        key_generator.notifyOnDisconnect(self._lost_key_generator)

    def _lost_key_generator(self):
        self._key_generator.set_remote_generator(None)

    def set_default_mutable_keysize(self, keysize):
        self._key_generator.set_default_keysize(keysize)

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport, ))

        from allmydata.webish import WebishServer
        nodeurl_path = os.path.join(self.basedir, "node.url")
        staticdir = self.get_config("node", "web.static", "public_html")
        staticdir = os.path.expanduser(staticdir)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        self.add_service(ws)

    def init_ftp_server(self):
        if self.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("ftpd", "accounts.file", None)
            accounturl = self.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd
            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("sftpd", "accounts.file", None)
            accounturl = self.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.get_config("sftpd", "port", "8022")
            pubkey_file = self.get_config("sftpd", "host_pubkey_file")
            privkey_file = self.get_config("sftpd", "host_privkey_file")

            from allmydata.frontends import sftpd
            s = sftpd.SFTPServer(self, accountfile, accounturl, sftp_portstr,
                                 pubkey_file, privkey_file)
            s.setServiceParent(self)

    def init_drop_uploader(self):
        if self.get_config("drop_upload", "enabled", False, boolean=True):
            if self.get_config("drop_upload", "upload.dircap", None):
                raise OldConfigOptionError(
                    "The [drop_upload]upload.dircap option is no longer supported; please "
                    "put the cap in a 'private/drop_upload_dircap' file, and delete this option."
                )

            upload_dircap = self.get_or_create_private_config(
                "drop_upload_dircap")
            local_dir_utf8 = self.get_config("drop_upload", "local.directory")

            try:
                from allmydata.frontends import drop_upload
                s = drop_upload.DropUploader(self, upload_dircap,
                                             local_dir_utf8)
                s.setServiceParent(self)
                s.startService()
            except Exception, e:
                self.log("couldn't start drop-uploader: %r", args=(e, ))