Example #1
        def _got(data):
            if self._aborted:
                raise UploadAborted()
            encrypted_pieces = []
            length = 0
            while data:
                encrypted_piece = data.pop(0)
                length += len(encrypted_piece)
                crypttext_segment_hasher.update(encrypted_piece)
                self._crypttext_hasher.update(encrypted_piece)
                encrypted_pieces.append(encrypted_piece)

            precondition(length <= input_chunk_size,
                         "length=%d > input_chunk_size=%d" %
                         (length, input_chunk_size))
            if allow_short:
                if length < input_chunk_size:
                    # padding
                    pad_size = input_chunk_size - length
                    encrypted_pieces.append('\x00' * pad_size)
            else:
                # non-tail segments should be the full segment size
                if length != input_chunk_size:
                    log.msg("non-tail segment should be full segment size: %d!=%d"
                            % (length, input_chunk_size),
                            level=log.BAD, umid="jNk5Yw")
                precondition(length == input_chunk_size,
                             "length=%d != input_chunk_size=%d" %
                             (length, input_chunk_size))

            encrypted_piece = "".join(encrypted_pieces)
            return previous_chunks + [encrypted_piece]
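The examples on this page all exercise precondition() from allmydata.util.assertutil (a re-export of pyutil's assertutil). A minimal sketch of the behavior these call sites rely on (raise AssertionError when the condition is falsy, folding the extra arguments into the message), assuming pyutil-style formatting; the real library's message format may differ:

def precondition(expr, *args, **kwargs):
    # Sketch: the extra positional and keyword arguments are diagnostic
    # values (e.g. length, input_chunk_size) folded into the failure message.
    if expr:
        return
    parts = [repr(a) for a in args]
    parts.extend("%s=%r" % (k, v) for (k, v) in sorted(kwargs.items()))
    raise AssertionError("precondition failed: " + ", ".join(parts))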
Example #2
def _decode(s):
    precondition(isinstance(s, basestring), s=s)

    if isinstance(s, bytes):
        return s.decode(filesystem_encoding)
    else:
        return s
Example #3
    def _handle_bad_share(self, f, readers):
        """
        I am called when a block or a salt fails to correctly validate, or when
        the decryption or decoding operation fails for some reason.  I react to
        this failure by notifying the remote server of corruption, and then
        removing the remote server from further activity.
        """
        # these are the errors we can tolerate: by giving up on this share
        # and finding others to replace it. Any other errors (i.e. coding
        # bugs) are re-raised, causing the download to fail.
        f.trap(DeadReferenceError, RemoteException, BadShareError)

        # DeadReferenceError happens when we try to fetch data from a server
        # that has gone away. RemoteException happens if the server had an
        # internal error. BadShareError encompasses: (UnknownVersionError,
        # LayoutInvalid, struct.error) which happen when we get obviously
        # wrong data, and CorruptShareError which happens later, when we
        # perform integrity checks on the data.

        precondition(isinstance(readers, list), readers)
        bad_shnums = [reader.shnum for reader in readers]

        self.log("validation or decoding failed on share(s) %s, server(s) %s "
                 ", segment %d: %s" % \
                 (bad_shnums, readers, self._current_segment, str(f)))
        for reader in readers:
            self._mark_bad_share(reader.server, reader.shnum, reader, f)
        return None
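The key move above is f.trap(): Twisted's Failure.trap() returns normally when the wrapped exception matches one of the given types and re-raises the Failure otherwise, which is what lets tolerable errors trigger share replacement while coding bugs still fail the download. A minimal sketch of that pattern (BadShareError here stands in for any tolerated error type):

from twisted.python.failure import Failure

class BadShareError(Exception):
    pass

def handle(f):
    f.trap(BadShareError)      # re-raises f if it wraps anything else
    return "tolerated, share abandoned"

try:
    raise BadShareError("block hash mismatch")
except BadShareError:
    print(handle(Failure()))   # Failure() captures the active exception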
Example #4
    def _write_share_data(self, f, offset, data):
        length = len(data)
        precondition(offset >= 0)
        data_length = self._read_data_length(f)
        extra_lease_offset = self._read_extra_lease_offset(f)

        if offset+length >= data_length:
            # They are expanding their data size.
            if self.DATA_OFFSET+offset+length > extra_lease_offset:
                # Their new data won't fit in the current container, so we
                # have to move the leases. With luck, they're expanding it
                # more than the size of the extra lease block, which will
                # minimize the corrupt-the-share window
                self._change_container_size(f, offset+length)
                extra_lease_offset = self._read_extra_lease_offset(f)

                # an interrupt here is ok.. the container has been enlarged
                # but the data remains untouched

            assert self.DATA_OFFSET+offset+length <= extra_lease_offset
            # Their data now fits in the current container. We must write
            # their new data and modify the recorded data size.
            new_data_length = offset+length
            self._write_data_length(f, new_data_length)
            # an interrupt here will result in a corrupted share

        # now all that's left to do is write out their data
        f.seek(self.DATA_OFFSET+offset)
        f.write(data)
        return
Example #5
 def addDeferred(self, d):
     precondition(not self.closed, "don't call addDeferred() on a closed ExpandableDeferredList")
     index = len(self.resultList)
     self.resultList.append(None)
     d.addCallbacks(self._cbDeferred, self._ebDeferred,
                    callbackArgs=(index,))
     return d
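callbackArgs=(index,) makes Twisted pass index as an extra positional argument after the Deferred's result, which is how each result can be routed back to the slot reserved in resultList. A self-contained demonstration of that mechanism (record and slots are hypothetical names for illustration):

from twisted.internet import defer

def record(result, index, slots):
    slots[index] = result      # fill the slot reserved for this Deferred
    return result

slots = [None, None]
d = defer.Deferred()
d.addCallback(record, 0, slots)   # same effect as callbackArgs=(0, slots)
d.callback("share data")
assert slots == ["share data", None]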
Example #6
def create_node(config):
    out = config.stdout
    err = config.stderr
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
            print >>err, "To avoid clobbering anything, I am going to quit now."
            print >>err, "Please use a different directory, or empty this one."
            defer.returnValue(-1)
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    write_tac(basedir, "client")

    fileutil.make_dirs(os.path.join(basedir, "private"), 0700)
    with open(os.path.join(basedir, "tahoe.cfg"), "w") as c:
        yield write_node_config(c, config)
        write_client_config(c, config)

    print >>out, "Node created in %s" % quote_local_unicode_path(basedir)
    if not config.get("introducer", ""):
        print >>out, " Please set [client]introducer.furl= in tahoe.cfg!"
        print >>out, " The node cannot connect to a grid without it."
    if not config.get("nickname", ""):
        print >>out, " Please set [node]nickname= in tahoe.cfg"
    defer.returnValue(0)
Example #7
 def do_join(self, client_num, local_dir, invite_code):
     action = start_action(
         action_type=u"join-magic-folder",
         client_num=client_num,
         local_dir=local_dir,
         invite_code=invite_code,
     )
     with action.context():
         precondition(isinstance(local_dir, unicode), local_dir=local_dir)
         precondition(isinstance(invite_code, str), invite_code=invite_code)
         local_dir_arg = unicode_to_argv(local_dir)
         d = DeferredContext(
             self.do_cli(
                 "magic-folder",
                 "join",
                 invite_code,
                 local_dir_arg,
                 client_num=client_num,
             )
         )
     def _done(args):
         (rc, stdout, stderr) = args
         self.failUnlessEqual(rc, 0)
         self.failUnlessEqual(stdout, "")
         self.failUnlessEqual(stderr, "")
         return (rc, stdout, stderr)
     d.addCallback(_done)
     return d.addActionFinish()
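The start_action/DeferredContext pairing above is the usual Eliot-with-Twisted pattern: the with block scopes the action, DeferredContext carries it across callbacks, and addActionFinish() closes the action when the Deferred fires. A minimal sketch, assuming the eliot package and a caller-supplied make_deferred:

from eliot import start_action
from eliot.twisted import DeferredContext

def traced(make_deferred):
    action = start_action(action_type=u"join-magic-folder")
    with action.context():
        d = DeferredContext(make_deferred())
    d.addCallback(lambda result: result)   # runs within the action's context
    return d.addActionFinish()             # finish the action when d fires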
Example #8
    def modify(self, old_contents, servermap, first_time):
        children = self.node._unpack_contents(old_contents)
        now = time.time()
        for (namex, (child, new_metadata)) in self.entries.iteritems():
            name = normalize(namex)
            precondition(IFilesystemNode.providedBy(child), child)

            # Strictly speaking this is redundant because we would raise the
            # error again in _pack_normalized_children.
            child.raise_error()

            metadata = None
            if name in children:
                if not self.overwrite:
                    raise ExistingChildError("child %s already exists" % quote_output(name, encoding='utf-8'))

                if self.overwrite == "only-files" and IDirectoryNode.providedBy(children[name][0]):
                    raise ExistingChildError("child %s already exists as a directory" % quote_output(name, encoding='utf-8'))
                metadata = children[name][1].copy()

            metadata = update_metadata(metadata, new_metadata, now)
            if self.create_readonly_node and metadata.get('no-write', False):
                child = self.create_readonly_node(child, name)

            children[name] = (child, metadata)
        new_contents = self.node._pack_contents(children)
        return new_contents
Example #9
def quote_output(s, quotemarks=True, encoding=None):
    """
    Encode either a Unicode string or a UTF-8-encoded bytestring for representation
    on stdout or stderr, tolerating errors. If 'quotemarks' is True, the string is
    always quoted; otherwise, it is quoted only if necessary to avoid ambiguity or
    control bytes in the output.
    Quoting may use either single or double quotes. Within single quotes, all
    characters stand for themselves, and ' will not appear. Within double quotes,
    Python-compatible backslash escaping is used.
    """
    precondition(isinstance(s, (str, unicode)), s)

    if isinstance(s, str):
        try:
            s = s.decode('utf-8')
        except UnicodeDecodeError:
            return 'b"%s"' % (ESCAPABLE_8BIT.sub(_str_escape, s),)

    if MUST_DOUBLE_QUOTE.search(s) is None:
        try:
            out = s.encode(encoding or io_encoding)
            if quotemarks or out.startswith('"'):
                return "'%s'" % (out,)
            else:
                return out
        except (UnicodeDecodeError, UnicodeEncodeError):
            pass

    escaped = ESCAPABLE_UNICODE.sub(_unicode_escape, s)
    return '"%s"' % (escaped.encode(encoding or io_encoding, 'backslashreplace'),)
Example #10
 def add_or_renew_lease(self, lease_info):
     precondition(lease_info.owner_num != 0) # 0 means "no lease here"
     try:
         self.renew_lease(lease_info.renew_secret,
                          lease_info.expiration_time)
     except IndexError:
         self.add_lease(lease_info)
Example #11
    def _finished(self, ur):
        assert interfaces.IUploadResults.providedBy(ur), ur
        vcapstr = ur.get_verifycapstr()
        precondition(isinstance(vcapstr, str), vcapstr)
        v = uri.from_string(vcapstr)
        f_times = self._fetcher.get_times()

        hur = upload.HelperUploadResults()
        hur.timings = {"cumulative_fetch": f_times["cumulative_fetch"],
                       "total_fetch": f_times["total"],
                       }
        for key,val in ur.get_timings().items():
            hur.timings[key] = val
        hur.uri_extension_hash = v.uri_extension_hash
        hur.ciphertext_fetched = self._fetcher.get_ciphertext_fetched()
        hur.preexisting_shares = ur.get_preexisting_shares()
        # hur.sharemap needs to be {shnum: set(serverid)}
        hur.sharemap = {}
        for shnum, servers in ur.get_sharemap().items():
            hur.sharemap[shnum] = set([s.get_serverid() for s in servers])
        # and hur.servermap needs to be {serverid: set(shnum)}
        hur.servermap = {}
        for server, shnums in ur.get_servermap().items():
            hur.servermap[server.get_serverid()] = set(shnums)
        hur.pushed_shares = ur.get_pushed_shares()
        hur.file_size = ur.get_file_size()
        hur.uri_extension_data = ur.get_uri_extension_data()
        hur.verifycapstr = vcapstr

        self._reader.close()
        os.unlink(self._encoding_file)
        self._finished_observers.fire(hur)
        self._helper.upload_finished(self._storage_index, v.size)
        del self._reader
Example #12
 def _got_announcement(self, key_s, ann):
     if key_s is not None:
         precondition(isinstance(key_s, str), key_s)
         precondition(key_s.startswith("v0-"), key_s)
     assert ann["service-name"] == "storage"
     s = NativeStorageServer(key_s, ann, self._tub_options)
     s.on_status_changed(lambda _: self._got_connection())
     serverid = s.get_serverid()
     old = self.servers.get(serverid)
     if old:
         if old.get_announcement() == ann:
             return # duplicate
         # replacement
         del self.servers[serverid]
         old.stop_connecting()
         old.disownServiceParent()
         # NOTE: this disownServiceParent() returns a Deferred that
         # doesn't fire until Tub.stopService fires, which will wait for
         # any existing connections to be shut down. This doesn't
         # generally matter for normal runtime, but unit tests can run
         # into DirtyReactorErrors if they don't block on these. If a test
         # replaces one server with a newer version, then terminates
         # before the old one has been shut down, it might get
         # DirtyReactorErrors. The fix would be to gather these Deferreds
         # into a structure that will block StorageFarmBroker.stopService
         # until they have fired (but hopefully don't keep reference
         # cycles around when they fire earlier than that, which will
         # almost always be the case for normal runtime).
     # now we forget about them and start using the new one
     self.servers[serverid] = s
     s.setServiceParent(self)
     s.start_connecting(self._trigger_connections)
Example #13
def create_stats_gatherer(config):
    err = config.stderr
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print("The base directory %s is not empty." % quote_output(basedir), file=err)
            print("To avoid clobbering anything, I am going to quit now.", file=err)
            print("Please use a different directory, or empty this one.", file=err)
            return -1
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    write_tac(basedir, "stats-gatherer")
    if config["hostname"]:
        portnum = iputil.allocate_tcp_port()
        location = "tcp:%s:%d" % (config["hostname"], portnum)
        port = "tcp:%d" % portnum
    else:
        location = config["location"]
        port = config["port"]
    fileutil.write(os.path.join(basedir, "location"), location+"\n")
    fileutil.write(os.path.join(basedir, "port"), port+"\n")
    return 0
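iputil.allocate_tcp_port() is not shown here; the usual technique, and a reasonable sketch of it, is to bind port 0 so the kernel picks an unused port:

import socket

def allocate_tcp_port():
    # bind to port 0, let the kernel choose, then release the socket
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(("127.0.0.1", 0))
        return s.getsockname()[1]
    finally:
        s.close()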
Example #14
def could_be_base32_encoded_l(s, lengthinbits, s5=s5, tr=string.translate, identitytranstable=identitytranstable, chars=chars):
    precondition(isinstance(s, str), s)
    if s == '':
        return True
    assert lengthinbits%5 < len(s5), lengthinbits
    assert ord(s[-1]) < len(s5[lengthinbits%5])
    return (((lengthinbits+4)/5) == len(s)) and s5[lengthinbits%5][ord(s[-1])] and not tr(s, identitytranstable, chars)
Example #15
def add_alias(options):
    nodedir = options['node-directory']
    alias = options.alias
    precondition(isinstance(alias, unicode), alias=alias)
    cap = options.cap
    stdout = options.stdout
    stderr = options.stderr
    if u":" in alias:
        # a single trailing colon will already have been stripped if present
        print >>stderr, "Alias names cannot contain colons."
        return 1
    if u" " in alias:
        print >>stderr, "Alias names cannot contain spaces."
        return 1

    old_aliases = get_aliases(nodedir)
    if alias in old_aliases:
        print >>stderr, "Alias %s already exists!" % quote_output(alias)
        return 1
    aliasfile = os.path.join(nodedir, "private", "aliases")
    cap = uri.from_string_dirnode(cap).to_string()

    add_line_to_aliasfile(aliasfile, alias, cap)

    print >>stdout, "Alias %s added" % quote_output(alias)
    return 0
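add_line_to_aliasfile is defined elsewhere; under the assumption that private/aliases holds one "<alias>: <cap>" line per alias, UTF-8 encoded, a sketch of it would be:

import codecs

def add_line_to_aliasfile(aliasfile, alias, cap):
    # assumed format: one "<alias>: <cap>" line per alias
    f = codecs.open(aliasfile, "a", "utf-8")
    try:
        f.write(u"%s: %s\n" % (alias, cap.decode("utf-8")))
    finally:
        f.close()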
Example #16
 def put_uri_extension(self, data):
     offset = self._offsets['uri_extension']
     assert isinstance(data, str)
     precondition(len(data) <= self._uri_extension_size_max,
                  len(data), self._uri_extension_size_max)
     length = struct.pack(self.fieldstruct, len(data))
     return self._write(offset, length+data)
Example #17
    def upload(self, childpath):
        precondition(isinstance(childpath, unicode), childpath)

        #self.verboseprint("uploading %s.." % quote_output(childpath))
        metadata = get_local_metadata(childpath)

        # we can use the backupdb here
        must_upload, bdb_results = self.check_backupdb_file(childpath)

        if must_upload:
            self.verboseprint("uploading %s.." % quote_output(childpath))
            infileobj = open(childpath, "rb")
            url = self.options['node-url'] + "uri"
            resp = do_http("PUT", url, infileobj)
            if resp.status not in (200, 201):
                raise HTTPError("Error during file PUT", resp)

            filecap = resp.read().strip()
            self.verboseprint(" %s -> %s" % (quote_output(childpath, quotemarks=False),
                                             quote_output(filecap, quotemarks=False)))
            #self.verboseprint(" metadata: %s" % (quote_output(metadata, quotemarks=False),))

            if bdb_results:
                bdb_results.did_upload(filecap)

            self.files_uploaded += 1
            return filecap, metadata

        else:
            self.verboseprint("skipping %s.." % quote_output(childpath))
            self.files_reused += 1
            return bdb_results.was_uploaded(), metadata
Example #18
    def process(self, localpath):
        precondition(isinstance(localpath, unicode), localpath)
        # returns newdircap

        self.verboseprint("processing %s" % quote_output(localpath))
        create_contents = {} # childname -> (type, rocap, metadata)
        compare_contents = {} # childname -> rocap

        try:
            children = listdir_unicode(localpath)
        except EnvironmentError:
            self.directories_skipped += 1
            self.warn("WARNING: permission denied on directory %s" % quote_output(localpath))
            children = []
        except FilenameEncodingError:
            self.directories_skipped += 1
            self.warn("WARNING: could not list directory %s due to a filename encoding error" % quote_output(localpath))
            children = []

        for child in self.options.filter_listdir(children):
            assert isinstance(child, unicode), child
            childpath = os.path.join(localpath, child)
            # note: symlinks to directories are both islink() and isdir()
            if os.path.isdir(childpath) and not os.path.islink(childpath):
                metadata = get_local_metadata(childpath)
                # recurse on the child directory
                childcap = self.process(childpath)
                assert isinstance(childcap, str)
                create_contents[child] = ("dirnode", childcap, metadata)
                compare_contents[child] = childcap
            elif os.path.isfile(childpath) and not os.path.islink(childpath):
                try:
                    childcap, metadata = self.upload(childpath)
                    assert isinstance(childcap, str)
                    create_contents[child] = ("filenode", childcap, metadata)
                    compare_contents[child] = childcap
                except EnvironmentError:
                    self.files_skipped += 1
                    self.warn("WARNING: permission denied on file %s" % quote_output(childpath))
            else:
                self.files_skipped += 1
                if os.path.islink(childpath):
                    self.warn("WARNING: cannot backup symlink %s" % quote_output(childpath))
                else:
                    self.warn("WARNING: cannot backup special file %s" % quote_output(childpath))

        must_create, r = self.check_backupdb_directory(compare_contents)
        if must_create:
            self.verboseprint(" creating directory for %s" % quote_output(localpath))
            newdircap = mkdir(create_contents, self.options)
            assert isinstance(newdircap, str)
            if r:
                r.did_create(newdircap)
            self.directories_created += 1
            return newdircap
        else:
            self.verboseprint(" re-using old directory for %s" % quote_output(localpath))
            self.directories_reused += 1
            return r.was_created()
Example #19
def _pack_normalized_children(children, writekey, deep_immutable=False):
    """Take a dict that maps:
         children[unicode_nfc_name] = (IFileSystemNode, metadata_dict)
    and pack it into a single string, for use as the contents of the backing
    file. This is the same format as is returned by _unpack_contents. I also
    accept an AuxValueDict, in which case I'll use the auxilliary cached data
    as the pre-packed entry, which is faster than re-packing everything each
    time.

    If writekey is provided then I will superencrypt the child's writecap with
    writekey.

    If deep_immutable is True, I will require that all my children are deeply
    immutable, and will raise a MustBeDeepImmutableError if not.
    """
    precondition((writekey is None) or isinstance(writekey, str), writekey)

    has_aux = isinstance(children, AuxValueDict)
    entries = []
    for name in sorted(children.keys()):
        assert isinstance(name, unicode)
        entry = None
        (child, metadata) = children[name]
        child.raise_error()
        if deep_immutable and not child.is_allowed_in_immutable_directory():
            raise MustBeDeepImmutableError(
                "child %s is not allowed in an immutable directory" % quote_output(name, encoding="utf-8"), name
            )
        if has_aux:
            entry = children.get_aux(name)
        if not entry:
            assert IFilesystemNode.providedBy(child), (name, child)
            assert isinstance(metadata, dict)
            rw_uri = child.get_write_uri()
            if rw_uri is None:
                rw_uri = ""
            assert isinstance(rw_uri, str), rw_uri

            # should be prevented by MustBeDeepImmutableError check above
            assert not (rw_uri and deep_immutable)

            ro_uri = child.get_readonly_uri()
            if ro_uri is None:
                ro_uri = ""
            assert isinstance(ro_uri, str), ro_uri
            if writekey is not None:
                writecap = netstring(_encrypt_rw_uri(writekey, rw_uri))
            else:
                writecap = ZERO_LEN_NETSTR
            entry = "".join(
                [
                    netstring(name.encode("utf-8")),
                    netstring(strip_prefix_for_ro(ro_uri, deep_immutable)),
                    writecap,
                    netstring(simplejson.dumps(metadata)),
                ]
            )
        entries.append(netstring(entry))
    return "".join(entries)
Example #20
 def remote_write(self, offset, data):
     start = time.time()
     precondition(not self.closed)
     if self.throw_out_all_data:
         return
     self._sharefile.write_share_data(offset, data)
     self.ss.add_latency("write", time.time() - start)
     self.ss.count("write")
Example #21
 def _got_data(self, results, blocknum):
     precondition(blocknum < self.num_blocks, self, blocknum, self.num_blocks)
     sharehashes, blockhashes, blockdata = results
     try:
         sharehashes = dict(sharehashes)
     except ValueError, le:
         le.args = tuple(le.args + (sharehashes,))
         raise
Example #22
 def decode(self, some_shares, their_shareids):
     precondition(len(some_shares) == len(their_shareids),
                  len(some_shares), len(their_shareids))
     precondition(len(some_shares) == self.required_shares,
                  len(some_shares), self.required_shares)
     data = self.decoder.decode(some_shares,
                                [int(s) for s in their_shareids])
     return defer.succeed(data)
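self.decoder wraps a zfec codec: any required_shares of the produced shares reconstruct the original blocks. A minimal round-trip sketch, assuming the zfec package (the share ids below are arbitrary secondary shares):

import zfec

k, m = 3, 10
enc = zfec.Encoder(k, m)
blocks = enc.encode(["aaa", "bbb", "ccc"])      # k equal-sized inputs -> m shares
dec = zfec.Decoder(k, m)
recovered = dec.decode(blocks[4:7], [4, 5, 6])  # any k shares plus their ids
assert recovered == ["aaa", "bbb", "ccc"]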
Example #23
 def set_nodes(self, entries, overwrite=True):
     precondition(isinstance(entries, dict), entries)
     if self.is_readonly():
         return defer.fail(NotWriteableError())
     a = Adder(self, entries, overwrite=overwrite, create_readonly_node=self._create_readonly_node)
     d = self._node.modify(a.modify)
     d.addCallback(lambda res: self)
     return d
Example #24
 def next(self):
     precondition(self.i <= len(self.c.l),
                  "The iterated ValueOrderedDict doesn't have this many elements.  "
                  "Most likely this is because someone altered the contents of the "
                  "ValueOrderedDict while the iteration was in progress.",
                  self.i, self.c)
     precondition((self.i == len(self.c.l)) or self.c.l[self.i][1] in self.c.d,
                  "The iterated ValueOrderedDict doesn't have this key.  "
                  "Most likely this is because someone altered the contents of the "
                  "ValueOrderedDict while the iteration was in progress.",
                  self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
     if self.i == len(self.c.l):
         raise StopIteration
     le = self.c.l[self.i]
     self.i += 1
     return le[0]
Example #25
def a2b(cs):
    """
    @param cs the base-32 encoded data (a string)
    """
    precondition(could_be_base32_encoded(cs), "cs is required to be possibly base32 encoded data.", cs=cs)
    precondition(isinstance(cs, str), cs)

    return a2b_l(cs, num_octets_that_encode_to_this_many_quintets(len(cs))*8)
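Tahoe's base-32 alphabet is the RFC 3548 one, lowercased and unpadded, so for intuition a2b behaves roughly like this stdlib-based sketch (the real implementation additionally rejects strings whose trailing bits could not have come from an encoder):

import base64

def a2b_approx(cs):
    # restore the '=' padding and upper-case for the stdlib decoder
    pad = "=" * ((8 - len(cs) % 8) % 8)
    return base64.b32decode(cs.upper() + pad)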
Example #26
    def get_source_info(self, source_spec):
        """
        This turns an argv string into a (Local|Tahoe)(File|Directory)Source.
        """
        precondition(isinstance(source_spec, unicode), source_spec)
        rootcap, path_utf8 = get_alias(self.aliases, source_spec, None)
        path = path_utf8.decode("utf-8")
        # any trailing slash is removed in abspath_expanduser_unicode(), so
        # make a note of it here, to throw an error later
        had_trailing_slash = path.endswith("/")
        if rootcap == DefaultAliasMarker:
            # no alias, so this is a local file
            pathname = abspath_expanduser_unicode(path)
            name = os.path.basename(pathname)
            if not os.path.exists(pathname):
                raise MissingSourceError(source_spec, quotefn=quote_local_unicode_path)
            if os.path.isdir(pathname):
                t = LocalDirectorySource(self.progress, pathname, name)
            else:
                if had_trailing_slash:
                    raise FilenameWithTrailingSlashError(source_spec,
                                                         quotefn=quote_local_unicode_path)
                if not os.path.isfile(pathname):
                    raise WeirdSourceError(pathname)
                t = LocalFileSource(pathname, name) # non-empty
        else:
            # this is a tahoe object
            url = self.nodeurl + "uri/%s" % urllib.quote(rootcap)
            name = None
            if path:
                if path.endswith("/"):
                    path = path[:-1]
                url += "/" + escape_path(path)
                last_slash = path.rfind(u"/")
                name = path
                if last_slash != -1:
                    name = path[last_slash+1:]

            resp = do_http("GET", url + "?t=json")
            if resp.status == 404:
                raise MissingSourceError(source_spec)
            elif resp.status != 200:
                raise HTTPError("Error examining source %s" % quote_output(source_spec),
                                resp)
            parsed = json.loads(resp.read())
            nodetype, d = parsed
            if nodetype == "dirnode":
                t = TahoeDirectorySource(self.nodeurl, self.cache,
                                         self.progress, name)
                t.init_from_parsed(parsed)
            else:
                if had_trailing_slash:
                    raise FilenameWithTrailingSlashError(source_spec)
                writecap = to_str(d.get("rw_uri"))
                readcap = to_str(d.get("ro_uri"))
                mutable = d.get("mutable", False) # older nodes don't provide it
                t = TahoeFileSource(self.nodeurl, mutable, writecap, readcap, name)
        return t
Example #27
def pack_children(childrenx, writekey, deep_immutable=False):
    # initial_children must have metadata (i.e. {} instead of None)
    children = {}
    for (namex, (node, metadata)) in childrenx.iteritems():
        precondition(isinstance(metadata, dict),
                     "directory creation requires metadata to be a dict, not None", metadata)
        children[normalize(namex)] = (node, metadata)

    return _pack_normalized_children(children, writekey=writekey, deep_immutable=deep_immutable)
Example #28
def get_alias(aliases, path_unicode, default):
    """
    Transform u"work:path/filename" into (aliases[u"work"], u"path/filename".encode('utf-8')).
    If default=None, then an empty alias is indicated by returning
    DefaultAliasMarker. We special-case strings with a recognized cap URI
    prefix, to make it easy to access specific files/directories by their
    caps.
    If the transformed alias is either not found in aliases, or is blank
    and default is not found in aliases, an UnknownAliasError is
    raised.
    """
    precondition(isinstance(path_unicode, unicode), path_unicode)

    from allmydata import uri
    path = path_unicode.encode('utf-8').strip(" ")
    if uri.has_uri_prefix(path):
        # We used to require "URI:blah:./foo" in order to get a subpath,
        # stripping out the ":./" sequence. We still allow that for compatibility,
        # but now also allow just "URI:blah/foo".
        sep = path.find(":./")
        if sep != -1:
            return path[:sep], path[sep+3:]
        sep = path.find("/")
        if sep != -1:
            return path[:sep], path[sep+1:]
        return path, ""
    colon = path.find(":")
    if colon == -1:
        # no alias
        if default is None:
            return DefaultAliasMarker, path
        if default not in aliases:
            raise UnknownAliasError("No alias specified, and the default %s alias doesn't exist. "
                                    "To create it, use 'tahoe create-alias %s'."
                                    % (quote_output(default), quote_output(default, quotemarks=False)))
        return uri.from_string_dirnode(aliases[default]).to_string(), path
    if colon == 1 and default is None and platform_uses_lettercolon_drivename():
        # treat C:\why\must\windows\be\so\weird as a local path, not a tahoe
        # file in the "C:" alias
        return DefaultAliasMarker, path

    # decoding must succeed because path is valid UTF-8 and colon & space are ASCII
    alias = path[:colon].decode('utf-8')
    if u"/" in alias:
        # no alias, but there's a colon in a dirname/filename, like
        # "foo/bar:7"
        if default is None:
            return DefaultAliasMarker, path
        if default not in aliases:
            raise UnknownAliasError("No alias specified, and the default %s alias doesn't exist. "
                                    "To create it, use 'tahoe create-alias %s'."
                                    % (quote_output(default), quote_output(default, quotemarks=False)))
        return uri.from_string_dirnode(aliases[default]).to_string(), path
    if alias not in aliases:
        raise UnknownAliasError("Unknown alias %s, please create it with 'tahoe add-alias' or 'tahoe create-alias'." %
                                quote_output(alias))
    return uri.from_string_dirnode(aliases[alias]).to_string(), path[colon+1:]
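For intuition, the colon split performed on an ordinary aliased path looks like the following; the cap looked up for the alias is then normalized through uri.from_string_dirnode(...).to_string(), which is why the alias table must hold a real dircap:

path = u"work:subdir/file.txt".encode("utf-8")
colon = path.find(":")
alias, remainder = path[:colon].decode("utf-8"), path[colon + 1:]
assert (alias, remainder) == (u"work", "subdir/file.txt")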
Example #29
 def subscribe_to(self, service_name, cb, *args, **kwargs):
     self._local_subscribers.append( (service_name,cb,args,kwargs) )
     self._subscribed_service_names.add(service_name)
     self._maybe_subscribe()
     for index,(ann,key_s,when) in self._inbound_announcements.items():
         precondition(isinstance(key_s, str), key_s)
         servicename = index[0]
         if servicename == service_name:
             eventually(cb, key_s, ann, *args, **kwargs)
Example #30
 def __init__(self, node, entries=None, overwrite=True, create_readonly_node=None):
     self.node = node
     if entries is None:
         entries = {}
     precondition(isinstance(entries, dict), entries)
     # keys of 'entries' may not be normalized.
     self.entries = entries
     self.overwrite = overwrite
     self.create_readonly_node = create_readonly_node
Example #31
    def _write_share_data(self, f, offset, data):
        length = len(data)
        precondition(offset >= 0)
        data_length = self._read_data_length(f)
        extra_lease_offset = self._read_extra_lease_offset(f)

        if offset + length >= data_length:
            # They are expanding their data size.

            if self.DATA_OFFSET + offset + length > extra_lease_offset:
                # TODO: allow containers to shrink. For now, they remain
                # large.

                # Their new data won't fit in the current container, so we
                # have to move the leases. With luck, they're expanding it
                # more than the size of the extra lease block, which will
                # minimize the corrupt-the-share window
                self._change_container_size(f, offset + length)
                extra_lease_offset = self._read_extra_lease_offset(f)

                # an interrupt here is ok.. the container has been enlarged
                # but the data remains untouched

            assert self.DATA_OFFSET + offset + length <= extra_lease_offset
            # Their data now fits in the current container. We must write
            # their new data and modify the recorded data size.

            # Fill any newly exposed empty space with 0's.
            if offset > data_length:
                f.seek(self.DATA_OFFSET + data_length)
                f.write('\x00' * (offset - data_length))
                f.flush()

            new_data_length = offset + length
            self._write_data_length(f, new_data_length)
            # an interrupt here will result in a corrupted share

        # now all that's left to do is write out their data
        f.seek(self.DATA_OFFSET + offset)
        f.write(data)
        return
Example #32
def run(config, stdout, stderr):
    from twisted.internet import reactor
    from twisted.python import log, logfile
    from allmydata import client

    basedir = config['basedir']
    precondition(isinstance(basedir, unicode), basedir)

    if not os.path.isdir(basedir):
        print >> stderr, "%s does not look like a directory at all" % quote_output(
            basedir)
        return 1
    for fn in listdir_unicode(basedir):
        if fn.endswith(u".tac"):
            tac = str(fn)
            break
    else:
        print >> stderr, "%s does not look like a node directory (no .tac file)" % quote_output(
            basedir)
        return 1
    if "client" not in tac:
        print >> stderr, ("%s looks like it contains a non-client node (%s).\n"
                          "Use 'tahoe start' instead of 'tahoe run'." %
                          (quote_output(basedir), tac))
        return 1

    os.chdir(basedir)

    # set up twisted logging. this will become part of the node rsn.
    logdir = os.path.join(basedir, 'logs')
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    lf = logfile.LogFile('tahoesvc.log', logdir)
    log.startLogging(lf)

    # run the node itself
    c = client.Client(basedir)
    reactor.callLater(0, c.startService)  # after reactor startup
    reactor.run()

    return 0
Example #33
def invite(options):
    precondition(isinstance(options.alias, unicode), alias=options.alias)
    precondition(isinstance(options.nickname, unicode),
                 nickname=options.nickname)

    from allmydata.scripts import tahoe_mkdir
    mkdir_options = _delegate_options(options, MakeDirectoryOptions())
    mkdir_options.where = None

    rc = tahoe_mkdir.mkdir(mkdir_options)
    if rc != 0:
        print >> options.stderr, "magic-folder: failed to mkdir\n"
        return rc

    # FIXME this assumes caps are ASCII.
    dmd_write_cap = mkdir_options.stdout.getvalue().strip()
    dmd_readonly_cap = uri.from_string(
        dmd_write_cap).get_readonly().to_string()
    if dmd_readonly_cap is None:
        print >> options.stderr, "magic-folder: failed to diminish dmd write cap\n"
        return 1

    magic_write_cap = get_aliases(options["node-directory"])[options.alias]
    magic_readonly_cap = uri.from_string(
        magic_write_cap).get_readonly().to_string()

    # tahoe ln CLIENT_READCAP COLLECTIVE_WRITECAP/NICKNAME
    ln_options = _delegate_options(options, LnOptions())
    ln_options.from_file = unicode(dmd_readonly_cap, 'utf-8')
    ln_options.to_file = u"%s/%s" % (unicode(magic_write_cap,
                                             'utf-8'), options.nickname)
    rc = tahoe_mv.mv(ln_options, mode="link")
    if rc != 0:
        print >> options.stderr, "magic-folder: failed to create link\n"
        print >> options.stderr, ln_options.stderr.getvalue()
        return rc

    # FIXME: this assumes caps are ASCII.
    print >> options.stdout, "%s%s%s" % (magic_readonly_cap, INVITE_SEPARATOR,
                                         dmd_write_cap)
    return 0
Example #34
    def __init__(self, readbucketproxy, verifycap, fetch_failures=None):
        # fetch_failures is for debugging -- see test_encode.py
        self._fetch_failures = fetch_failures
        self._readbucketproxy = readbucketproxy
        precondition(IVerifierURI.providedBy(verifycap), verifycap)
        self._verifycap = verifycap

        # required
        self.segment_size = None
        self.crypttext_root_hash = None
        self.share_root_hash = None

        # computed
        self.block_size = None
        self.share_size = None
        self.num_segments = None
        self.tail_data_size = None
        self.tail_segment_size = None

        # optional
        self.crypttext_hash = None
Example #35
    def set_node(self, namex, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name.

        If this directory node is read-only, the Deferred will errback with a
        NotWriteableError."""

        precondition(IFilesystemNode.providedBy(child), child)

        if self.is_readonly():
            return defer.fail(NotWriteableError())
        assert IFilesystemNode.providedBy(child), child
        a = Adder(self,
                  overwrite=overwrite,
                  create_readonly_node=self._create_readonly_node)
        a.set_node(namex, child, metadata)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: child)
        return d
Example #36
    def _parse_offsets(self, data):
        precondition(len(data) >= 0x4)
        self._offsets = {}
        (version, ) = struct.unpack(">L", data[0:4])
        if version not in (1, 2):
            raise ShareVersionIncompatible(version)

        if version == 1:
            precondition(len(data) >= 0x24)
            x = 0x0c
            fieldsize = 0x4
            fieldstruct = ">L"
        else:
            precondition(len(data) >= 0x44)
            x = 0x14
            fieldsize = 0x8
            fieldstruct = ">Q"

        self._version = version
        self._fieldsize = fieldsize
        self._fieldstruct = fieldstruct

        for field in (
                'data',
                'plaintext_hash_tree',  # UNUSED
                'crypttext_hash_tree',
                'block_hashes',
                'share_hashes',
                'uri_extension',
        ):
            offset = struct.unpack(fieldstruct, data[x:x + fieldsize])[0]
            x += fieldsize
            self._offsets[field] = offset
        return self._offsets
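A synthetic round trip, for illustration, of the version-1 layout that _parse_offsets expects: a 4-byte big-endian version, eight bytes this parser skips, then six 4-byte offsets starting at 0x0c (the offset values here are made up):

import struct

offsets = [36, 36, 40, 44, 48, 52]                    # made-up values
data = struct.pack(">3L", 1, 0, 0) + struct.pack(">6L", *offsets)
assert len(data) >= 0x24
assert struct.unpack(">L", data[0:4]) == (1,)         # version field
assert struct.unpack(">L", data[0x0c:0x10]) == (36,)  # first offset: 'data'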
Example #37
 def _got_announcement(self, key_s, ann, handlers=None):
     precondition(isinstance(key_s, str), key_s)
     precondition(key_s.startswith("v0-"), key_s)
     precondition(ann["service-name"] == "storage", ann["service-name"])
     if handlers is not None:
         s = NativeStorageServer(key_s, ann, self._tub_options, handlers)
     else:
         s = NativeStorageServer(key_s, ann, self._tub_options,
                                 self._tub_handlers)
     s.on_status_changed(lambda _: self._got_connection())
     server_id = s.get_serverid()
     old = self.servers.get(server_id)
     if old and server_id not in self.static_servers:
         if old.get_announcement() == ann:
             return  # duplicate
         # replacement
         del self.servers[server_id]
         old.stop_connecting()
         old.disownServiceParent()
         # NOTE: this disownServiceParent() returns a Deferred that
         # doesn't fire until Tub.stopService fires, which will wait for
         # any existing connections to be shut down. This doesn't
         # generally matter for normal runtime, but unit tests can run
         # into DirtyReactorErrors if they don't block on these. If a test
         # replaces one server with a newer version, then terminates
         # before the old one has been shut down, it might get
         # DirtyReactorErrors. The fix would be to gather these Deferreds
         # into a structure that will block StorageFarmBroker.stopService
         # until they have fired (but hopefully don't keep reference
         # cycles around when they fire earlier than that, which will
         # almost always be the case for normal runtime).
     # now we forget about them and start using the new one
     s.setServiceParent(self)
     self.servers[server_id] = s
     s.start_connecting(self._trigger_connections)
Example #38
def create(options):
    precondition(isinstance(options.alias, unicode), alias=options.alias)
    precondition(isinstance(options.nickname, (unicode, NoneType)), nickname=options.nickname)
    precondition(isinstance(options.local_dir, (unicode, NoneType)), local_dir=options.local_dir)

    from allmydata.scripts import tahoe_add_alias
    create_alias_options = _delegate_options(options, CreateAliasOptions())
    create_alias_options.alias = options.alias

    rc = tahoe_add_alias.create_alias(create_alias_options)
    if rc != 0:
        print >>options.stderr, create_alias_options.stderr.getvalue()
        return rc
    print >>options.stdout, create_alias_options.stdout.getvalue()

    if options.nickname is not None:
        invite_options = _delegate_options(options, InviteOptions())
        invite_options.alias = options.alias
        invite_options.nickname = options.nickname
        rc = invite(invite_options)
        if rc != 0:
            print >>options.stderr, "magic-folder: failed to invite after create\n"
            print >>options.stderr, invite_options.stderr.getvalue()
            return rc
        invite_code = invite_options.stdout.getvalue().strip()
        join_options = _delegate_options(options, JoinOptions())
        join_options.local_dir = options.local_dir
        join_options.invite_code = invite_code
        rc = join(join_options)
        if rc != 0:
            print >>options.stderr, "magic-folder: failed to join after create\n"
            print >>options.stderr, join_options.stderr.getvalue()
            return rc
    return 0
Example #39
def create(options):
    precondition(isinstance(options.alias, unicode), alias=options.alias)
    precondition(isinstance(options.nickname, (unicode, NoneType)),
                 nickname=options.nickname)
    precondition(isinstance(options.local_dir, (unicode, NoneType)),
                 local_dir=options.local_dir)

    # make sure we don't already have a magic-folder with this name before we create the alias
    maybe_upgrade_magic_folders(options["node-directory"])
    folders = load_magic_folders(options["node-directory"])
    if options['name'] in folders:
        print("Already have a magic-folder named '{}'".format(options['name']),
              file=options.stderr)
        return 1

    # create an alias; this basically just remembers the cap for the
    # master directory
    create_alias_options = _delegate_options(options, CreateAliasOptions())
    create_alias_options.alias = options.alias

    rc = tahoe_add_alias.create_alias(create_alias_options)
    if rc != 0:
        print(create_alias_options.stderr.getvalue(), file=options.stderr)
        return rc
    print(create_alias_options.stdout.getvalue(), file=options.stdout)

    if options.nickname is not None:
        print(u"Inviting myself as client '{}':".format(options.nickname),
              file=options.stdout)
        invite_options = _delegate_options(options, InviteOptions())
        invite_options.alias = options.alias
        invite_options.nickname = options.nickname
        invite_options['name'] = options['name']
        rc = invite(invite_options)
        if rc != 0:
            print(u"magic-folder: failed to invite after create\n",
                  file=options.stderr)
            print(invite_options.stderr.getvalue(), file=options.stderr)
            return rc
        invite_code = invite_options.stdout.getvalue().strip()
        print(u"  created invite code", file=options.stdout)
        join_options = _delegate_options(options, JoinOptions())
        join_options['poll-interval'] = options['poll-interval']
        join_options.nickname = options.nickname
        join_options.local_dir = options.local_dir
        join_options.invite_code = invite_code
        rc = join(join_options)
        if rc != 0:
            print(u"magic-folder: failed to join after create\n",
                  file=options.stderr)
            print(join_options.stderr.getvalue(), file=options.stderr)
            return rc
        print(u"  joined new magic-folder", file=options.stdout)
        print(u"Successfully created magic-folder '{}' with alias '{}:' "
              u"and client '{}'\nYou must re-start your node before the "
              u"magic-folder will be active.".format(options['name'],
                                                     options.alias,
                                                     options.nickname),
              file=options.stdout)
    return 0
Example #40
def quote_output(s, quotemarks=True, quote_newlines=None, encoding=None):
    """
    Encode either a Unicode string or a UTF-8-encoded bytestring for representation
    on stdout or stderr, tolerating errors. If 'quotemarks' is True, the string is
    always quoted; otherwise, it is quoted only if necessary to avoid ambiguity or
    control bytes in the output. (Newlines are counted as control bytes iff
    quote_newlines is True.)

    Quoting may use either single or double quotes. Within single quotes, all
    characters stand for themselves, and ' will not appear. Within double quotes,
    Python-compatible backslash escaping is used.

    If not explicitly given, quote_newlines is True when quotemarks is True.
    """
    precondition(isinstance(s, (str, unicode)), s)
    if quote_newlines is None:
        quote_newlines = quotemarks

    if isinstance(s, str):
        try:
            s = s.decode('utf-8')
        except UnicodeDecodeError:
            return 'b"%s"' % (ESCAPABLE_8BIT.sub(
                lambda m: _str_escape(m, quote_newlines), s), )

    must_double_quote = MUST_DOUBLE_QUOTE_NL if quote_newlines else MUST_DOUBLE_QUOTE
    if must_double_quote.search(s) is None:
        try:
            out = s.encode(encoding or io_encoding)
            if quotemarks or out.startswith('"'):
                return "'%s'" % (out, )
            else:
                return out
        except (UnicodeDecodeError, UnicodeEncodeError):
            pass

    escaped = ESCAPABLE_UNICODE.sub(
        lambda m: _unicode_escape(m, quote_newlines), s)
    return '"%s"' % (escaped.encode(encoding or io_encoding,
                                    'backslashreplace'), )
Example #41
def unicode_to_argv(s, mangle=False):
    """
    Encode the given Unicode argument as a bytestring.
    If the argument is to be passed to a different process, then the 'mangle' argument
    should be true; on Windows, this uses a mangled encoding that will be reversed by
    code in runner.py.

    On Python 3, just return the string unchanged, since argv is unicode.
    """
    precondition(isinstance(s, unicode), s)
    if PY3:
        warnings.warn("This will be unnecessary once Python 2 is dropped.",
                      DeprecationWarning)
        return s

    if mangle and sys.platform == "win32":
        # This must be the same as 'mangle' in bin/tahoe-script.template.
        return bytes(
            re.sub(u'[^\\x20-\\x7F]', lambda m: u'\x7F%x;' %
                   (ord(m.group(0)), ), s), io_encoding)
    else:
        return s.encode(io_encoding)
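For intuition, the Windows mangling replaces each non-ASCII code point with \x7F, the code point in hex, and a semicolon. A small self-contained demonstration of that substitution:

import re

s = u"caf\xe9"   # u"café"
mangled = re.sub(u'[^\\x20-\\x7F]',
                 lambda m: u'\x7F%x;' % (ord(m.group(0)),), s)
assert mangled == u"caf\x7fe9;"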
Example #42
    def set_children(self, entries, overwrite=True):
        # this takes URIs
        a = Adder(self, overwrite=overwrite,
                  create_readonly_node=self._create_readonly_node)
        for (namex, e) in entries.iteritems():
            assert isinstance(namex, unicode), namex
            if len(e) == 2:
                writecap, readcap = e
                metadata = None
            else:
                assert len(e) == 3
                writecap, readcap, metadata = e
            precondition(isinstance(writecap, (str,type(None))), writecap)
            precondition(isinstance(readcap, (str,type(None))), readcap)

            # We now allow packing unknown nodes, provided they are valid
            # for this type of directory.
            child_node = self._create_and_validate_node(writecap, readcap, namex)
            a.set_node(namex, child_node, metadata)
        d = self._node.modify(a.modify)
        d.addCallback(lambda ign: self)
        return d
Example #43
        def _apply(old_contents):
            new_contents = modifier(old_contents, self._servermap, first_time)
            precondition((isinstance(new_contents, str) or
                          new_contents is None),
                         "Modifier function must return a string "
                         "or None")

            if new_contents is None or new_contents == old_contents:
                log.msg("no changes")
                # no changes need to be made
                if first_time:
                    return
                # However, since Publish is not automatically doing a
                # recovery when it observes UCWE, we need to do a second
                # publish. See #551 for details. We'll basically loop until
                # we managed an uncontested publish.
                old_uploadable = MutableData(old_contents)
                new_contents = old_uploadable
            else:
                new_contents = MutableData(new_contents)

            return self._upload(new_contents)
Example #44
def create_introducer(config, out=sys.stdout, err=sys.stderr):
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
            print >>err, "To avoid clobbering anything, I am going to quit now."
            print >>err, "Please use a different directory, or empty this one."
            return -1
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    write_tac(basedir, "introducer")

    c = open(os.path.join(basedir, "tahoe.cfg"), "w")
    write_node_config(c, config)
    c.close()

    print >>out, "Introducer created in %s" % quote_local_unicode_path(basedir)
    return 0
Example #45
    def __init__(self, client, accountfile, accounturl, ftp_portstr):
        precondition(isinstance(accountfile, (unicode, NoneType)), accountfile)
        service.MultiService.__init__(self)

        r = Dispatcher(client)
        p = portal.Portal(r)

        if accountfile:
            c = AccountFileChecker(self, accountfile)
            p.registerChecker(c)
        if accounturl:
            c = AccountURLChecker(self, accounturl)
            p.registerChecker(c)
        if not accountfile and not accounturl:
            # we could leave this anonymous, with just the /uri/CAP form
            raise NeedRootcapLookupScheme("must provide some translation")

        f = ftp.FTPFactory(p)
        # strports requires a native string.
        ftp_portstr = ensure_str(ftp_portstr)
        s = strports.service(ftp_portstr, f)
        s.setServiceParent(self)
Example #46
def create_alias(options):
    # mkdir+add_alias
    nodedir = options['node-directory']
    alias = options.alias
    precondition(isinstance(alias, unicode), alias=alias)
    stdout = options.stdout
    stderr = options.stderr
    if u":" in alias:
        # a single trailing colon will already have been stripped if present
        print >>stderr, "Alias names cannot contain colons."
        return 1
    if u" " in alias:
        print >>stderr, "Alias names cannot contain spaces."
        return 1

    old_aliases = get_aliases(nodedir)
    if alias in old_aliases:
        print >>stderr, "Alias %s already exists!" % quote_output(alias)
        return 1

    aliasfile = os.path.join(nodedir, "private", "aliases")

    nodeurl = options['node-url']
    if not nodeurl.endswith("/"):
        nodeurl += "/"
    url = nodeurl + "uri?t=mkdir"
    resp = do_http("POST", url)
    rc = check_http_error(resp, stderr)
    if rc:
        return rc
    new_uri = resp.read().strip()

    # probably check for others..

    add_line_to_aliasfile(aliasfile, alias, new_uri)

    print >>stdout, "Alias %s created" % (quote_output(alias),)
    return 0
Example #47
    def remote_close(self):
        precondition(not self.closed)
        start = time.time()

        fileutil.make_dirs(os.path.dirname(self.finalhome))
        fileutil.rename(self.incominghome, self.finalhome)
        try:
            # self.incominghome is like storage/shares/incoming/ab/abcde/4 .
            # We try to delete the parent (.../ab/abcde) to avoid leaving
            # these directories lying around forever, but the delete might
            # fail if we're working on another share for the same storage
            # index (like ab/abcde/5). The alternative approach would be to
            # use a hierarchy of objects (PrefixHolder, BucketHolder,
            # ShareWriter), each of which is responsible for a single
            # directory on disk, and have them use reference counting of
            # their children to know when they should do the rmdir. This
            # approach is simpler, but relies on os.rmdir refusing to delete
            # a non-empty directory. Do *not* use fileutil.rm_dir() here!
            os.rmdir(os.path.dirname(self.incominghome))
            # we also delete the grandparent (prefix) directory, .../ab ,
            # again to avoid leaving directories lying around. This might
            # fail if there is another bucket open that shares a prefix (like
            # ab/abfff).
            os.rmdir(os.path.dirname(os.path.dirname(self.incominghome)))
            # we leave the great-grandparent (incoming/) directory in place.
        except EnvironmentError:
            # ignore the "can't rmdir because the directory is not empty"
            # exceptions, those are normal consequences of the
            # above-mentioned conditions.
            pass
        self._sharefile = None
        self.closed = True
        self._canary.dontNotifyOnDisconnect(self._disconnect_marker)

        filelen = os.stat(self.finalhome)[stat.ST_SIZE]
        self.ss.bucket_writer_closed(self, filelen)
        self.ss.add_latency("close", time.time() - start)
        self.ss.count("close")
Example #48
    def watch(self,
              path,
              mask=IN_WATCH_MASK,
              autoAdd=False,
              callbacks=None,
              recursive=False):
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        assert autoAdd == False

        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode('utf-8')
            _assert(isinstance(path_u, unicode), path_u=path_u)

        if path_u not in self._callbacks.keys():
            self._callbacks[path_u] = callbacks or []
            self._watches[path_u] = self._observer.schedule(
                INotifyEventHandler(path_u, mask, self._callbacks[path_u],
                                    self._pending_delay),
                path=path_u,
                recursive=False,   # note: the caller's `recursive` flag is not forwarded
            )
Example #49
    def add_lease(self, available_space, lease_info):
        """
        Add a new lease to this share.

        :param int available_space: The maximum number of bytes of storage to
            commit in this operation.  If more than this number of bytes is
            required, raise ``NoSpace`` instead.

        :raise NoSpace: If more than ``available_space`` bytes is required to
            complete the operation.  In this case, no lease is added.

        :return: ``None``
        """
        precondition(lease_info.owner_num != 0) # 0 means "no lease here"
        with open(self.home, 'rb+') as f:
            num_lease_slots = self._get_num_lease_slots(f)
            empty_slot = self._get_first_empty_lease_slot(f)
            if empty_slot is not None:
                self._write_lease_record(f, empty_slot, lease_info)
            else:
                if lease_info.mutable_size() > available_space:
                    raise NoSpace()
                self._write_lease_record(f, num_lease_slots, lease_info)
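The slot policy in add_lease() is: reuse the first vacated slot if one exists (which costs no new space), otherwise append a new record, but only when the extra bytes fit the budget. The same policy over a plain Python list, as a sketch (the NoSpace class and the None-marks-an-empty-slot convention are assumptions mirroring the docstring):

class NoSpace(Exception):
    pass

def add_lease_to_slots(slots, lease, lease_size, available_space):
    # reuse the first empty (None) slot if any; reusing costs no new space
    for i, existing in enumerate(slots):
        if existing is None:
            slots[i] = lease
            return
    # otherwise grow the list, but only if the budget allows it
    if lease_size > available_space:
        raise NoSpace()
    slots.append(lease)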
Example #50
@defer.inlineCallbacks   # required by the yield / defer.returnValue style used below
def create_introducer(config):
    out = config.stdout
    err = config.stderr
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print >>err, "The base directory %s is not empty." % quote_local_unicode_path(basedir)
            print >>err, "To avoid clobbering anything, I am going to quit now."
            print >>err, "Please use a different directory, or empty this one."
            defer.returnValue(-1)
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    write_tac(basedir, "introducer")

    fileutil.make_dirs(os.path.join(basedir, "private"), 0700)
    with open(os.path.join(basedir, "tahoe.cfg"), "w") as c:
        yield write_node_config(c, config)

    print >>out, "Introducer created in %s" % quote_local_unicode_path(basedir)
    defer.returnValue(0)
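The directory guard at the top of create_introducer() is a reusable rule: accept an empty directory, create a missing one, and refuse to touch anything else. A sketch, with os.listdir standing in for listdir_unicode:

import os

def ensure_empty_or_create(basedir):
    # use an existing-but-empty directory, create a missing one, and
    # refuse to clobber a non-empty one
    if os.path.exists(basedir):
        if os.listdir(basedir):
            raise RuntimeError("The base directory %s is not empty." % (basedir,))
    else:
        os.mkdir(basedir)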
Example #51
    def write(self, offset, data):  # type: (int, bytes) -> bool
        """
        Write data at given offset, return whether the upload is complete.
        """
        # Delay the timeout, since we received data:
        self._timeout.reset(30 * 60)
        start = self._clock.seconds()
        precondition(not self.closed)
        if self.throw_out_all_data:
            return False

        # Make sure we're not conflicting with existing data:
        end = offset + len(data)
        for (chunk_start, chunk_stop,
             _) in self._already_written.ranges(offset, end):
            chunk_len = chunk_stop - chunk_start
            actual_chunk = self._sharefile.read_share_data(
                chunk_start, chunk_len)
            writing_chunk = data[chunk_start - offset:chunk_stop - offset]
            if actual_chunk != writing_chunk:
                raise ConflictingWriteError(
                    "Chunk {}-{} doesn't match already written data.".format(
                        chunk_start, chunk_stop))
        self._sharefile.write_share_data(offset, data)

        self._already_written.set(True, offset, end)
        self.ss.add_latency("write", self._clock.seconds() - start)
        self.ss.count("write")

        # Return whether the whole thing has been written. See
        # https://github.com/mlenzen/collections-extended/issues/169 and
        # https://github.com/mlenzen/collections-extended/issues/172 for why
        # it's done this way.
        return sum([
            mr.stop - mr.start for mr in self._already_written.ranges()
        ]) == self._max_size
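write() enforces two rules worth isolating: an overlapping rewrite is accepted only if it carries bytes identical to what is already on disk, and "upload complete" means the written ranges cover exactly max_size bytes in total. A self-contained sketch of the coverage computation over plain (start, stop) tuples (the real code uses the collections-extended RangeMap linked in the comment):

def covered_bytes(ranges):
    # total coverage of half-open [start, stop) intervals, merging overlaps
    total, last_stop = 0, None
    for start, stop in sorted(ranges):
        if last_stop is not None and start < last_stop:
            start = last_stop           # don't double-count the overlap
        if stop > start:
            total += stop - start
            last_stop = stop
    return total

assert covered_bytes([(0, 10), (10, 20)]) == 20   # complete for max_size=20
assert covered_bytes([(0, 10), (15, 20)]) == 15   # a hole remains
assert covered_bytes([(0, 12), (8, 20)]) == 20    # overlap counted once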
Example #52
    def __init__(self, filename, max_size=None, create=False):
        """ If max_size is not None then I won't allow more than max_size
        to be written to me. If create=True then max_size must not be
        None. """
        precondition((max_size is not None) or (not create), max_size, create)
        self.home = filename
        self._max_size = max_size
        if create:
            # touch the file, so later callers will see that we're working on
            # it. Also construct the metadata.
            assert not os.path.exists(self.home)
            fileutil.make_dirs(os.path.dirname(self.home))
            f = open(self.home, 'wb')
            # The second field -- the four-byte share data length -- is no
            # longer used as of Tahoe v1.3.0, but we continue to write it in
            # there in case someone downgrades a storage server from >=
            # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
            # server to another, etc. We do saturation -- a share data length
            # larger than 2**32-1 (what can fit into the field) is marked as
            # the largest length that can fit into the field. That way, even
            # if this does happen, the old < v1.3.0 server will still allow
            # clients to read the first part of the share.
            f.write(struct.pack(">LLL", 1, min(2**32 - 1, max_size), 0))
            f.close()
            self._lease_offset = max_size + 0x0c
            self._num_leases = 0
        else:
            f = open(self.home, 'rb')
            filesize = os.path.getsize(self.home)
            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
            f.close()
            if version != 1:
                msg = "sharefile %s had version %d but we wanted 1" % \
                      (filename, version)
                raise UnknownImmutableContainerVersionError(msg)
            self._num_leases = num_leases
            self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
        self._data_offset = 0xc
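The saturation rule in the header write is easy to verify on its own: a share-data length that overflows the four-byte field is clamped to 2**32 - 1 rather than wrapped. A short demonstration of the same pack call:

import struct

def pack_header(max_size):
    # version 1, saturated share-data length, zero leases -- as above
    return struct.pack(">LLL", 1, min(2**32 - 1, max_size), 0)

# a small share records its true size; an oversized one saturates
assert struct.unpack(">LLL", pack_header(1000))[1] == 1000
assert struct.unpack(">LLL", pack_header(2**40))[1] == 2**32 - 1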
Example #53
    def copy_file_into_dir(self, source, name, target):
        precondition(isinstance(source, FileSources), source)
        precondition(isinstance(target, DirectoryTargets), target)
        precondition(isinstance(name, str), name)
        if self.need_to_copy_bytes(source, target):
            # if the target is a local directory, this will just write the
            # bytes to disk. If it is a tahoe directory, it will upload the
            # data, and stash the new filecap for a later set_children call.
            f = source.open(self.caps_only)
            target.put_file(name, f)
            return
        # otherwise we're copying tahoe to tahoe, and using immutable files,
        # so we can just make a link
        target.put_uri(name, source.bestcap())
Example #54
    def _read_share_data(self, f, offset, length):
        precondition(offset >= 0)
        precondition(length >= 0)
        data_length = self._read_data_length(f)
        if offset+length > data_length:
            # reads beyond the end of the data are truncated. Reads that
            # start beyond the end of the data return an empty string.
            length = max(0, data_length-offset)
        if length == 0:
            return b""
        precondition(offset+length <= data_length)
        f.seek(self.DATA_OFFSET+offset)
        data = f.read(length)
        return data
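The boundary rules in _read_share_data() restate cleanly over an in-memory buffer: reads that run past the end are truncated, and reads that start past the end return the empty byte string. A sketch:

def read_clamped(buf, offset, length):
    # mirror of _read_share_data's boundary rules over a bytes object
    data_length = len(buf)
    if offset + length > data_length:
        length = max(0, data_length - offset)
    if length == 0:
        return b""
    return buf[offset:offset + length]

assert read_clamped(b"abcdef", 4, 10) == b"ef"   # read beyond the end: truncated
assert read_clamped(b"abcdef", 9, 3) == b""      # start beyond the end: empty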
Example #55
    def __init__(self, verifycap, servers, verify, add_lease, secret_holder,
                 monitor):
        # precondition() raises AssertionError itself on failure (and returns
        # None on success), so wrapping it in `assert` would trip even when
        # the check passes; call it bare.
        precondition(isinstance(verifycap, CHKFileVerifierURI), verifycap, type(verifycap))

        prefix = str(base32.b2a(verifycap.get_storage_index()[:8])[:12], "utf-8")
        log.PrefixingLogMixin.__init__(self, facility="tahoe.immutable.checker", prefix=prefix)

        self._verifycap = verifycap

        self._monitor = monitor
        self._servers = servers
        self._verify = verify # bool: verify what the servers claim, or not?
        self._add_lease = add_lease

        frs = file_renewal_secret_hash(secret_holder.get_renewal_secret(),
                                       self._verifycap.get_storage_index())
        self.file_renewal_secret = frs
        fcs = file_cancel_secret_hash(secret_holder.get_cancel_secret(),
                                      self._verifycap.get_storage_index())
        self.file_cancel_secret = fcs
Example #56
    def _got_announcement(self, key_s, ann):
        """
        This callback is given to the introducer and called any time an
        announcement is received which has a valid signature and does not have
        a sequence number less than or equal to a previous sequence number
        seen for that server by that introducer.

        Note that sequence numbers are not compared across different
        introducers, so if we use more than one introducer it is possible
        for them to deliver us stale announcements in some cases.
        """
        precondition(isinstance(key_s, bytes), key_s)
        precondition(key_s.startswith(b"v0-"), key_s)
        precondition(ann["service-name"] == "storage", ann["service-name"])
        server_id = key_s

        if self._should_ignore_announcement(server_id, ann):
            return

        s = self._make_storage_server(
            server_id,
            {u"ann": ann},
        )

        try:
            old = self.servers.pop(server_id)
        except KeyError:
            pass
        else:
            # It's a replacement, get rid of the old one.
            old.stop_connecting()
            old.disownServiceParent()
            # NOTE: this disownServiceParent() returns a Deferred that
            # doesn't fire until Tub.stopService fires, which will wait for
            # any existing connections to be shut down. This doesn't
            # generally matter for normal runtime, but unit tests can run
            # into DirtyReactorErrors if they don't block on these. If a test
            # replaces one server with a newer version, then terminates
            # before the old one has been shut down, it might get
            # DirtyReactorErrors. The fix would be to gather these Deferreds
            # into a structure that will block StorageFarmBroker.stopService
            # until they have fired (but hopefully don't keep reference
            # cycles around when they fire earlier than that, which will
            # almost always be the case for normal runtime).

        # now we forget about them and start using the new one
        s.setServiceParent(self)
        self.servers[server_id] = s
        s.start_connecting(self._trigger_connections)
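The NOTE above sketches its own fix: keep each Deferred returned by disownServiceParent() and make stopService() wait for them. A hedged sketch of such a tracker (names here are illustrative, not the actual StorageFarmBroker fix):

from twisted.internet import defer

class _ShutdownTracker(object):
    """Collect shutdown Deferreds so stopService can block on them."""
    def __init__(self):
        self._pending = []

    def track(self, d):
        self._pending.append(d)
        # drop the reference once it fires, to avoid keeping cycles alive
        d.addBoth(self._forget, d)
        return d

    def _forget(self, result, d):
        self._pending.remove(d)
        return result

    def wait(self):
        # fires once every still-outstanding tracked Deferred has fired
        return defer.DeferredList(list(self._pending))

_got_announcement() would pass each disownServiceParent() result to track(), and stopService() would return wait(), so tests that replace servers no longer see DirtyReactorErrors.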
Example #57
    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False):
        precondition(self._state == NOT_STARTED, "watch() can only be called before startReading()", state=self._state)
        precondition(self._filter is None, "only one watch is supported")
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        #precondition(autoAdd == recursive, "need autoAdd and recursive to be the same", autoAdd=autoAdd, recursive=recursive)

        self._path = path
        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode(sys.getfilesystemencoding())
            _assert(isinstance(path_u, unicode), path_u=path_u)

        self._filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE

        if mask & (IN_ACCESS | IN_CLOSE_NOWRITE | IN_OPEN):
            self._filter = self._filter | FILE_NOTIFY_CHANGE_LAST_ACCESS
        if mask & IN_ATTRIB:
            self._filter = self._filter | FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY

        self._recursive = TRUE if recursive else FALSE
        self._callbacks = callbacks or []
        self._hDirectory = _open_directory(path_u)
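The mask translation above can be restated as a pure function. The constant values below follow the standard win32 FILE_NOTIFY_* and Linux inotify IN_* definitions and are spelled out here only for illustration:

FILE_NOTIFY_CHANGE_FILE_NAME   = 0x001
FILE_NOTIFY_CHANGE_DIR_NAME    = 0x002
FILE_NOTIFY_CHANGE_ATTRIBUTES  = 0x004
FILE_NOTIFY_CHANGE_LAST_WRITE  = 0x010
FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020
FILE_NOTIFY_CHANGE_SECURITY    = 0x100

IN_ACCESS, IN_ATTRIB, IN_CLOSE_NOWRITE, IN_OPEN = 0x001, 0x004, 0x010, 0x020

def mask_to_filter(mask):
    # always watch name changes and writes; widen the filter only when the
    # caller's inotify-style mask asks for access or attribute events
    flt = (FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME |
           FILE_NOTIFY_CHANGE_LAST_WRITE)
    if mask & (IN_ACCESS | IN_CLOSE_NOWRITE | IN_OPEN):
        flt |= FILE_NOTIFY_CHANGE_LAST_ACCESS
    if mask & IN_ATTRIB:
        flt |= FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY
    return flt

assert mask_to_filter(IN_ATTRIB) & FILE_NOTIFY_CHANGE_SECURITY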
Example #58
    def _got_announcement(self, key_s, ann):
        precondition(isinstance(key_s, str), key_s)
        precondition(key_s.startswith("v0-"), key_s)
        precondition(ann["service-name"] == "storage", ann["service-name"])
        server_id = key_s
        if server_id in self._static_server_ids:
            log.msg(format="ignoring announcement for static server '%(id)s'",
                    id=server_id,
                    facility="tahoe.storage_broker",
                    umid="AlxzqA",
                    level=log.UNUSUAL)
            return
        s = self._make_storage_server(
            server_id.decode("utf-8"),
            {u"ann": ann},
        )
        server_id = s.get_serverid()
        old = self.servers.get(server_id)
        if old:
            if old.get_announcement() == ann:
                return  # duplicate
            # replacement
            del self.servers[server_id]
            old.stop_connecting()
            old.disownServiceParent()
            # NOTE: this disownServiceParent() returns a Deferred that
            # doesn't fire until Tub.stopService fires, which will wait for
            # any existing connections to be shut down. This doesn't
            # generally matter for normal runtime, but unit tests can run
            # into DirtyReactorErrors if they don't block on these. If a test
            # replaces one server with a newer version, then terminates
            # before the old one has been shut down, it might get
            # DirtyReactorErrors. The fix would be to gather these Deferreds
            # into a structure that will block StorageFarmBroker.stopService
            # until they have fired (but hopefully don't keep reference
            # cycles around when they fire earlier than that, which will
            # almost always be the case for normal runtime).
        # now we forget about them and start using the new one
        s.setServiceParent(self)
        self.servers[server_id] = s
        s.start_connecting(self._trigger_connections)
Example #59
@inlineCallbacks   # required by the yield / returnValue style used below
def create(options):
    precondition(isinstance(options.alias, unicode), alias=options.alias)
    precondition(isinstance(options.nickname, (unicode, NoneType)),
                 nickname=options.nickname)
    precondition(isinstance(options.local_dir, (unicode, NoneType)),
                 local_dir=options.local_dir)

    try:
        from twisted.internet import reactor
        treq = HTTPClient(Agent(reactor))

        name = options['name']
        nodedir = options.parent.node_directory
        localdir = options.local_dir
        rc = yield _create(options.alias, options.nickname, name, nodedir,
                           localdir, options["poll-interval"], treq)
        print("Alias %s created" % (quote_output(options.alias), ),
              file=options.stdout)
    except Exception as e:
        print("%s" % str(e), file=options.stderr)
        returnValue(1)

    returnValue(rc)
Example #60
def create_node(config, out=sys.stdout, err=sys.stderr):
    basedir = config['basedir']
    # This should always be called with an absolute Unicode basedir.
    precondition(isinstance(basedir, unicode), basedir)

    if os.path.exists(basedir):
        if listdir_unicode(basedir):
            print >> err, "The base directory %s is not empty." % quote_output(
                basedir)
            print >> err, "To avoid clobbering anything, I am going to quit now."
            print >> err, "Please use a different directory, or empty this one."
            return -1
        # we're willing to use an empty directory
    else:
        os.mkdir(basedir)
    f = open(os.path.join(basedir, "tahoe-client.tac"), "w")
    f.write(client_tac)
    f.close()

    c = open(os.path.join(basedir, "tahoe.cfg"), "w")

    write_node_config(c, config)

    c.write("[client]\n")
    c.write("# Which services should this client connect to?\n")
    c.write("introducer.furl = %s\n" % config.get("introducer", ""))
    c.write("helper.furl =\n")
    c.write("#key_generator.furl =\n")
    c.write("#stats_gatherer.furl =\n")
    c.write("\n")
    c.write("# What encoding parameters should this client use for uploads?\n")
    c.write("#shares.needed = 3\n")
    c.write("#shares.happy = 7\n")
    c.write("#shares.total = 10\n")
    c.write("\n")

    boolstr = {True: "true", False: "false"}
    c.write("[storage]\n")
    c.write("# Shall this node provide storage service?\n")
    storage_enabled = not config.get("no-storage", None)
    c.write("enabled = %s\n" % boolstr[storage_enabled])
    c.write("#readonly =\n")
    c.write("reserved_space = 1G\n")
    c.write("#expire.enabled =\n")
    c.write("#expire.mode =\n")
    c.write("\n")

    c.write("[helper]\n")
    c.write("# Shall this node run a helper service that clients can use?\n")
    c.write("enabled = false\n")
    c.write("\n")

    c.write("[drop_upload]\n")
    c.write(
        "# Shall this node automatically upload files created or modified in a local directory?\n"
    )
    c.write("enabled = false\n")
    c.write(
        "# To specify the target of uploads, a mutable directory writecap URI must be placed\n"
        "# in 'private/drop_upload_dircap'.\n")
    c.write("local.directory = ~/drop_upload\n")
    c.write("\n")

    c.close()

    from allmydata.util import fileutil
    fileutil.make_dirs(os.path.join(basedir, "private"), 0700)
    print >> out, "Node created in %s" % quote_output(basedir)
    if not config.get("introducer", ""):
        print >> out, " Please set [client]introducer.furl= in tahoe.cfg!"
        print >> out, " The node cannot connect to a grid without it."
    if not config.get("nickname", ""):
        print >> out, " Please set [node]nickname= in tahoe.cfg"
    return 0
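Tracing the c.write() calls: a run without --introducer and with storage enabled produces a tahoe.cfg shaped like this (the [node] section emitted by write_node_config is elided):

[client]
# Which services should this client connect to?
introducer.furl =
helper.furl =
#key_generator.furl =
#stats_gatherer.furl =

# What encoding parameters should this client use for uploads?
#shares.needed = 3
#shares.happy = 7
#shares.total = 10

[storage]
# Shall this node provide storage service?
enabled = true
#readonly =
reserved_space = 1G
#expire.enabled =
#expire.mode =

[helper]
# Shall this node run a helper service that clients can use?
enabled = false

[drop_upload]
# Shall this node automatically upload files created or modified in a local directory?
enabled = false
# To specify the target of uploads, a mutable directory writecap URI must be placed
# in 'private/drop_upload_dircap'.
local.directory = ~/drop_upload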