Example #1
    def build_targetmap(self, sources, target):
        num_source_files = len([s for s in sources
                                if isinstance(s, FileSources)])
        num_source_dirs = len([s for s in sources
                               if isinstance(s, DirectorySources)])
        self.progress("attaching sources to targets, "
                      "%d files / %d dirs in root" %
                      (num_source_files, num_source_dirs))

        # this maps each target directory to a list of source files that need
        # to be copied into it. All source files have names.
        targetmap = defaultdict(list)

        for s in sources:
            if isinstance(s, FileSources):
                targetmap[target].append(s)
            else:
                _assert(isinstance(s, DirectorySources), s)
                name = s.basename()
                if name is not None:
                    # named sources get a new directory. see #2329
                    new_target = target.get_child_target(name)
                else:
                    # unnamed sources have their contents copied directly
                    new_target = target
                self.assign_targets(targetmap, s, new_target)

        self.progress("targets assigned, %s dirs, %s files" %
                      (len(targetmap), self.count_files_to_copy(targetmap)))
        return targetmap
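Both build_targetmap variants in this section lean on defaultdict(list): indexing a target that has not been seen yet silently creates an empty list to append into, so no membership check is needed. A minimal, self-contained sketch of that grouping pattern, with made-up names and plain strings standing in for the target objects:

    from collections import defaultdict

    targetmap = defaultdict(list)
    for name, target_dir in [("a.txt", "to"), ("b.txt", "to"), ("c.txt", "to/sub")]:
        targetmap[target_dir].append(name)

    print(dict(targetmap))
    # e.g. {'to': ['a.txt', 'b.txt'], 'to/sub': ['c.txt']}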
Example #2
    def make_client(self, i, write_config=True):
        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
        clientdir = os.path.join(self.basedir, "clients",
                                 idlib.shortnodeid_b2a(clientid))
        fileutil.make_dirs(clientdir)

        tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
        if write_config:
            from twisted.internet import reactor
            _, port_endpoint = self.port_assigner.assign(reactor)
            f = open(tahoe_cfg_path, "w")
            f.write("[node]\n")
            f.write("nickname = client-%d\n" % i)
            f.write("web.port = {}\n".format(port_endpoint))
            f.write("[storage]\n")
            f.write("enabled = false\n")
            f.close()
        else:
            _assert(os.path.exists(tahoe_cfg_path), tahoe_cfg_path=tahoe_cfg_path)

        c = None
        if i in self.client_config_hooks:
            # this hook can either modify tahoe.cfg, or return an
            # entirely new Client instance
            c = self.client_config_hooks[i](clientdir)

        if not c:
            c = yield create_no_network_client(clientdir)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        c.nodeid = clientid
        c.short_nodeid = b32encode(clientid).lower()[:8]
        c._servers = self.all_servers # can be updated later
        c.setServiceParent(self)
        defer.returnValue(c)
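Note that this make_client uses yield and defer.returnValue, which only work inside a generator wrapped by Twisted's @defer.inlineCallbacks; the decorator sits outside the excerpt. A minimal sketch of that pattern, assuming nothing beyond stock Twisted:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_and_double(get_value):
        # get_value() returns a Deferred; 'yield' suspends until it fires.
        value = yield get_value()
        # Python 2 generators cannot 'return value', so Twisted provides
        # defer.returnValue() to set the result of the wrapping Deferred.
        defer.returnValue(value * 2)

    def show(result):
        print(result)  # 42

    d = fetch_and_double(lambda: defer.succeed(21))
    d.addCallback(show)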
Example #3
    def build_targetmap(self, sources, target):
        num_source_files = len(
            [s for s in sources if isinstance(s, FileSources)])
        num_source_dirs = len(
            [s for s in sources if isinstance(s, DirectorySources)])
        self.progress("attaching sources to targets, "
                      "%d files / %d dirs in root" %
                      (num_source_files, num_source_dirs))

        # this maps each target directory to a list of source files that need
        # to be copied into it. All source files have names.
        targetmap = defaultdict(list)

        for s in sources:
            if isinstance(s, FileSources):
                targetmap[target].append(s)
            else:
                _assert(isinstance(s, DirectorySources), s)
                name = s.basename()
                if name is not None:
                    # named sources get a new directory. see #2329
                    new_target = target.get_child_target(name)
                else:
                    # unnamed sources have their contents copied directly
                    new_target = target
                self.assign_targets(targetmap, s, new_target)

        self.progress("targets assigned, %s dirs, %s files" %
                      (len(targetmap), self.count_files_to_copy(targetmap)))
        return targetmap
Example #4
    def copy_things_to_directory(self, sources, target):
        # step one: if the target is missing, we should mkdir it
        target = self.maybe_create_target(target)
        target.populate(recurse=False)

        # step two: scan any source dirs, recursively, to find children
        for s in sources:
            if isinstance(s, DirectorySources):
                s.populate(recurse=True)
            if isinstance(s, FileSources):
                # each source must have a name, or be a directory
                _assert(s.basename() is not None, s)

        # step three: find a target for each source node, creating
        # directories as necessary. 'targetmap' is a dictionary that uses
        # target Directory instances as keys, and has values of (name:
        # sourceobject) dicts for all the files that need to wind up there.
        targetmap = self.build_targetmap(sources, target)

        # step four: walk through the list of targets. For each one, copy all
        # the files. If the target is a TahoeDirectory, upload and create
        # read-caps, then do a set_children to the target directory.
        self.copy_to_targetmap(targetmap)

        return self.announce_success("files copied")
Example #5
 def _assert_invariants(self):
     iter = self.l.__iter__()
     try:
         oldx = iter.next()
         while True:
             x = iter.next()
             # self.l is required to be sorted
             _assert(x >= oldx, x, oldx)
             # every element of self.l is required to appear in self.d
             _assert(self.d.has_key(x[1]), x)
             oldx = x
     except StopIteration:
         pass
     for (
             k,
             v,
     ) in self.d.iteritems():
         i = bisect_left(self.l, (
             v,
             k,
         ))
         while (self.l[i][0] is not v) or (self.l[i][1] is not k):
             i += 1
         _assert(i < len(self.l), i, len(self.l), k, v, self.l)
         _assert(self.l[i][0] is v, i, v, l=self.l, d=self.d)
         _assert(self.l[i][1] is k, i, k, l=self.l, d=self.d)
     return True
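The invariants above describe two structures kept in sync: self.l is a sorted list of (value, key) tuples and self.d maps each key to its value. bisect_left finds the first tuple that compares equal, and the while loop then scans forward with 'is' to reach the identical objects. A small sketch of the same lookup, using hypothetical data and plain equality instead of identity:

    from bisect import bisect_left

    d = {"a": 2, "b": 1, "c": 2}
    l = sorted((v, k) for k, v in d.items())  # [(1, 'b'), (2, 'a'), (2, 'c')]

    for k, v in d.items():
        i = bisect_left(l, (v, k))  # first position where (v, k) could sit
        assert l[i] == (v, k)       # every dict entry appears in the list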
Example #6
    def watch(self,
              path,
              mask=IN_WATCH_MASK,
              autoAdd=False,
              callbacks=None,
              recursive=False):
        precondition(self._state == NOT_STARTED,
                     "watch() can only be called before startReading()",
                     state=self._state)
        precondition(self._filter is None, "only one watch is supported")
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        #precondition(autoAdd == recursive, "need autoAdd and recursive to be the same", autoAdd=autoAdd, recursive=recursive)

        self._path = path
        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode(sys.getfilesystemencoding())
            _assert(isinstance(path_u, unicode), path_u=path_u)

        self._filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE

        if mask & (IN_ACCESS | IN_CLOSE_NOWRITE | IN_OPEN):
            self._filter = self._filter | FILE_NOTIFY_CHANGE_LAST_ACCESS
        if mask & IN_ATTRIB:
            self._filter = self._filter | FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY

        self._recursive = TRUE if recursive else FALSE
        self._callbacks = callbacks or []
        self._hDirectory = _open_directory(path_u)
Example #7
    def make_client(self, i, write_config=True):
        clientid = hashutil.tagged_hash(b"clientid", b"%d" % i)[:20]
        clientdir = os.path.join(self.basedir, "clients",
                                 idlib.shortnodeid_b2a(clientid))
        fileutil.make_dirs(clientdir)

        tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
        if write_config:
            from twisted.internet import reactor
            _, port_endpoint = self.port_assigner.assign(reactor)
            with open(tahoe_cfg_path, "w") as f:
                f.write("[node]\n")
                f.write("nickname = client-%d\n" % i)
                f.write("web.port = {}\n".format(port_endpoint))
                f.write("[storage]\n")
                f.write("enabled = false\n")
        else:
            _assert(os.path.exists(tahoe_cfg_path),
                    tahoe_cfg_path=tahoe_cfg_path)

        c = None
        if i in self.client_config_hooks:
            # this hook can either modify tahoe.cfg, or return an
            # entirely new Client instance
            c = self.client_config_hooks[i](clientdir)

        if not c:
            c = yield create_no_network_client(clientdir)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        c.nodeid = clientid
        c.short_nodeid = b32encode(clientid).lower()[:8]
        c._servers = self.all_servers  # can be updated later
        c.setServiceParent(self)
        defer.returnValue(c)
Example #8
    def make_client(self, i, write_config=True):
        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
        clientdir = os.path.join(self.basedir, "clients",
                                 idlib.shortnodeid_b2a(clientid))
        fileutil.make_dirs(clientdir)

        tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
        if write_config:
            f = open(tahoe_cfg_path, "w")
            f.write("[node]\n")
            f.write("nickname = client-%d\n" % i)
            f.write("web.port = tcp:0:interface=127.0.0.1\n")
            f.write("[storage]\n")
            f.write("enabled = false\n")
            f.close()
        else:
            _assert(os.path.exists(tahoe_cfg_path), tahoe_cfg_path=tahoe_cfg_path)

        c = None
        if i in self.client_config_hooks:
            # this hook can either modify tahoe.cfg, or return an
            # entirely new Client instance
            c = self.client_config_hooks[i](clientdir)

        if not c:
            c = NoNetworkClient(clientdir)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        c.nodeid = clientid
        c.short_nodeid = b32encode(clientid).lower()[:8]
        c._servers = self.all_servers # can be updated later
        c.setServiceParent(self)
        return c
Example #9
    def _validate_block(self, results, segnum, reader, server, started):
        """
        I validate a block from one share on a remote server.
        """
        # Grab the part of the block hash tree that is necessary to
        # validate this block, then generate the block hash root.
        self.log("validating share %d for segment %d" % (reader.shnum,
                                                             segnum))
        elapsed = time.time() - started
        self._status.add_fetch_timing(server, elapsed)
        self._set_current_status("validating blocks")

        block_and_salt, blockhashes, sharehashes = results
        block, salt = block_and_salt
        _assert(type(block) is str, (block, salt))

        blockhashes = dict(enumerate(blockhashes))
        self.log("the reader gave me the following blockhashes: %s" % \
                 blockhashes.keys())
        self.log("the reader gave me the following sharehashes: %s" % \
                 sharehashes.keys())
        bht = self._block_hash_trees[reader.shnum]

        if bht.needed_hashes(segnum, include_leaf=True):
            try:
                bht.set_hashes(blockhashes)
            except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \
                    IndexError), e:
                raise CorruptShareError(server,
                                        reader.shnum,
                                        "block hash tree failure: %s" % e)
Example #10
    def make_client(self, i, write_config=True):
        clientid = hashutil.tagged_hash("clientid", str(i))[:20]
        clientdir = os.path.join(self.basedir, "clients",
                                 idlib.shortnodeid_b2a(clientid))
        fileutil.make_dirs(clientdir)

        tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg")
        if write_config:
            f = open(tahoe_cfg_path, "w")
            f.write("[node]\n")
            f.write("nickname = client-%d\n" % i)
            f.write("web.port = tcp:0:interface=127.0.0.1\n")
            f.write("[storage]\n")
            f.write("enabled = false\n")
            f.close()
        else:
            _assert(os.path.exists(tahoe_cfg_path),
                    tahoe_cfg_path=tahoe_cfg_path)

        c = None
        if i in self.client_config_hooks:
            # this hook can either modify tahoe.cfg, or return an
            # entirely new Client instance
            c = self.client_config_hooks[i](clientdir)

        if not c:
            c = NoNetworkClient(clientdir)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)

        c.nodeid = clientid
        c.short_nodeid = b32encode(clientid).lower()[:8]
        c._servers = self.all_servers  # can be updated later
        c.setServiceParent(self)
        return c
Example #11
    def copy_things_to_directory(self, sources, target):
        # step one: if the target is missing, we should mkdir it
        target = self.maybe_create_target(target)
        target.populate(recurse=False)

        # step two: scan any source dirs, recursively, to find children
        for s in sources:
            if isinstance(s, DirectorySources):
                s.populate(recurse=True)
            if isinstance(s, FileSources):
                # each source must have a name, or be a directory
                _assert(s.basename() is not None, s)

        # step three: find a target for each source node, creating
        # directories as necessary. 'targetmap' is a dictionary that uses
        # target Directory instances as keys, and has values of (name:
        # sourceobject) dicts for all the files that need to wind up there.
        targetmap = self.build_targetmap(sources, target)

        # step four: walk through the list of targets. For each one, copy all
        # the files. If the target is a TahoeDirectory, upload and create
        # read-caps, then do a set_children to the target directory.
        self.copy_to_targetmap(targetmap)

        return self.announce_success("files copied")
Example #12
    def _decode_blocks(self, results, segnum):
        """
        I take a list of k blocks and salts, and decode that into a
        single encrypted segment.
        """
        # 'results' is one or more dicts (each {shnum:(block,salt)}), and we
        # want to merge them all
        blocks_and_salts = {}
        for d in results:
            blocks_and_salts.update(d)

        # All of these blocks should have the same salt; in SDMF, it is
        # the file-wide IV, while in MDMF it is the per-segment salt. In
        # either case, we just need to get one of them and use it.
        #
        # d.items()[0] is like (shnum, (block, salt))
        # d.items()[0][1] is like (block, salt)
        # d.items()[0][1][1] is the salt.
        salt = blocks_and_salts.items()[0][1][1]
        # Next, extract just the blocks from the dict. We'll use the
        # salt in the next step.
        share_and_shareids = [(k, v[0]) for k, v in blocks_and_salts.items()]
        d2 = dict(share_and_shareids)
        shareids = []
        shares = []
        for shareid, share in d2.items():
            shareids.append(shareid)
            shares.append(share)

        self._set_current_status("decoding")
        started = time.time()
        _assert(len(shareids) >= self._required_shares, len(shareids))
        # zfec really doesn't want extra shares
        shareids = shareids[:self._required_shares]
        shares = shares[:self._required_shares]
        self.log("decoding segment %d" % segnum)
        if segnum == self._num_segments - 1:
            d = defer.maybeDeferred(self._tail_decoder.decode, shares, shareids)
        else:
            d = defer.maybeDeferred(self._segment_decoder.decode, shares, shareids)
        def _process(buffers):
            segment = "".join(buffers)
            self.log(format="now decoding segment %(segnum)s of %(numsegs)s",
                     segnum=segnum,
                     numsegs=self._num_segments,
                     level=log.NOISY)
            self.log(" joined length %d, datalength %d" %
                     (len(segment), self._data_length))
            if segnum == self._num_segments - 1:
                size_to_use = self._tail_data_size
            else:
                size_to_use = self._segment_size
            segment = segment[:size_to_use]
            self.log(" segment len=%d" % len(segment))
            self._status.accumulate_decode_time(time.time() - started)
            return segment, salt
        d.addCallback(_process)
        return d
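The salt extraction via blocks_and_salts.items()[0] above is Python 2 only (items() returns a list there), but the merge-and-trim step is version-neutral: the decoder wants exactly k shares, so any extras are dropped. A sketch of that step with fabricated placeholder data:

    results = [{0: ("block0", "salt")},
               {3: ("block3", "salt"), 7: ("block7", "salt")}]
    blocks_and_salts = {}
    for d in results:
        blocks_and_salts.update(d)      # merge the per-server dicts

    required_shares = 2                 # hypothetical k
    shareids = sorted(blocks_and_salts.keys())[:required_shares]
    shares = [blocks_and_salts[shnum][0] for shnum in shareids]
    print(shareids)  # [0, 3]
    print(shares)    # ['block0', 'block3']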
Example #13
    def __init__(self, filenode, storage_broker, servermap, verinfo,
                 fetch_privkey=False, verify=False):
        self._node = filenode
        _assert(self._node.get_pubkey())
        self._storage_broker = storage_broker
        self._storage_index = filenode.get_storage_index()
        _assert(self._node.get_readkey())
        self._last_failure = None
        prefix = si_b2a(self._storage_index)[:5]
        self._log_number = log.msg("Retrieve(%s): starting" % prefix)
        self._running = True
        self._decoding = False
        self._bad_shares = set()

        self.servermap = servermap
        self.verinfo = verinfo
        # TODO: make it possible to use self.verinfo.datalength instead
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = self.verinfo
        self._data_length = datalength
        # during repair, we may be called upon to grab the private key, since
        # it wasn't picked up during a verify=False checker run, and we'll
        # need it for repair to generate a new version.
        self._need_privkey = verify or (fetch_privkey
                                        and not self._node.get_privkey())

        if self._need_privkey:
            # TODO: Evaluate the need for this. We'll use it if we want
            # to limit how many queries are on the wire for the privkey
            # at once.
            self._privkey_query_markers = [] # one Marker for each time we've
                                             # tried to get the privkey.

        # verify means that we are using the downloader logic to verify all
        # of our shares. This tells the downloader a few things.
        #
        # 1. We need to download all of the shares.
        # 2. We don't need to decode or decrypt the shares, since our
        #    caller doesn't care about the plaintext, only the
        #    information about which shares are or are not valid.
        # 3. When we are validating readers, we need to validate the
        #    signature on the prefix. Do we? We already do this in the
        #    servermap update?
        self._verify = verify

        self._status = RetrieveStatus()
        self._status.set_storage_index(self._storage_index)
        self._status.set_helper(False)
        self._status.set_progress(0.0)
        self._status.set_active(True)
        self._status.set_size(datalength)
        self._status.set_encoding(k, N)
        self.readers = {}
        self._stopped = False
        self._pause_deferred = None
        self._offset = None
        self._read_length = None
        self.log("got seqnum %d" % self.verinfo[0])
Example #14
 def __init__(self, size=1024):
     self.size = size
     self.buffer = create_string_buffer(size)
     address = addressof(self.buffer)
     _assert(
         address & 3 == 0,
         "address 0x%X returned by create_string_buffer is not DWORD-aligned"
         % (address, ))
     self.data = None
Example #15
 def assign_targets(self, targetmap, source, target):
     # copy everything in the source into the target
     precondition(isinstance(source, DirectorySources), source)
     for name, child in source.children.items():
         if isinstance(child, DirectorySources):
             # we will need a target directory for this one
             subtarget = target.get_child_target(name)
             self.assign_targets(targetmap, child, subtarget)
         else:
             _assert(isinstance(child, FileSources), child)
             targetmap[target].append(child)
Example #16
    def _send_segment(self, shares_and_shareids, segnum):
        # To generate the URI, we must generate the roothash, so we must
        # generate all shares, even if we aren't actually giving them to
        # anybody. This means that the set of shares we create will be equal
        # to or larger than the set of landlords. If we have any landlord who
        # *doesn't* have a share, that's an error.
        (shares, shareids) = shares_and_shareids
        _assert(set(self.landlords.keys()).issubset(set(shareids)),
                shareids=shareids,
                landlords=self.landlords)
        start = time.time()
        dl = []
        self.set_status("Sending segment %d of %d" %
                        (segnum + 1, self.num_segments))
        self.set_encode_and_push_progress(segnum)
        lognum = self.log("send_segment(%d)" % segnum, level=log.NOISY)
        for i in range(len(shares)):
            block = shares[i]
            shareid = shareids[i]
            d = self.send_block(shareid, segnum, block, lognum)
            dl.append(d)

            block_hash = hashutil.block_hash(block)
            #from allmydata.util import base32
            #log.msg("creating block (shareid=%d, blocknum=%d) "
            #        "len=%d %r .. %r: %s" %
            #        (shareid, segnum, len(block),
            #         block[:50], block[-50:], base32.b2a(block_hash)))
            self.block_hashes[shareid].append(block_hash)

        dl = self._gather_responses(dl)

        def do_progress(ign):
            done = self.segment_size * (segnum + 1)
            if self._progress:
                self._progress.set_progress(done)
            return ign

        dl.addCallback(do_progress)

        def _logit(res):
            self.log("%s uploaded %s / %s bytes (%d%%) of your file." % (
                self,
                self.segment_size * (segnum + 1),
                self.segment_size * self.num_segments,
                100 * (segnum + 1) / self.num_segments,
            ),
                     level=log.OPERATIONAL)
            elapsed = time.time() - start
            self._times["cumulative_sending"] += elapsed
            return res

        dl.addCallback(_logit)
        return dl
Example #17
 def assign_targets(self, targetmap, source, target):
     # copy everything in the source into the target
     precondition(isinstance(source, DirectorySources), source)
     for name, child in list(source.children.items()):
         if isinstance(child, DirectorySources):
             # we will need a target directory for this one
             subtarget = target.get_child_target(name)
             self.assign_targets(targetmap, child, subtarget)
         else:
             _assert(isinstance(child, FileSources), child)
             targetmap[target].append(child)
Example #18
 def init_tempdir(self):
     tempdir_config = self.get_config("node", "tempdir", "tmp").decode('utf-8')
     tempdir = abspath_expanduser_unicode(tempdir_config, base=self.basedir)
     if not os.path.exists(tempdir):
         fileutil.make_dirs(tempdir)
     tempfile.tempdir = tempdir
     # this should cause twisted.web.http (which uses
     # tempfile.TemporaryFile) to put large request bodies in the given
     # directory. Without this, the default temp dir is usually /tmp/,
     # which is frequently too small.
     test_name = tempfile.mktemp()
     _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)
Example #19
 def get_all_servers(self):
     # return a frozenset of (peerid, versioned-rref) tuples
     servers = {}
     for serverid, rref in self.test_servers.items():
         servers[serverid] = rref
     for serverid, dsc in self.descriptors.items():
         rref = dsc.get_rref()
         if rref:
             servers[serverid] = rref
     result = frozenset(servers.items())
     _assert(len(result) <= len(self.get_all_serverids()), result, self.get_all_serverids())
     return result
Example #20
 def init_tempdir(self):
     tempdir_config = self.get_config("node", "tempdir", "tmp").decode('utf-8')
     tempdir = abspath_expanduser_unicode(tempdir_config, base=self.basedir)
     if not os.path.exists(tempdir):
         fileutil.make_dirs(tempdir)
     tempfile.tempdir = tempdir
     # this should cause twisted.web.http (which uses
     # tempfile.TemporaryFile) to put large request bodies in the given
     # directory. Without this, the default temp dir is usually /tmp/,
     # which is frequently too small.
     test_name = tempfile.mktemp()
     _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)
Example #21
    def run_one_case(self, case):
        cmd = (case.replace("$PARENTCAP", self.PARENTCAP).replace(
            "$DIRCAP5",
            self.DIRCAP5).replace("$DIRCAP6", self.DIRCAP6).replace(
                "$DIRCAP",
                self.DIRCAP).replace("$DIRALIAS",
                                     "ALIAS:").replace("$FILECAP",
                                                       self.FILECAP).split())
        target = cmd[-1]
        _assert(target == "to" or target.startswith("to/"), target)
        cmd[-1] = os.path.abspath(os.path.join(self.basedir, cmd[-1]))

        # reset
        targetdir = os.path.abspath(os.path.join(self.basedir, "to"))
        fileutil.rm_dir(targetdir)
        os.mkdir(targetdir)

        if target.rstrip("/") == "to/existing-file":
            fileutil.write(cmd[-1], "existing file contents\n")

        # The abspath() for cmd[-1] strips a trailing slash, and we want to
        # test what happens when it is present. So put it back.
        if target.endswith("/"):
            cmd[-1] += "/"

        d = self.do_cli(*cmd)

        def _check(res):
            (rc, out, err) = res
            err = err.strip()
            if rc == 0:
                return self.check_output()
            if rc == 1:
                self.failUnlessEqual(out, "", str(res))
                if "when copying into a directory, all source files must have names, but" in err:
                    return set(["E2-DESTNAME"])
                if err == "cannot copy directories without --recursive":
                    return set(["E4-NEED-R"])
                if err == "cannot copy directory into a file":
                    return set(["E5-DIRTOFILE"])
                if err == "copying multiple things requires target be a directory":
                    return set(["E6-MANYONE"])
                if err == "target is not a directory, but ends with a slash":
                    return set(["E7-BADSLASH"])
                if (err.startswith("source ") and
                        "is not a directory, but ends with a slash" in err):
                    return set(["E8-BADSLASH"])
                if err == "cannot copy multiple files with the same name into the same target directory":
                    return set(["E9-COLLIDING-TARGETS"])
            self.fail("unrecognized error ('%s') %s" % (case, res))

        d.addCallback(_check)
        return d
Example #22
    def run_one_case(self, case):
        cmd = (case
               .replace("$PARENTCAP", self.PARENTCAP)
               .replace("$DIRCAP5", self.DIRCAP5)
               .replace("$DIRCAP6", self.DIRCAP6)
               .replace("$DIRCAP", self.DIRCAP)
               .replace("$DIRALIAS", "ALIAS:")
               .replace("$FILECAP", self.FILECAP)
               .split())
        target = cmd[-1]
        _assert(target == "to" or target.startswith("to/"), target)
        cmd[-1] = os.path.abspath(os.path.join(self.basedir, cmd[-1]))

        # reset
        targetdir = os.path.abspath(os.path.join(self.basedir, "to"))
        fileutil.rm_dir(targetdir)
        os.mkdir(targetdir)

        if target.rstrip("/") == "to/existing-file":
            fileutil.write(cmd[-1], "existing file contents\n")

        # The abspath() for cmd[-1] strips a trailing slash, and we want to
        # test what happens when it is present. So put it back.
        if target.endswith("/"):
            cmd[-1] += "/"

        d = self.do_cli(*cmd)
        def _check(res):
            (rc, out, err) = res
            err = err.strip()
            if rc == 0:
                return self.check_output()
            if rc == 1:
                self.failUnlessEqual(out, "", str(res))
                if "when copying into a directory, all source files must have names, but" in err:
                    return set(["E2-DESTNAME"])
                if err == "cannot copy directories without --recursive":
                    return set(["E4-NEED-R"])
                if err == "cannot copy directory into a file":
                    return set(["E5-DIRTOFILE"])
                if err == "copying multiple things requires target be a directory":
                    return set(["E6-MANYONE"])
                if err == "target is not a directory, but ends with a slash":
                    return set(["E7-BADSLASH"])
                if (err.startswith("source ") and
                    "is not a directory, but ends with a slash" in err):
                    return set(["E8-BADSLASH"])
                if err == "cannot copy multiple files with the same name into the same target directory":
                    return set(["E9-COLLIDING-TARGETS"])
            self.fail("unrecognized error ('%s') %s" % (case, res))
        d.addCallback(_check)
        return d
Example #23
    def _thread(self):
        try:
            _assert(self._filter is not None, "no watch set")

            # To call Twisted or Tahoe APIs, use reactor.callFromThread as described in
            # <http://twistedmatrix.com/documents/current/core/howto/threading.html>.

            fni = FileNotifyInformation()

            while True:
                self._state = STARTED
                try:
                    fni.read_changes(self._hDirectory, self._recursive, self._filter)
                except WindowsError as e:
                    self._state = STOPPING

                if self._check_stop():
                    return
                for info in fni:
                    # print info
                    path = self._path.preauthChild(info.filename)  # FilePath with Unicode path
                    if info.action == FILE_ACTION_MODIFIED and path.isdir():
                        # print "Filtering out %r" % (info,)
                        continue
                    #mask = _action_to_inotify_mask.get(info.action, IN_CHANGED)

                    def _do_pending_calls():
                        self._pending_call = None
                        for path in self._pending:
                            if self._callbacks:
                                for cb in self._callbacks:
                                    try:
                                        cb(None, path, IN_CHANGED)
                                    except Exception, e:
                                        log.err(e)
                        self._pending = set()

                    def _maybe_notify(path):
                        if path not in self._pending:
                            self._pending.add(path)
                        if self._state not in [STOPPING, STOPPED]:
                            _do_pending_calls()
#                        if self._pending_call is None and self._state not in [STOPPING, STOPPED]:
#                            self._pending_call = reactor.callLater(self._pending_delay, _do_pending_calls)

                    reactor.callFromThread(_maybe_notify, path)
                    if self._check_stop():
                        return
        except Exception, e:
            log.err(e)
            self._state = STOPPED
            raise
Example #24
    def _thread(self):
        try:
            _assert(self._filter is not None, "no watch set")

            # To call Twisted or Tahoe APIs, use reactor.callFromThread as described in
            # <http://twistedmatrix.com/documents/current/core/howto/threading.html>.

            fni = FileNotifyInformation()

            while True:
                self._state = STARTED
                try:
                    fni.read_changes(self._hDirectory, self._recursive, self._filter)
                except WindowsError as e:
                    self._state = STOPPING

                if self._check_stop():
                    return
                for info in fni:
                    # print info
                    path = self._path.preauthChild(info.filename)  # FilePath with Unicode path
                    if info.action == FILE_ACTION_MODIFIED and path.isdir():
                        # print "Filtering out %r" % (info,)
                        continue
                    #mask = _action_to_inotify_mask.get(info.action, IN_CHANGED)

                    def _do_pending_calls():
                        self._pending_call = None
                        for path1 in self._pending:
                            if self._callbacks:
                                for cb in self._callbacks:
                                    try:
                                        cb(None, path1, IN_CHANGED)
                                    except Exception, e2:
                                        log.err(e2)
                        self._pending = set()

                    def _maybe_notify(path2):
                        if path2 not in self._pending:
                            self._pending.add(path2)
                        if self._state not in [STOPPING, STOPPED]:
                            _do_pending_calls()
#                        if self._pending_call is None and self._state not in [STOPPING, STOPPED]:
#                            self._pending_call = reactor.callLater(self._pending_delay, _do_pending_calls)

                    reactor.callFromThread(_maybe_notify, path)
                    if self._check_stop():
                        return
        except Exception, e:
            log.err(e)
            self._state = STOPPED
            raise
Example #25
    def _send_segment(self, shares_and_shareids, segnum):
        # To generate the URI, we must generate the roothash, so we must
        # generate all shares, even if we aren't actually giving them to
        # anybody. This means that the set of shares we create will be equal
        # to or larger than the set of landlords. If we have any landlord who
        # *doesn't* have a share, that's an error.
        (shares, shareids) = shares_and_shareids
        _assert(set(self.landlords.keys()).issubset(set(shareids)),
                shareids=shareids, landlords=self.landlords)
        start = time.time()
        dl = []
        self.set_status("Sending segment %d of %d" % (segnum+1,
                                                      self.num_segments))
        self.set_encode_and_push_progress(segnum)
        lognum = self.log("send_segment(%d)" % segnum, level=log.NOISY)
        for i in range(len(shares)):
            block = shares[i]
            shareid = shareids[i]
            d = self.send_block(shareid, segnum, block, lognum)
            dl.append(d)

            block_hash = hashutil.block_hash(block)
            #from allmydata.util import base32
            #log.msg("creating block (shareid=%d, blocknum=%d) "
            #        "len=%d %r .. %r: %s" %
            #        (shareid, segnum, len(block),
            #         block[:50], block[-50:], base32.b2a(block_hash)))
            self.block_hashes[shareid].append(block_hash)

        dl = self._gather_responses(dl)

        def do_progress(ign):
            done = self.segment_size * (segnum + 1)
            if self._progress:
                self._progress.set_progress(done)
            return ign
        dl.addCallback(do_progress)

        def _logit(res):
            self.log("%s uploaded %s / %s bytes (%d%%) of your file." %
                     (self,
                      self.segment_size*(segnum+1),
                      self.segment_size*self.num_segments,
                      100 * (segnum+1) / self.num_segments,
                      ),
                     level=log.OPERATIONAL)
            elapsed = time.time() - start
            self._times["cumulative_sending"] += elapsed
            return res
        dl.addCallback(_logit)
        return dl
Example #26
def to_filepath(path):
    precondition(isinstance(path, unicode if use_unicode_filepath else basestring),
                 path=path)

    if isinstance(path, unicode) and not use_unicode_filepath:
        path = path.encode(filesystem_encoding)

    if sys.platform == "win32":
        _assert(isinstance(path, unicode), path=path)
        if path.startswith(u"\\\\?\\") and len(path) > 4:
            # FilePath normally strips trailing path separators, but not in this case.
            path = path.rstrip(u"\\")

    return FilePath(path)
Example #27
def to_filepath(path):
    precondition(isinstance(path, unicode if use_unicode_filepath else (bytes, unicode)),
                 path=path)

    if isinstance(path, unicode) and not use_unicode_filepath:
        path = path.encode(filesystem_encoding)

    if sys.platform == "win32":
        _assert(isinstance(path, unicode), path=path)
        if path.startswith(u"\\\\?\\") and len(path) > 4:
            # FilePath normally strips trailing path separators, but not in this case.
            path = path.rstrip(u"\\")

    return FilePath(path)
Example #28
 def init_tempdir(self):
     """
     Initialize/create a directory for temporary files.
     """
     tempdir_config = self.config.get_config("node", "tempdir", "tmp").decode('utf-8')
     tempdir = self.config.get_config_path(tempdir_config)
     if not os.path.exists(tempdir):
         fileutil.make_dirs(tempdir)
     tempfile.tempdir = tempdir
     # this should cause twisted.web.http (which uses
     # tempfile.TemporaryFile) to put large request bodies in the given
     # directory. Without this, the default temp dir is usually /tmp/,
     # which is frequently too small.
     temp_fd, test_name = tempfile.mkstemp()
     _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)
     os.close(temp_fd)  # avoid leak of unneeded fd
Example #29
 def init_tempdir(self):
     """
     Initialize/create a directory for temporary files.
     """
     tempdir_config = self.config.get_config("node", "tempdir", "tmp").decode('utf-8')
     tempdir = self.config.get_config_path(tempdir_config)
     if not os.path.exists(tempdir):
         fileutil.make_dirs(tempdir)
     tempfile.tempdir = tempdir
     # this should cause twisted.web.http (which uses
     # tempfile.TemporaryFile) to put large request bodies in the given
     # directory. Without this, the default temp dir is usually /tmp/,
     # which is frequently too small.
     temp_fd, test_name = tempfile.mkstemp()
     _assert(os.path.dirname(test_name) == tempdir, test_name, tempdir)
     os.close(temp_fd)  # avoid leak of unneeded fd
Example #30
    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False):
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        assert autoAdd == False

        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode('utf-8')
            _assert(isinstance(path_u, unicode), path_u=path_u)

        if path_u not in self._callbacks.keys():
            self._callbacks[path_u] = callbacks or []
            self._watches[path_u] = self._observer.schedule(
                INotifyEventHandler(path_u, mask, self._callbacks[path_u], self._pending_delay),
                path=path_u,
                recursive=False,
            )
Example #31
def should_ignore_file(path_u):
    precondition(isinstance(path_u, unicode), path_u=path_u)

    for suffix in IGNORE_SUFFIXES:
        if path_u.endswith(suffix):
            return True

    while path_u != u"":
        oldpath_u = path_u
        path_u, tail_u = os.path.split(path_u)
        if tail_u.startswith(u"."):
            return True
        if path_u == oldpath_u:
            return True  # the path was absolute
        _assert(len(path_u) < len(oldpath_u), path_u=path_u, oldpath_u=oldpath_u)

    return False
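A hypothetical spot-check of the walk-up logic above; IGNORE_SUFFIXES is not shown in the excerpt, so the values here are invented, and POSIX os.path semantics are assumed:

    IGNORE_SUFFIXES = [u".tmp", u".bak"]   # invented for this sketch

    print(should_ignore_file(u"work/notes.txt"))    # False
    print(should_ignore_file(u"work/draft.tmp"))    # True  (suffix match)
    print(should_ignore_file(u"work/.git/config"))  # True  (dotted component)
    print(should_ignore_file(u"/etc/passwd"))       # True  (absolute path)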
Example #32
def make_dirs_with_absolute_mode(parent, dirname, mode):
    """
    Make directory `dirname` and chmod it to `mode` afterwards.
    We chmod all parent directories of `dirname` until we reach
    `parent`.
    """
    precondition_abspath(parent)
    precondition_abspath(dirname)
    if not is_ancestor_path(parent, dirname):
        raise AssertionError("dirname must be a descendant of parent")

    make_dirs(dirname)
    while dirname != parent:
        os.chmod(dirname, mode)
        # FIXME: doesn't seem to work on Windows for long paths
        old_dirname, dirname = dirname, os.path.dirname(dirname)
        _assert(len(dirname) < len(old_dirname), dirname=dirname, old_dirname=old_dirname)
Example #33
def make_dirs_with_absolute_mode(parent, dirname, mode):
    """
    Make directory `dirname` and chmod it to `mode` afterwards.
    We chmod all parent directories of `dirname` until we reach
    `parent`.
    """
    precondition_abspath(parent)
    precondition_abspath(dirname)
    if not is_ancestor_path(parent, dirname):
        raise AssertionError("dirname must be a descendant of parent")

    make_dirs(dirname)
    while dirname != parent:
        os.chmod(dirname, mode)
        # FIXME: doesn't seem to work on Windows for long paths
        old_dirname, dirname = dirname, os.path.dirname(dirname)
        _assert(len(dirname) < len(old_dirname), dirname=dirname, old_dirname=old_dirname)
Example #34
    def copy_things_to_directory(self, sources, target):
        # step one: if the target is missing, we should mkdir it
        target = self.maybe_create_target(target)
        target.populate(recurse=False)

        # step two: scan any source dirs, recursively, to find children
        for s in sources:
            if isinstance(s, DirectorySources):
                s.populate(recurse=True)
            if isinstance(s, FileSources):
                # each source must have a name, or be a directory
                _assert(s.basename() is not None, s)

        # step three: find a target for each source node, creating
        # directories as necessary. 'targetmap' is a dictionary that uses
        # target Directory instances as keys, and has values of (name:
        # sourceobject) dicts for all the files that need to wind up there.
        targetmap = self.build_targetmap(sources, target)

        # target name collisions are an error
        collisions = []
        for target, sources in list(targetmap.items()):
            target_names = {}
            for source in sources:
                name = source.basename()
                if name in target_names:
                    collisions.append((target, source, target_names[name]))
                else:
                    target_names[name] = source
        if collisions:
            self.to_stderr(
                "cannot copy multiple files with the same name into the same target directory"
            )
            # I'm not sure how to show where the collisions are coming from
            #for (target, source1, source2) in collisions:
            #    self.to_stderr(source1.basename())
            return 1

        # step four: walk through the list of targets. For each one, copy all
        # the files. If the target is a TahoeDirectory, upload and create
        # read-caps, then do a set_children to the target directory.
        self.copy_to_targetmap(targetmap)

        return self.announce_success("files copied")
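The collision pass above boils down to: within each target's source list, two sources sharing a basename is an error. A tiny sketch of the same check with plain strings standing in for source objects:

    targetmap = {"to": ["a.txt", "b.txt", "a.txt"]}   # made-up names
    collisions = []
    for target, names in targetmap.items():
        seen = {}
        for name in names:
            if name in seen:
                collisions.append((target, name))
            else:
                seen[name] = name
    print(collisions)  # [('to', 'a.txt')]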
Example #35
def should_ignore_file(path_u):
    precondition(isinstance(path_u, unicode), path_u=path_u)

    for suffix in IGNORE_SUFFIXES:
        if path_u.endswith(suffix):
            return True

    while path_u != u"":
        oldpath_u = path_u
        path_u, tail_u = os.path.split(path_u)
        if tail_u.startswith(u"."):
            return True
        if path_u == oldpath_u:
            return True  # the path was absolute
        _assert(len(path_u) < len(oldpath_u),
                path_u=path_u,
                oldpath_u=oldpath_u)

    return False
Example #36
    def _scan(self, reldir_u):
        # Scan a directory by (synchronously) adding the paths of all its children to self._pending.
        # Note that this doesn't add them to the deque -- that will

        self._log("SCAN '%r'" % (reldir_u,))
        fp = self._get_filepath(reldir_u)
        try:
            children = listdir_filepath(fp)
        except EnvironmentError:
            raise Exception("WARNING: magic folder: permission denied on directory %s"
                            % quote_filepath(fp))
        except FilenameEncodingError:
            raise Exception("WARNING: magic folder: could not list directory %s due to a filename encoding error"
                            % quote_filepath(fp))

        for child in children:
            self._log("   scan; child %r" % (child,))
            _assert(isinstance(child, unicode), child=child)
            self._add_pending("%s/%s" % (reldir_u, child) if reldir_u != u"" else child)
Example #37
    def _scan(self, reldir_u):
        # Scan a directory by (synchronously) adding the paths of all its children to self._pending.
        # Note that this doesn't add them to the deque -- that will

        self._log("SCAN '%r'" % (reldir_u,))
        fp = self._get_filepath(reldir_u)
        try:
            children = listdir_filepath(fp)
        except EnvironmentError:
            raise Exception("WARNING: magic folder: permission denied on directory %s"
                            % quote_filepath(fp))
        except FilenameEncodingError:
            raise Exception("WARNING: magic folder: could not list directory %s due to a filename encoding error"
                            % quote_filepath(fp))

        for child in children:
            self._log("   scan; child %r" % (child,))
            _assert(isinstance(child, unicode), child=child)
            self._add_pending("%s/%s" % (reldir_u, child) if reldir_u != u"" else child)
Example #38
    def copy_things_to_directory(self, sources, target):
        # step one: if the target is missing, we should mkdir it
        target = self.maybe_create_target(target)
        target.populate(recurse=False)

        # step two: scan any source dirs, recursively, to find children
        for s in sources:
            if isinstance(s, DirectorySources):
                s.populate(recurse=True)
            if isinstance(s, FileSources):
                # each source must have a name, or be a directory
                _assert(s.basename() is not None, s)

        # step three: find a target for each source node, creating
        # directories as necessary. 'targetmap' is a dictionary that uses
        # target Directory instances as keys, and has values of (name:
        # sourceobject) dicts for all the files that need to wind up there.
        targetmap = self.build_targetmap(sources, target)

        # target name collisions are an error
        collisions = []
        for target, sources in targetmap.items():
            target_names = {}
            for source in sources:
                name = source.basename()
                if name in target_names:
                    collisions.append((target, source, target_names[name]))
                else:
                    target_names[name] = source
        if collisions:
            self.to_stderr("cannot copy multiple files with the same name into the same target directory")
            # I'm not sure how to show where the collisions are coming from
            #for (target, source1, source2) in collisions:
            #    self.to_stderr(source1.basename())
            return 1

        # step four: walk through the list of targets. For each one, copy all
        # the files. If the target is a TahoeDirectory, upload and create
        # read-caps, then do a set_children to the target directory.
        self.copy_to_targetmap(targetmap)

        return self.announce_success("files copied")
Example #39
    def copy_to_targetmap(self, targetmap):
        files_to_copy = self.count_files_to_copy(targetmap)
        self.progress("starting copy, %d files, %d directories" %
                      (files_to_copy, len(targetmap)))
        files_copied = 0
        targets_finished = 0

        for target, sources in targetmap.items():
            _assert(isinstance(target, DirectoryTargets), target)
            for source in sources:
                _assert(isinstance(source, FileSources), source)
                self.copy_file_into_dir(source, source.basename(), target)
                files_copied += 1
                self.progress("%d/%d files, %d/%d directories" %
                              (files_copied, files_to_copy,
                               targets_finished, len(targetmap)))
            target.set_children()
            targets_finished += 1
            self.progress("%d/%d directories" %
                          (targets_finished, len(targetmap)))
Example #40
    def copy_to_targetmap(self, targetmap):
        files_to_copy = self.count_files_to_copy(targetmap)
        self.progress("starting copy, %d files, %d directories" %
                      (files_to_copy, len(targetmap)))
        files_copied = 0
        targets_finished = 0

        for target, sources in list(targetmap.items()):
            _assert(isinstance(target, DirectoryTargets), target)
            for source in sources:
                _assert(isinstance(source, FileSources), source)
                self.copy_file_into_dir(source, source.basename(), target)
                files_copied += 1
                self.progress("%d/%d files, %d/%d directories" %
                              (files_copied, files_to_copy, targets_finished,
                               len(targetmap)))
            target.set_children()
            targets_finished += 1
            self.progress("%d/%d directories" %
                          (targets_finished, len(targetmap)))
Example #41
 def download(self, consumer=None, offset=0, size=None):
     precondition(self._verify or IConsumer.providedBy(consumer))
     if size is None:
         size = self._data_length - offset
     if self._verify:
         _assert(size == self._data_length, (size, self._data_length))
     self.log("starting download")
     self._done_deferred = defer.Deferred()
     if consumer:
         self._consumer = consumer
         # we provide IPushProducer, so streaming=True, per IConsumer.
         self._consumer.registerProducer(self, streaming=True)
     self._started = time.time()
     self._started_fetching = time.time()
     if size == 0:
         # short-circuit the rest of the process
         self._done()
     else:
         self._start_download(consumer, offset, size)
     return self._done_deferred
Example #42
 def download(self, consumer=None, offset=0, size=None):
     precondition(self._verify or IConsumer.providedBy(consumer))
     if size is None:
         size = self._data_length - offset
     if self._verify:
         _assert(size == self._data_length, (size, self._data_length))
     self.log("starting download")
     self._done_deferred = defer.Deferred()
     if consumer:
         self._consumer = consumer
         # we provide IPushProducer, so streaming=True, per IConsumer.
         self._consumer.registerProducer(self, streaming=True)
     self._started = time.time()
     self._started_fetching = time.time()
     if size == 0:
         # short-circuit the rest of the process
         self._done()
     else:
         self._start_download(consumer, offset, size)
     return self._done_deferred
Example #43
 def _assert_invariants(self):
     iter = self.l.__iter__()
     try:
         oldx = iter.next()
         while True:
             x = iter.next()
             # self.l is required to be sorted
             _assert(x >= oldx, x, oldx)
             # every element of self.l is required to appear in self.d
             _assert(self.d.has_key(x[1]), x)
             oldx = x
     except StopIteration:
         pass
     for (k, v,) in self.d.iteritems():
         i = bisect_left(self.l, (v, k,))
         while (self.l[i][0] is not v) or (self.l[i][1] is not k):
             i += 1
         _assert(i < len(self.l), i, len(self.l), k, v, self.l)
         _assert(self.l[i][0] is v, i, v, l=self.l, d=self.d)
         _assert(self.l[i][1] is k, i, k, l=self.l, d=self.d)
     return True
Example #44
    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False):
        precondition(self._state == NOT_STARTED, "watch() can only be called before startReading()", state=self._state)
        precondition(self._filter is None, "only one watch is supported")
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        #precondition(autoAdd == recursive, "need autoAdd and recursive to be the same", autoAdd=autoAdd, recursive=recursive)

        self._path = path
        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode(sys.getfilesystemencoding())
            _assert(isinstance(path_u, unicode), path_u=path_u)

        self._filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE

        if mask & (IN_ACCESS | IN_CLOSE_NOWRITE | IN_OPEN):
            self._filter = self._filter | FILE_NOTIFY_CHANGE_LAST_ACCESS
        if mask & IN_ATTRIB:
            self._filter = self._filter | FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY

        self._recursive = TRUE if recursive else FALSE
        self._callbacks = callbacks or []
        self._hDirectory = _open_directory(path_u)
Example #45
    def set_hook(self, name, d=None, ignore_count=0):
        """
        Called by the hook observer (e.g. by a test).
        If d is not given, an unfired Deferred is created and returned.
        The hook must not already be set.
        """
        self._log("set_hook %r, ignore_count=%r" % (name, ignore_count))
        if d is None:
            d = defer.Deferred()
        _assert(ignore_count >= 0, ignore_count=ignore_count)
        _assert(name in self._hooks, name=name)
        _assert(self._hooks[name] is None, name=name, hook=self._hooks[name])
        _assert(isinstance(d, defer.Deferred), d=d)

        self._hooks[name] = (d, ignore_count)
        return d
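set_hook implies a counterpart that fires the stored Deferred; the excerpt omits it, so this is a hedged sketch with an assumed name and assumed semantics (skip ignore_count occurrences, then fire once):

    def _call_hook(self, res, name):
        # Hypothetical counterpart to set_hook(); not taken from the source.
        hook = self._hooks[name]
        if hook is None:
            return res                    # nobody is waiting on this hook
        d, ignore_count = hook
        if ignore_count > 0:
            self._hooks[name] = (d, ignore_count - 1)  # skip this occurrence
        else:
            self._hooks[name] = None      # a hook fires at most once
            d.callback(res)
        return res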
Example #46
    def watch(self,
              path,
              mask=IN_WATCH_MASK,
              autoAdd=False,
              callbacks=None,
              recursive=False):
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        assert autoAdd == False

        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode('utf-8')
            _assert(isinstance(path_u, unicode), path_u=path_u)

        if path_u not in self._callbacks.keys():
            self._callbacks[path_u] = callbacks or []
            self._watches[path_u] = self._observer.schedule(
                INotifyEventHandler(path_u, mask, self._callbacks[path_u],
                                    self._pending_delay),
                path=path_u,
                recursive=False,
            )
Example #47
    def set_hook(self, name, d=None, ignore_count=0):
        """
        Called by the hook observer (e.g. by a test).
        If d is not given, an unfired Deferred is created and returned.
        The hook must not already be set.
        """
        self._log("set_hook %r, ignore_count=%r" % (name, ignore_count))
        if d is None:
            d = defer.Deferred()
        _assert(ignore_count >= 0, ignore_count=ignore_count)
        _assert(name in self._hooks, name=name)
        _assert(self._hooks[name] is None, name=name, hook=self._hooks[name])
        _assert(isinstance(d, defer.Deferred), d=d)

        self._hooks[name] = (d, ignore_count)
        return d
Example #48
        def _ready(res):
            k, happy, n = e.get_param("share_counts")
            _assert(n == NUM_SHARES) # else we'll be completely confused
            numsegs = e.get_param("num_segments")
            _assert(numsegs == NUM_SEGMENTS, numsegs, NUM_SEGMENTS)
            segsize = e.get_param("segment_size")
            _assert( (NUM_SEGMENTS-1)*segsize < len(data) <= NUM_SEGMENTS*segsize,
                     NUM_SEGMENTS, segsize,
                     (NUM_SEGMENTS-1)*segsize, len(data), NUM_SEGMENTS*segsize)

            shareholders = {}
            servermap = {}
            for shnum in range(NUM_SHARES):
                peer = FakeBucketReaderWriterProxy()
                shareholders[shnum] = peer
                servermap.setdefault(shnum, set()).add(peer.get_peerid())
                all_shareholders.append(peer)
            e.set_shareholders(shareholders, servermap)
            return e.start()
Example #49
 def __init__(self, size=1024):
     self.size = size
     self.buffer = create_string_buffer(size)
     address = addressof(self.buffer)
     _assert(address & 3 == 0, "address 0x%X returned by create_string_buffer is not DWORD-aligned" % (address,))
     self.data = None
Example #50
def from_string_dirnode(s, **kwargs):
    u = from_string(s, **kwargs)
    _assert(IDirnodeURI.providedBy(u))
    return u
Example #51
def from_string_verifier(s, **kwargs):
    u = from_string(s, **kwargs)
    _assert(IVerifierURI.providedBy(u))
    return u
Example #52
def from_string_mutable_filenode(s, **kwargs):
    u = from_string(s, **kwargs)
    _assert(IMutableFileURI.providedBy(u))
    return u
Example #53
def from_string_dirnode(s, **kwargs):
    u = from_string(s, **kwargs)
    _assert(IDirnodeURI.providedBy(u))
    return u
Example #54
 def __init__(self, filenode_uri=None):
     if filenode_uri:
         _assert(IVerifierURI.providedBy(filenode_uri))
     self._filenode_uri = filenode_uri
Example #55
def from_string_verifier(s, **kwargs):
    u = from_string(s, **kwargs)
    _assert(IVerifierURI.providedBy(u))
    return u
Example #56
def from_string_mutable_filenode(s, **kwargs):
    u = from_string(s, **kwargs)
    _assert(IMutableFileURI.providedBy(u))
    return u