Example 1
0
    def __init__(self, storage_broker, verifycap, node, download_status,
                 logparent=None, max_outstanding_requests=10):
        """Locate shares for one immutable-file download.

        Emits a NOISY "starting" log entry and remembers its logparent
        for later messages.
        """
        # Cleared by Share.stop (driven from the Terminator service).
        self.running = True
        self.verifycap = verifycap
        self._started = False
        self._storage_broker = storage_broker
        self.share_consumer = self.node = node
        self.max_outstanding_requests = max_outstanding_requests
        self._hungry = False

        # Per-share / per-request bookkeeping.
        self._commonshares = {}  # maps shnum -> CommonShare instance
        self.pending_requests = set()
        self.overdue_requests = set()  # always a subset of pending_requests
        self.overdue_timers = {}

        self._storage_index = verifycap.storage_index
        self._si_prefix = base32.b2a_l(self._storage_index[:8], 60)
        self._node_logparent = logparent
        self._download_status = download_status
        self._lp = log.msg(format="ShareFinder[si=%(si)s] starting",
                           si=self._si_prefix, level=log.NOISY,
                           parent=logparent, umid="2xjj2A")
Example 2
0
 def __init__(self, sharenum, bucket, share_hash_tree, num_blocks,
              block_size, share_size):
     """Proxy for reading one share out of its bucket.

     The caller must already have seeded share_hash_tree with the root
     (hash number 0), taken from the UEB's share_root_hash.
     """
     precondition(share_hash_tree[0] is not None, share_hash_tree)
     si_b32 = base32.b2a_l(share_hash_tree[0][:8], 60)
     prefix = "%d-%s-%s" % (sharenum, bucket, si_b32)
     log.PrefixingLogMixin.__init__(self, facility="tahoe.immutable.download",
                                    prefix=prefix)
     self.sharenum = sharenum
     self.bucket = bucket
     self.share_hash_tree = share_hash_tree
     self.num_blocks = num_blocks
     self.block_size = block_size
     self.share_size = share_size
     # Block hashes get filled in as individual blocks are validated.
     self.block_hash_tree = hashtree.IncompleteHashTree(self.num_blocks)
Example 3
0
    def __init__(self, verifycap, servers, verify, add_lease,
                 secret_holder, monitor):
        """Checker for one immutable file, identified by its verifycap."""
        assert precondition(isinstance(verifycap, CHKFileVerifierURI), verifycap, type(verifycap))

        si = verifycap.get_storage_index()
        prefix = "%s" % base32.b2a_l(si[:8], 60)
        log.PrefixingLogMixin.__init__(self, facility="tahoe.immutable.checker",
                                       prefix=prefix)

        self._verifycap = verifycap
        self._monitor = monitor
        self._servers = servers
        # When true, verify what the servers claim rather than trusting them.
        self._verify = verify
        self._add_lease = add_lease

        self.file_renewal_secret = file_renewal_secret_hash(
            secret_holder.get_renewal_secret(), si)
        self.file_cancel_secret = file_cancel_secret_hash(
            secret_holder.get_cancel_secret(), si)
Example 4
0
 def __init__(self, sharenum, bucket, share_hash_tree, num_blocks,
              block_size, share_size):
     """Read-side proxy for a single share bucket.

     share_hash_tree must arrive pre-initialized with its root (the
     number-0 hash), i.e. the share_root_hash from the UEB.
     """
     precondition(share_hash_tree[0] is not None, share_hash_tree)
     root_b32 = base32.b2a_l(share_hash_tree[0][:8], 60)
     prefix = "%d-%s-%s" % (sharenum, bucket, root_b32)
     log.PrefixingLogMixin.__init__(self,
                                    facility="tahoe.immutable.download",
                                    prefix=prefix)
     self.sharenum = sharenum
     self.bucket = bucket
     self.share_hash_tree = share_hash_tree
     self.num_blocks = num_blocks
     self.block_size = block_size
     self.share_size = share_size
     # Filled in incrementally as blocks are fetched and validated.
     self.block_hash_tree = hashtree.IncompleteHashTree(self.num_blocks)
Example 5
0
    def __init__(self, verifycap, servers, verify, add_lease, secret_holder, monitor):
        """Check the health of one immutable file across the given servers."""
        assert precondition(isinstance(verifycap, CHKFileVerifierURI), verifycap, type(verifycap))

        storage_index = verifycap.get_storage_index()
        prefix = "%s" % base32.b2a_l(storage_index[:8], 60)
        log.PrefixingLogMixin.__init__(self, facility="tahoe.immutable.checker", prefix=prefix)

        self._verifycap = verifycap
        self._monitor = monitor
        self._servers = servers
        self._verify = verify  # bool: verify the servers' claims, or trust them
        self._add_lease = add_lease

        # Lease-management secrets, derived per-file from the client secrets.
        renewal = secret_holder.get_renewal_secret()
        self.file_renewal_secret = file_renewal_secret_hash(renewal, storage_index)
        cancel = secret_holder.get_cancel_secret()
        self.file_cancel_secret = file_cancel_secret_hash(cancel, storage_index)
Example 6
0
    def __init__(self,
                 storage_broker,
                 verifycap,
                 node,
                 download_status,
                 logparent=None,
                 max_outstanding_requests=10):
        """Find shares for a single immutable download on behalf of `node`."""
        self.running = True  # flipped off by Share.stop, from Terminator
        self.verifycap = verifycap
        self._started = False
        self._storage_broker = storage_broker
        self.share_consumer = self.node = node
        self.max_outstanding_requests = max_outstanding_requests
        self._hungry = False

        self._commonshares = {}  # shnum -> CommonShare instance
        self.pending_requests = set()
        self.overdue_requests = set()  # subset of pending_requests
        self.overdue_timers = {}

        self._storage_index = verifycap.storage_index
        self._si_prefix = base32.b2a_l(self._storage_index[:8], 60)
        self._node_logparent = logparent
        self._download_status = download_status
        # Announce ourselves; keep the log handle for later messages.
        self._lp = log.msg(format="ShareFinder[si=%(si)s] starting",
                           si=self._si_prefix,
                           level=log.NOISY,
                           parent=logparent,
                           umid="2xjj2A")
Example 7
0
    def __init__(self, verifycap, storage_broker, secret_holder,
                 terminator, history, download_status):
        """Node coordinating the download of one immutable file."""
        assert isinstance(verifycap, uri.CHKFileVerifierURI)
        self._verifycap = verifycap
        self._storage_broker = storage_broker
        self._si_prefix = base32.b2a_l(verifycap.storage_index[:8], 60)
        self.running = True
        if terminator:
            # terminator invokes self.stop() during stopService()
            terminator.register(self)
        # Shutdown discipline:
        #  1: only send network requests while active (self.running is True)
        #  2: use TimerService, never reactor.callLater
        #  3: eventual-sends are allowed at any time
        # Together these should guarantee that everything is finished once
        # stopService()+flushEventualQueue() fires.
        self._secret_holder = secret_holder
        self._history = history
        self._download_status = download_status

        k, N = self._verifycap.needed_shares, self._verifycap.total_shares
        self.share_hash_tree = IncompleteHashTree(N)

        # Guess the segment size so Segmentation can fetch non-initial
        # segments in a single roundtrip. This fills in
        # .guessed_segment_size, .guessed_num_segments, and a dummy
        # .ciphertext_hash_tree (used to predict which hashes we'll need).
        self._build_guessed_tables(DEFAULT_MAX_SEGMENT_SIZE)

        # These are populated once a valid UEB has been parsed.
        self.have_UEB = False
        self.segment_size = None
        self.tail_segment_size = None
        self.tail_segment_padded = None
        self.num_segments = None
        self.block_size = None
        self.tail_block_size = None

        # Bookkeeping for callers waiting on data.

        # duplicates are permitted in this list
        self._segment_requests = []  # (segnum, d, cancel_handle, logparent)
        self._active_segment = None  # the current SegmentFetcher (has .segnum)

        self._segsize_observers = observer.OneShotObserverList()

        # One top-level logparent per _Node plus one per read() call:
        # Segmentation and get_segment() messages hang off the read()
        # entry, everything else off the _Node's entry.
        node_lp = log.msg(format="Immutable.DownloadNode(%(si)s) created:"
                          " size=%(size)d,"
                          " guessed_segsize=%(guessed_segsize)d,"
                          " guessed_numsegs=%(guessed_numsegs)d",
                          si=self._si_prefix, size=verifycap.size,
                          guessed_segsize=self.guessed_segment_size,
                          guessed_numsegs=self.guessed_num_segments,
                          level=log.OPERATIONAL, umid="uJ0zAQ")
        self._lp = node_lp

        self._sharefinder = ShareFinder(storage_broker, verifycap, self,
                                        self._download_status, node_lp)
        self._shares = set()
Example 8
0
 def __repr__(self):
     """Debug representation: class name, SI prefix (base32), share number."""
     si_b32 = base32.b2a_l(self.storage_index[:8], 60)
     return "<%s %s %s>" % (self.__class__.__name__, si_b32, self.shnum)
Example 9
0
 def __repr__(self):
     """Debug representation: class name, SI prefix (base32), share number."""
     prefix = base32.b2a_l(self.storage_index[:8], 60)
     return "<%s %s %s>" % (self.__class__.__name__, prefix, self.shnum)
Example 10
0
    def __init__(self, verifycap, storage_broker, secret_holder,
                 terminator, history, download_status):
        """Download coordinator for a single immutable file."""
        assert isinstance(verifycap, uri.CHKFileVerifierURI)
        self._verifycap = verifycap
        self._storage_broker = storage_broker
        self._si_prefix = base32.b2a_l(verifycap.storage_index[:8], 60)
        self.running = True
        if terminator:
            # terminator arranges for self.stop() to run at stopService()
            terminator.register(self)
        # Shutdown rules:
        #  1: network requests only while active (self.running is True)
        #  2: TimerService, never reactor.callLater
        #  3: eventual-sends are fine at any time
        # These ensure that once stopService()+flushEventualQueue() fires,
        # everything has completed.
        self._secret_holder = secret_holder
        self._history = history
        self._download_status = download_status

        k, N = self._verifycap.needed_shares, self._verifycap.total_shares
        self.share_hash_tree = IncompleteHashTree(N)

        # Guess the segment size so Segmentation can pull non-initial
        # segments in one roundtrip. Populates .guessed_segment_size,
        # .guessed_num_segments, and a dummy .ciphertext_hash_tree (to
        # predict which hashes we'll need).
        self._build_guessed_tables(DEFAULT_MAX_SEGMENT_SIZE)

        # Populated once a valid UEB is parsed.
        self.have_UEB = False
        self.segment_size = None
        self.tail_segment_size = None
        self.tail_segment_padded = None
        self.num_segments = None
        self.block_size = None
        self.tail_block_size = None

        # State for callers who have asked for data.

        # duplicates may appear in this list
        self._segment_requests = []  # (segnum, d, cancel_handle, seg_ev, lp)
        self._active_segment = None  # the current SegmentFetcher (has .segnum)

        self._segsize_observers = observer.OneShotObserverList()

        # One top-level logparent for this _Node and another per read()
        # call: Segmentation and get_segment() messages attach to the
        # read() entry, everything else to the _Node's entry.
        node_lp = log.msg(format="Immutable.DownloadNode(%(si)s) created:"
                          " size=%(size)d,"
                          " guessed_segsize=%(guessed_segsize)d,"
                          " guessed_numsegs=%(guessed_numsegs)d",
                          si=self._si_prefix, size=verifycap.size,
                          guessed_segsize=self.guessed_segment_size,
                          guessed_numsegs=self.guessed_num_segments,
                          level=log.OPERATIONAL, umid="uJ0zAQ")
        self._lp = node_lp

        self._sharefinder = ShareFinder(storage_broker, verifycap, self,
                                        self._download_status, node_lp)
        self._shares = set()