Example #1
    def uploads(self, requests, peers, history):
                
        if len(requests) == 0:
            return []

        rec_from = defaultdict(lambda: 0) #how much we've received from peers in the last round
        for x in history.downloads[-1]:
            rec_from[x.from_id] += x.blocks

        requests = list(set([x.requester_id for x in requests]))
        total = sum([rec_from[x] for x in requests])

        if total == 0: #nobody has uploaded to this agent yet
            if len(requests) <= 4:
                #if less than four requests, just fill them all
                chosen = requests[:]
            else:
                #just randomly take 4
                chosen = random.sample(requests, 4)
            bws = even_split(self.up_bw, len(chosen))
            random.shuffle(bws)
            uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

        else: #people have uploaded to this agent before
            zeros = [x for x in requests if rec_from[x] == 0] #those who did not give to this agent last round
            ones  = [[int(rec_from[x]/float(total)*self.up_bw*0.9), x] for x in requests if rec_from[x] > 0]  #those who have given
            remain = self.up_bw - sum([x[0] for x in ones])
            if remain > 0: #if there's leftover bandwidth
                if len(zeros): #give to someone who didn't give last time
                    ones.append([remain, random.choice(zeros)])
                else: #if everyone gave last time, give someone the extra bandwidth
                    random.choice(ones)[0] += remain
            uploads = [Upload(self.id, peer_id, bw) for (bw, peer_id) in ones]

        return uploads
Example #2
File: tyrant2.py Project: Zhu-Justin/P2P
    def uploads(self, requests, peers, history):
        round = history.current_round()

        if self.d is None:
            self.d = {
                p.id: self.up_bw / 4
                for p in peers
            }  #the book said to estimate the total download speed and divide by 4. Since we have no history, our upload bandwidth is the best guess
            self.u = {
                p.id: 1
                for p in peers
            }  #The graph in the book is in different units that I can't convert, so I have to just start here

        if len(history.downloads):
            givers = set([
                x.from_id for x in history.downloads[-1] if x.to_id == self.id
            ])  #peers who gave in the last round
            for p in self.u:
                if p not in givers:  #all the peers who did not unchoke this peer
                    self.u[p] *= (1 + self.alpha)
                if p in givers:
                    peer = [x for x in peers
                            if x.id == p][0]  #to get the peer with this ID
                    down_speed = len(peer.available_pieces
                                     ) * self.conf.blocks_per_piece / round / 4
                    self.d[p] = down_speed
        if len(history.downloads) >= self.r:
            givers = [[x.from_id for x in y]
                      for y in history.downloads[-self.r:]]
            for p in self.u:
                if all([p in x for x in givers]) and 'Seed' not in p:
                    self.u[p] *= 1 - self.gamma

        uploads = []
        if len(requests) == 0:
            return []
        else:
            req_peers = [[
                self.d[r.requester_id], self.u[r.requester_id], r.requester_id
            ] for r in requests]
            req_peers.sort(reverse=True,
                           key=lambda x: (x[0] / x[1], random.random()))
            total = 0
            for k in req_peers:
                if total + k[1] < self.cap:
                    uploads.append(Upload(self.id, k[2], k[1]))
                    total += k[1]
                elif self.cap - total > 0:  #even if it seems too little, might as well try to give this person the last bandwidth
                    uploads.append(Upload(self.id, k[2], self.cap - total))
                    total = self.cap
                    break
                else:
                    break

        return uploads
Example #3
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        if len(requests) == 0:
            return []

        n = min(len(requests), 3)

        chosen_requests = random.sample(requests, n)
        chosen = [request.requester_id for request in chosen_requests]
        # Evenly "split" my upload bandwidth among the chosen requesters
        bws = even_split(self.up_bw, len(chosen))

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
Example #4
File: jzasstd.py Project: Zhu-Justin/P2P
    def uploads(self, requests, peers, history):

        #count how many blocks each peer has uploaded to me in the last two rounds
        download_counts = defaultdict(lambda: 0)
        for r in history.downloads[-2:]:
            for down in r:
                if down.to_id == self.id:
                    download_counts[down.from_id] += down.blocks

        if len(requests) == 0:
            return []

        #sort all the requests by how much they've given to me
        chosen = list(set([x.requester_id for x in requests]))
        chosen.sort(reverse=True,
                    key=lambda x: (download_counts[x], random.random()))

        if len(chosen) > 3:
            if self.optimistic_id is None or self.optimistic_id not in chosen[
                    3:] or self.optimistic_count >= 3:
                self.optimistic_id = random.choice(chosen[3:])
                self.optimistic_count = 0
            else:
                self.optimistic_count += 1
            chosen = chosen[:3] + [self.optimistic_id]

        bws = even_split(self.up_bw, len(chosen))
        random.shuffle(bws)

        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
Example #5
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """
        # No requests received -> just terminate
        if len(requests) == 0:
            return []

        # Find the number of slots we want to allocate this round
        n_slots = min(self.slots, len(requests))

        # Choose n peers completely at random from the set of peers that requested pieces
        # This again breaks symmetry and makes us somewhat strategy-proof (more on that in the writeup)
        # Also, this makes the likelihood of being reciprocated very large
        # since we give away so much bw to a few peers
        chosen_requests = random.sample(requests, n_slots)
        chosen = [request.requester_id for request in chosen_requests]

        # Distribute the bw among the chosen peers
        bws = even_split(self.up_bw, len(chosen))

        # Create the upload objects out of the list of peer ids and bandwidths
        uploads = [Upload(self.id, peer_id, bw)
                   for (peer_id, bw) in zip(chosen, bws)]

        return uploads
Example #6
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        # No need to waste compute if there are no requests
        if len(requests) == 0:
            return []

        # Initialize a counter to keep track of how much the agent downloaded from all other
        # peers the last two rounds
        uploader_c = Counter()
        two_last = history.downloads[-2:]
        requester_ids = set(x.requester_id for x in requests)

        # Iterate over all download objects that we received the previous two rounds
        for download in chain(*two_last):
            # Only count the downloads that are from a peer that actually requested a piece from us
            # requester_ids is a set so this operation is luckily O(1)
            if download.from_id in requester_ids:
                uploader_c.update({download.from_id: download.blocks})

        # Choose the n peers who uploaded most to us as the ones we are going to reciprocate
        # Choose n to be the smaller of how many normal slots there are and how many requesters there are
        chosen = set(x[0] for x in uploader_c.most_common(
            min(len(requester_ids), self.normal_slots)))
        peer_ids = set(x.requester_id for x in requests).difference(chosen)

        # If there are still peers left to unchoke,
        # select a peer to optimistically unchoke if we either have not unchoked anyone yet
        # or if the optimistic unchoke interval has passed
        if bool(peer_ids) and (len(history.downloads) %
                               self.optimistic_unchoke_interval == 0
                               or not self.optimistic_unchoke):
            unchoke = random.choice(list(peer_ids))
            self.optimistic_unchoke = unchoke

        # Add the optimistically unchoked peer to the set
        if self.optimistic_unchoke:
            chosen.add(self.optimistic_unchoke)

        # No need to go any further if no peers were chosen
        if len(chosen) < 1:
            return []

        # Distribute as evenly as possible the bw across the chosen peer(s)
        bws = even_split(self.up_bw, len(chosen))

        # Create upload objects for the peer(s) with their respective allocated bandwidth(s)
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
Example #7
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """
        S = 4   # total unchoke slots

        round = history.current_round()

        # look at past rounds to determine previously cooperative peers
        last_round_dl = history.downloads[round-1] if round >= 1 else []
        second_last_round_dl = history.downloads[round-2] if round >= 2 else []
        # initialize array of chosen peers to unchoke
        chosen = []

        # check if no peers need to be unchoked
        if len(requests) == 0:
            bws = []

        else:
            # Step 1: Pick S-1 most cooperative peers with requests to unchoke

            # count total downloaded blocks from each peer in last 2 rounds
            downloads_per_peer = {peer.id:0 for peer in peers}
            for dl in last_round_dl:
                downloads_per_peer[dl.from_id] += dl.blocks
            for dl in second_last_round_dl:
                downloads_per_peer[dl.from_id] += dl.blocks

            # sort peers with requests by amount downloaded from them
            requester_ids = set([r.requester_id for r in requests])
            cooperative_peers = sorted(requester_ids, key=lambda x: downloads_per_peer[x], reverse=True)

            # choose S-1 most cooperative peers that have requests to unchoke
            chosen = cooperative_peers[:S-1]

            # Step 2: optimistically unchoke 1 peer every 3 rounds, store choice in class state
            if round % 3 == 0 or not self.optimistic_id:
                unchosen_requesters = set(requester_ids) - set(chosen)
                if len(unchosen_requesters) > 0:
                    self.optimistic_id = random.choice(tuple(unchosen_requesters))
                    chosen.append(self.optimistic_id)
            else:
                chosen.append(self.optimistic_id)

            # Evenly "split" upload bandwidth among the chosen requesters
            bws = even_split(self.up_bw, len(chosen))

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [Upload(self.id, peer_id, bw)
                   for (peer_id, bw) in zip(chosen, bws)]
            
        return uploads
Example #8
    def uploads(self, requests, peers, history):
        max_upload = 4  # max num of peers to upload to at a time
        requester_ids = list(set(map(lambda r: r.requester_id, requests)))

        n = min(max_upload, len(requester_ids))
        if n == 0:
            return []
        bws = even_split(self.up_bw, n)
        uploads = [Upload(self.id, p_id, bw)
                   for (p_id, bw) in zip(random.sample(requester_ids, n), bws)]
        return uploads
Example #9
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.

        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            # chosen = []
            download_bandwidth = {}
            for peer in peers:
                download_bandwidth[peer.id] = 0
            for downloads in history.downloads:
                for d in downloads:
                    download_bandwidth[d.from_id] += d.blocks
            # pick top 3 as unchoked peers
            chosen = sorted(download_bandwidth,
                            key=download_bandwidth.get,
                            reverse=True)[:3]
            # every 3 rounds, randomly unchoke another peer
            if round % 3 == 0:
                chosen.append(random.choice(peers).id)

            # logging.debug("Still here: uploading to a random peer")
            # change my internal state for no reason
            #    self.dummy_state["cake"] = "pie"
            # request = random.choice(requests)
            # chosen = [request.requester_id]
            # Evenly "split" my upload bandwidth among the one chosen requester
            bws = even_split(self.up_bw, len(chosen))

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]
        return uploads
Example #10
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        ##############################################################################
        # The code and suggestions here will get you started for the standard client #
        # You'll need to change things for the other clients                         #
        ##############################################################################

        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (
            self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.

        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            logging.debug("Still here: uploading to a random peer")

            ########################################################################
            # The dummy client picks a single peer at random to unchoke.           #
            # You should decide a set of peers to unchoke according to the protocol #
            ########################################################################
            request = random.choice(requests)
            chosen = [request.requester_id]


            
            # Now that we have chosen who to unchoke, the standard client evenly shares
            # its bandwidth among them
            bws = even_split(self.up_bw, len(chosen))

        # create actual uploads out of the list of peer ids and bandwidths
        # You don't need to change this
        uploads = [Upload(self.id, peer_id, bw)
                   for (peer_id, bw) in zip(chosen, bws)]
            
        return uploads
Example #11
    def uploads(self, requests, peers, history):

        #list of all the peers who have made a request
        peer_reduce = {}
        for r in requests:
            peer_reduce[r.requester_id] = r
        for p in peers:
            if p.id in peer_reduce:
                peer_reduce[p.id] = p
        peer_reduce = [peer_reduce[p] for p in peer_reduce]

        #of the peers who want something from me, figure out which ones I also want something from
        self.last_uploaded = []
        made = self.requests(peer_reduce, history)

        #take all the peers who have made a request, sort them by how much I would like to ask from them
        req_ids = defaultdict(lambda: 0)
        for x in made:
            req_ids[x.peer_id] += self.conf.blocks_per_piece - x.start
        peer_reduce.sort(reverse=True,
                         key=lambda x: (req_ids[x.id], random.random()))
        peer_reduce = [x.id for x in peer_reduce]

        #sort the requests so that the peers I want the most from come first
        requests.sort(
            key=lambda x: (peer_reduce.index(x.requester_id), random.random()))
        # requests.sort(key = lambda x: (x.requester_id in self.last_requested, peer_reduce.index(x.requester_id), random.random()))

        uploads = []
        bw_used = 0
        for r in requests:
            give = min(self.conf.blocks_per_piece - r.start,
                       self.up_bw - bw_used)
            if give <= 0:
                break
            uploads.append(Upload(self.id, r.requester_id, give))
            if r.requester_id not in self.last_uploaded:
                self.last_uploaded.append(r.requester_id)
            bw_used += give

        self.last_uploaded = [x for x in peers if x.id in self.last_uploaded]

        return uploads
Example #12
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()
        #logging.debug("%s again.  It's round %d." % (
        #    self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.

        if len(requests) == 0:
            #logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            #logging.debug("Still here: uploading to a random peer")
            # change my internal state for no reason
            self.dummy_state["cake"] = "pie"

            request = random.choice(requests)
            chosen = [request.requester_id]
            # Evenly "split" my upload bandwidth among the one chosen requester
            bws = even_split(self.up_bw, len(chosen))

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
Example #13
    def uploads(self, requests, peers, history):

        # current round
        round = history.current_round()

        if len(requests) == 0 or round == 0:
            chosen = []
            bws = []

        else:

            # list of requesters
            requesters = []
            for request in requests:
                requesters.append(request.requester_id)

            # count downloads received from peer in last round
            peerIds = [x.id for x in peers]
            scores = dict(zip(peerIds, [0] * len(peerIds)))
            for dl in history.downloads[round - 1]:
                if dl.from_id in requesters:
                    scores[dl.from_id] += dl.blocks

            # all downloads I got from requesters last round
            totalDownloads = sum(scores.values())

            # take list of unique requesters
            requesters = list(set(requesters))

            # create dictionary of peers to upload to, and how much to send to them
            # Formula: u_j = 0.9*u_t*(d_j/d_t)
            numsToUpload = {}
            for requester in requesters:
                if totalDownloads != 0:
                    numsToUpload[requester] = 0.9 * self.up_bw * (
                        float(scores[requester]) / totalDownloads)
                else:
                    numsToUpload[requester] = 0

            # chosen and bandwidths for (possible) unchoking and rounding
            chosen = list(numsToUpload.keys())
            bwPreRound = list(numsToUpload.values())

            # choose someone to optimistically unchoke every third round
            if round % 3 == 0:
                candidates = dict(
                    (k, v) for k, v in numsToUpload.items() if v == 0)

                # if there is a candidate, add to chosen and bws
                if len(candidates) != 0:
                    self.optim = random.choice(list(candidates.keys()))
                    chosen.append(self.optim)
                    bwPreRound.append(0.1 * self.up_bw)

                # otherwise, add 0.1*up_bw for all chosen
                else:
                    bwPreRound = [
                        x + (0.1 * self.up_bw / len(chosen))
                        for x in bwPreRound
                    ]

            # or keep person unchoked if they requested again and didn't upload
            else:
                if self.optim in requesters and (scores[self.optim] == 0):
                    chosen.append(self.optim)
                    bwPreRound.append(0.1 * self.up_bw)

                # otherwise, add 0.1*up_bw for all chosen
                else:
                    bwPreRound = [
                        x + (0.1 * self.up_bw / len(chosen))
                        for x in bwPreRound
                    ]

            # round bandwidths using helper function (in util.py)
            bws = round_list(bwPreRound)

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
Example #14
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.

        previous_peer_download_rates = Counter()
        if history.downloads:
            for d in history.downloads[-1]:
                previous_peer_download_rates[d.from_id] += d.blocks
        self.past_peer_download_rates.append(previous_peer_download_rates)

        previous_peer_upload_rates = {}
        if history.uploads:
            for u in history.uploads[-1]:
                previous_peer_upload_rates[u.to_id] = u.bw
        self.past_peer_upload_rates.append(previous_peer_upload_rates)

        for p in peers:
            # Keep track of piece finish rate
            available_pieces = set(p.available_pieces)
            if p.id not in self.past_peer_finish_rates:
                incremental_piece_count = [len(p.available_pieces)]
                self.past_peer_finish_rates[p.id] = (available_pieces,
                                                     incremental_piece_count)
            else:
                previous_pieces, incremental_piece_count = self.past_peer_finish_rates[
                    p.id]
                incremental_piece_count = incremental_piece_count + [
                    len(available_pieces - previous_pieces)
                ]
                self.past_peer_finish_rates[p.id] = (available_pieces,
                                                     incremental_piece_count)

            # Determine download rates
            if p.id in previous_peer_download_rates:
                peer_download_rate = previous_peer_download_rates[p.id]
                # Downloaded from peer in the previous round, set to actual download rate
                self.peer_download_rates[p.id] = peer_download_rate
            else:
                # Estimate download rate from piece finish rate
                past_piece_finish_rates = incremental_piece_count[
                    -self.num_download_rate_average_rounds:]
                peer_download_rate = float(
                    sum(past_piece_finish_rates)
                ) * self.conf.blocks_per_piece / len(past_piece_finish_rates)
                self.peer_download_rates[p.id] = peer_download_rate

            # Determine upload rates needed to satisfy peers to unchoke us
            if p.id in previous_peer_upload_rates:
                # Peer was unchoked by us in the previous period
                if p.id not in previous_peer_download_rates:
                    # Peer did not unchoke us
                    self.peer_upload_rates[p.id] *= self.alpha
                elif len(self.past_peer_upload_rates) >= self.r:
                    # Check if peer unchoked us in the previous r periods
                    unchoked = True
                    for ur in self.past_peer_upload_rates[-self.r:]:
                        if p.id not in ur:
                            unchoked = False
                            break
                    if unchoked:
                        self.peer_upload_rates[p.id] *= self.gamma
            else:
                # Estimate upload rate with equal split
                self.peer_upload_rates[p.id] = peer_download_rate

        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            return []
        else:
            peer_request_bw = Counter()
            for r in requests:
                peer_request_bw[
                    r.requester_id] += self.conf.blocks_per_piece - r.start

            logging.debug("Past peer finish rates %s" %
                          self.past_peer_finish_rates)
            logging.debug("Past peer upload rates %s" %
                          self.past_peer_upload_rates)
            logging.debug("Past peer download rates %s" %
                          self.past_peer_download_rates)
            logging.debug("Upload rates: %s" % self.peer_upload_rates)
            logging.debug("Download rates: %s" % self.peer_download_rates)

            peer_ids = list(peer_request_bw.keys())
            peer_ids.sort(key=lambda x: self.peer_download_rates[x] / self.peer_upload_rates[x]
                          if self.peer_download_rates[x] > 0 and self.peer_upload_rates[x] > 0
                          else 1.0)

            uploads = []

            remaining_bw = self.up_bw
            for peer_id in peer_ids:
                upload_bw = min(peer_request_bw[peer_id],
                                self.peer_upload_rates[peer_id])
                if remaining_bw >= upload_bw:
                    uploads.append(Upload(self.id, peer_id, upload_bw))
                    remaining_bw -= upload_bw
                else:
                    break

            return uploads
Example #15
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (self.id, round))
        # Discount factor
        gamma = 0.8

        # Initialize u_j and d_j for all peers
        if round == 0:
            for peer in peers:
                self.upload_rates[peer.id] = self.up_bw / 4
                self.download_rates[peer.id] = 1
                self.unchoked[peer.id] = 0
                self.diff_available[peer.id] = 0
                self.last_available[peer.id] = 0

        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:

            chosen = []
            bws = []

            # Store peers who requested pieces from you
            request_ids = []
            requested_pieces = {}
            requesting_peers = {}
            for request in requests:
                request_ids.append(request.requester_id)
                if request.piece_id in requested_pieces:
                    requested_pieces[request.piece_id] += 1
                else:
                    requested_pieces[request.piece_id] = 1
                if request.piece_id in requesting_peers:
                    requesting_peers[request.piece_id].append(
                        request.requester_id)
                else:
                    requesting_peers[request.piece_id] = [request.requester_id]

            # Do not upload to peers that are requesting the most requested piece
            max_piece = max(requested_pieces.iteritems(),
                            key=operator.itemgetter(1))[0]
            peers_to_avoid = requesting_peers[max_piece]
            random_peer = random.choice(peers_to_avoid)
            peers_to_avoid.remove(random_peer)

            # Order peers by decreasing reciprocation likelihood ratio
            for peer in peers:
                self.ratios[peer.id] = self.download_rates[
                    peer.id] / self.upload_rates[peer.id]

            total_up = 0
            while total_up < self.cap and self.ratios:  # stop once the cap is reached or no candidates remain
                greatest_ratio = max(self.ratios.values())
                greatest_list = [
                    key for key, value in self.ratios.items()
                    if value == greatest_ratio
                ]
                choice = random.choice(greatest_list)
                if (total_up + self.upload_rates[choice]) < self.cap:
                    if choice in request_ids and choice not in peers_to_avoid:
                        chosen.append(choice)
                        bws.append(self.upload_rates[choice])
                self.ratios.pop(choice)
                total_up += self.upload_rates[choice]

            # Update which peers have unchoked this agent
            downloads_last = {}
            if round != 0:
                for download in history.downloads[round - 1]:
                    if download.from_id not in downloads_last:
                        downloads_last[download.from_id] = download.blocks
                    else:
                        downloads_last[download.from_id] += download.blocks

                for peer in peers:
                    if peer.id in downloads_last:
                        self.unchoked[peer.id] += 1
                    else:
                        self.unchoked[peer.id] = 0

            # Update knowledge of downloads
            for peer in peers:
                self.diff_available[peer.id] = abs(
                    len(peer.available_pieces) - self.last_available[peer.id])

            # Update u_j and d_j for all unchoked peers
            for peer in chosen:
                if round != 0:
                    if self.unchoked[peer] == 0:
                        # Update u_j if peer choked
                        self.upload_rates[peer] = self.upload_rates[peer] * (
                            1 + self.alpha)

                        # Update d_j if peer choked
                        self.download_rates[peer] = self.diff_available[peer]
                    else:
                        # Update d_j if peer unchoked
                        self.download_rates[peer] = downloads_last[peer]

                        # Update u_j if peer unchoked for less than r periods
                        if self.unchoked[peer] < self.r:
                            self.upload_rates[peer] = self.upload_rates[
                                peer] * (1 + self.alpha)
                        # Update u_j if peer unchoked for more than r periods
                        else:
                            self.upload_rates[peer] = self.upload_rates[
                                peer] * (1 - self.gamma)

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        self.cap *= gamma

        return uploads
Example #16
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()

        # look at past round to determine previously cooperative peers
        last_round_dl = history.downloads[round - 1] if round >= 1 else []

        # initialize array of chosen peers to unchoke and bandwidths to give
        chosen = []
        bws = []

        # check if no peers need to be unchoked
        if len(requests) != 0:
            # Step 1: Calculate bandwidth for all requesters that have uploaded to us

            # count total downloaded blocks from each peer in last round
            downloads_per_peer = {}
            for dl in last_round_dl:
                downloads_per_peer[dl.from_id] = downloads_per_peer.get(dl.from_id, 0) + dl.blocks

            # calculate total blocks downloaded from requesters
            total_blocks = 0
            for blocks in downloads_per_peer.values():
                total_blocks += blocks

            # determine requesters not downloaded from last round
            reqs = set([req.requester_id for req in requests])
            downloaded_reqs = set(downloads_per_peer.keys())
            remaining = reqs - downloaded_reqs

            # allocate 10% for optimistic unchoking if there are requesters who
            # did not upload last round
            if remaining:
                # unchoke each peer and calculate bandwidth
                for peer, blocks in downloads_per_peer.items():
                    chosen.append(peer)
                    bws.append(floor(self.up_bw * 0.9 * blocks / total_blocks))

                # Step 2: optimistically unchoke 1 peer not downloaded from
                bws.append(self.up_bw - sum(bws))
                chosen.append(random.choice(list(remaining)))
            # if all requesters uploaded last round, allocate 100% accordingly
            else:
                # unchoke each peer and calculate bandwidth
                for peer, blocks in downloads_per_peer.items():
                    chosen.append(peer)
                    bws.append(floor(self.up_bw * blocks / total_blocks))

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
Example #17
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """
        print(self.slots)
        # drop banana peel after we win the mario kart race
        # in other words, stop uploading to anyone when we've finished all our pieces
        banana_peel = True
        for block_count in self.pieces:
            if block_count != self.conf.blocks_per_piece:
                banana_peel = False
                break

        if banana_peel:
            return []


        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (
            self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.

        # at the beginning, initialize each tao to roughly 1/4 of our bandwidth
        if round == 0:
            for p in peers:
                self.tao[p.id] = self.up_bw*.26
                self.unchoked[p.id] = -1 # -1 signifies they have never unchoked me
            return []

        # did I upload to them or download from them in the last turn
        uploaded_to = dict()
        last_turn = dict()
        for p in peers:
            last_turn[p.id] = False
            uploaded_to[p.id] = False

        for u in history.uploads[round-1]:
            uploaded_to[u.to_id] = True

        # if I uploaded to them and they downloaded from me, set f_ji to the bandwidth they gave me
        # also track the number of turns they have unchoked me
        for download in history.downloads[round-1]:
            if uploaded_to[download.from_id]:
                if last_turn[download.from_id]:
                    self.f_ji[download.from_id] += download.blocks
                else:
                    self.f_ji[download.from_id] = download.blocks
                    last_turn[download.from_id] = True
                    if self.unchoked[download.from_id] < 1:
                        self.unchoked[download.from_id] = 1
                    else:
                        self.unchoked[download.from_id] += 1

        # for each peer I unchoked last turn, fine tune tao according to the procedure in the textbook
        for p in peers:
            if uploaded_to[p.id]:
                if not last_turn[p.id]:
                    self.tao[p.id] *= (1+self.alpha)
                    if self.unchoked[p.id] > 0:
                        self.unchoked[p.id] = 0
                elif self.unchoked[p.id] > self.r:
                    self.tao[p.id] *= (1-self.gamma)



        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []

        else:
            logging.debug("Still here: uploading best ROI peers")
            # estimate download rates for peers that have never unchoked me
            for p in peers:
                if self.unchoked[p.id] <= -1:
                    self.f_ji[p.id] = (float(len(p.available_pieces) * self.conf.blocks_per_piece)/round)*.25

            # record how much each requester is requesting
            bw_requested = dict()
            blocks_per_piece = self.conf.blocks_per_piece
            for r in requests:
                if r.requester_id in bw_requested.keys():
                    bw_requested[r.requester_id] += (blocks_per_piece - r.start)
                else:
                    bw_requested[r.requester_id] = (blocks_per_piece - r.start)

            requesters = list(bw_requested.keys())
            # sort requesters by ROI in descending order
            requesters.sort(reverse=True, key=lambda r: self.f_ji[r]/self.tao[r])

            # if self.id == "chandTyrant0":
            #     print requesters


            chosen = []
            bws = []

            # # based on number of slots, subdivide bandwidth other than reserved optimistic
            # # unchoking slot based on the f_ji/tao_j ratio proportions
            remaining_bw = int(self.up_bw - ((1.0/self.slots) * self.up_bw)) - 1
            # remaining_slots = self.slots - 1
            #
            # try:
            #     request_subset = requesters[:remaining_slots]
            # except:
            #     request_subset = requesters
            #
            # logging.info(request_subset)
            #
            # ratios = [self.f_ji[r]/self.tao[r] for r in request_subset]
            # logging.info(ratios)
            # normalizing_denominator = sum(ratios)
            #
            # # the bots requesting from us have recorded ratios
            # if normalizing_denominator:
            #     for requester, ratio in zip(request_subset, ratios):
            #         chosen.append(requester)
            #         bws.append(float(ratio/normalizing_denominator) * remaining_bw)
            # else:
            #     normalizing_denominator = sum([bw_requested[requester] for requester in request_subset])
            #     for requester in request_subset:
            #         chosen.append(requester)
            #         bws.append(float(bw_requested[requester] / normalizing_denominator) * remaining_bw)

            # allocate all available bandwidth in order of descending ROI
            # allocate less than tao if they have requested less than tao
            # give remaining bandwidth to whoever is next in line
            # remaining_bw = self.up_bw - 1 # had to make 1 less than total because it was giving an error otherwise
            j = 0
            while remaining_bw > 0 and j < len(requesters):
                to_allocate = min(self.tao[requesters[j]], bw_requested[requesters[j]], remaining_bw)

                # allocate up to tao, or less if they requested less or less bandwidth remains
                if to_allocate > 0:
                    chosen.append(requesters[j])
                    bws.append(to_allocate)
                    remaining_bw -= to_allocate
                j += 1

            # selective unchoking: find one more bot to unchoke
            # constraint: the requester (not already chosen) with the most pieces we still need
            peer_id_to_peer = {}
            for peer in peers:
                peer_id_to_peer[peer.id] = peer

            needed = lambda i: self.pieces[i] < self.conf.blocks_per_piece
            needed_pieces = filter(needed, range(len(self.pieces)))
            np_set = set(needed_pieces)

            peer_id_to_interested_set_len = {}
            for peer in peers:
                av_set = set(peer.available_pieces)
                isect = av_set.intersection(np_set)
                peer_id_to_interested_set_len[peer.id] = len(isect)

            unchoking_selection = None
            max_interested_set = float("-inf")
            chosen_set = set(chosen)
            for requester in requesters:
                if requester in chosen_set:
                    continue
                curr_val = peer_id_to_interested_set_len[requester]
                if curr_val > max_interested_set:
                    max_interested_set = curr_val
                    unchoking_selection = requester

            if unchoking_selection is not None:
                chosen.append(unchoking_selection)
                bws.append(floor(self.up_bw * (1.0 / self.slots)))

            # optimism = 0.25
            # bws = map(lambda a: int((1-optimism)*a), bws)
            # print len(requesters), len(chosen)
            # chosen.append(random.choice(requesters))
            # bws.append(int(optimism*self.up_bw))

        print(zip(chosen, bws))
        logging.info("Bandwidth available:")
        logging.info(self.up_bw)
        logging.info("Bandwidth used:")
        logging.info(sum(bws))
        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [Upload(self.id, peer_id, bw)
                   for (peer_id, bw) in zip(chosen, bws)]

        return uploads
Example #18
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """
        uploads = []
        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.

        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            # logging.debug("Still here: uploading to a random peer")
            # change my internal state for no reason
            self.dummy_state["cake"] = "pie"
            '''
            request = random.choice(requests)
            chosen = [request.requester_id]
            # Evenly "split" my upload bandwidth among the one chosen requester
            bws = even_split(self.up_bw, len(chosen))
            '''

            d = {}

            if round == 0:
                slots = 5
                # no history yet: estimate each peer's download and upload rates,
                # assuming each peer splits its bandwidth over `slots` slots and
                # that we split ours evenly among the other peers
                for peer in peers:
                    est_upload = self.up_bw / (len(peers) - 1)
                    est_download = peer.up_bw / slots
                    d[peer.id] = [est_download, est_upload]

            else:  # past the first round
                # download history of last round
                download_history = history.downloads[round - 1]
                history_d = {}
                # print(download_history)
                for download in download_history:
                    download_id = download.from_id
                    if download_id not in history_d:
                        history_d[download_id] = download.blocks
                    else:
                        history_d[download_id] += download.blocks

                # upload history of last round
                upload_history = history.uploads[round - 1]
                history_u = {}

                for upload in upload_history:
                    upload_id = upload.to_id
                    if upload_id not in history_u:
                        history_u[upload_id] = upload.bw
                    else:
                        history_u[upload_id] += upload.bw

                d = {}
                for peer in peers:
                    if peer.id not in history_d.keys():
                        last_download = len(peer.available_pieces) / round
                        # 5a of page 120 algo
                        if peer.id not in history_u.keys():
                            last_upload = self.up_bw / (len(peers) - 1)
                        else:
                            last_upload = history_u[peer.id] * (1 + 0.2)
                    else:
                        last_download = history_d[peer.id]
                        # r is 3 periods
                        rounds = [False, False, False]
                        if round >= 3:
                            for i in range(3):
                                hist_d = history.downloads[round - 1 - i]
                                for h in hist_d:
                                    if peer.id == h.from_id:
                                        rounds[i] = True

                        if peer.id not in history_u.keys():
                            last_upload = self.up_bw / (len(peers) - 1)
                        else:
                            last_upload = history_u[peer.id]

                        #  5c of page 120 algo
                        if (rounds[0] is True) and (rounds[1] is True) and (
                                rounds[2] is True):
                            last_upload *= 0.9

                    d[peer.id] = [last_download, last_upload]
            rank = []
            for peer in d.keys():
                #     print(peer)
                rank.append((float(d[peer][0]) / d[peer][1], d[peer][1], peer))

            rank.sort()
            rank.reverse()

            cap = self.up_bw
            # print("cap is {0}".format(cap))
            count = 0
            while cap > 0:
                # print("---")
                if count <= len(rank) - 1:
                    # print("----")
                    if cap - rank[count][1] > 1:
                        uploads.append(
                            Upload(self.id, rank[count][2], rank[count][1]))
                    cap -= rank[count][1]
                    count += 1
                else:
                    break

        # create actual uploads out of the list of peer ids and bandwidths
        '''
        uploads = [Upload(self.id, peer_id, bw)
                   for (peer_id, bw) in zip(chosen, bws)]
        '''
        # print(uploads)
        return uploads
Example #19
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.

        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            logging.debug("Still here: uploading to a random peer")
            # change my internal state for no reason
            self.dummy_state["cake"] = "pie"
            if round == 0:
                chosen = [request.requester_id for request in requests][:4]
                bws = even_split(self.up_bw, len(chosen))
            else:
                pasthist = history.downloads[round - 1]
                ndict = {}
                for item in pasthist:
                    if item.to_id != self.id or 'Seed' in item.from_id:
                        continue
                    pid = item.from_id
                    if pid in ndict.keys():
                        ndict[pid] += item.blocks
                    else:
                        ndict[pid] = item.blocks
                requestids = [request.requester_id for request in requests]
                totaluploads = sum([ndict.get(id, 0) for id in requestids])
                #now actually find the proportionality
                try:
                    bws = [
                        self.up_bw * self.for_sharing *
                        (float(ndict.get(id, 0)) / float(totaluploads))
                        for id in requestids
                    ]
                    randind = random.randint(0, len(requestids) - 1)

                    to_share = min(self.up_bw - sum(bws),
                                   self.up_bw * self.for_sharing)
                    bws[randind] += to_share
                    logging.info((str(self.id) + " " + str(self.up_bw) + ": " +
                                  str(list(zip(requestids, bws)))))
                    chosen = requestids
                except:
                    chosen = [request.requester_id for request in requests][:4]
                    bws = even_split(self.up_bw, len(chosen))
                #now add the optimistic

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
Example #20
File: a000std.py Project: frw/CS136
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.

        # take care of updating the trackers for your friendliest peers
        self.two_round_ago = self.one_round_ago.copy()
        self.one_round_ago = dict()
        if round != 0:
            for d in history.downloads[round - 1]:
                # let's assume that we can just add blocks split among pieces
                # rather than average among pieces
                if d.from_id in self.one_round_ago.keys():
                    self.one_round_ago[d.from_id] += d.blocks
                else:
                    self.one_round_ago[d.from_id] = d.blocks

        logging.debug("Here are my peer histories from two round ago: %s" %
                      self.two_round_ago)
        logging.debug("and from one round ago: %s" % self.one_round_ago)

        # and now add up the last two rounds
        c = Counter(self.two_round_ago)
        c.update(self.one_round_ago)
        best_friends = c.most_common()

        logging.debug("and my best friends!: %s" % best_friends)

        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            logging.debug("Still here: uploading to my favorite peers")
            # change my internal state for no reason
            # No! Bad! Keep the cake!
            # self.dummy_state["cake"] = "pie"

            # **Reciprocal unchoking**
            # let's assume that we only want to give to our
            # mostest bestest friends, even if they don't request from us
            # It promotes charity :)

            # Let's also do some handling to randomize if we have multiple best friends
            # of same bestiness
            # most of this is just ugly handling of cases where we don't have enough friends
            chosen = []
            # handle bestest friends either being clear best or also tied
            if len(best_friends) > 2:
                candidate_best_friends = [best_friends[2][0]]
                best_friend_counter = 3
                # handle best friends tied for bestiness
                while (best_friend_counter < len(best_friends)
                       and best_friends[best_friend_counter][1]
                       == best_friends[2][1]):
                    candidate_best_friends.append(
                        best_friends[best_friend_counter][0])
                    best_friend_counter += 1
                if best_friends[0][1] > best_friends[2][1]:
                    chosen.append(best_friends[0][0])
                else:
                    candidate_best_friends.append(best_friends[0][0])
                if best_friends[1][1] > best_friends[2][1]:
                    chosen.append(best_friends[1][0])
                else:
                    candidate_best_friends.append(best_friends[1][0])
            else:
                candidate_best_friends = []
                if len(best_friends) > 1:
                    chosen.append(best_friends[1][0])
                if len(best_friends) > 0:
                    chosen.append(best_friends[0][0])
            # finally, we can actually randomize
            random.shuffle(candidate_best_friends)
            for i in xrange(3 - len(chosen)):
                # let's assume we're okay leaving best friend slots empty
                if i < len(candidate_best_friends):
                    chosen.append(candidate_best_friends[i])

            # **Optimistic unlocking**
            # Again, let's assume that our optimistic doesn't necessarily
            # have to be in the requests
            # Let's also assume that we won't give to the optimistic
            # if they're already in our best friends--we can wait until they're not,
            # or a new optimistic is set
            if round % 3 == 0:
                self.optimistic = random.choice(peers).id
            if self.optimistic not in chosen:
                chosen.append(self.optimistic)

            logging.debug("And here are my chosen peers: %s", chosen)
            # request = random.choice(requests)
            # chosen = [request.requester_id]
            # Evenly "split" my upload bandwidth among the chosen requesters
            bws = even_split(self.up_bw, len(chosen))

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
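These examples all lean on an even_split helper from the course scaffold that is not shown here. A minimal sketch of what it is assumed to do (split bw into n integer shares that differ by at most one):

def even_split(bw, n):
    # Assumed behaviour: n near-equal integer shares that sum to bw.
    shares = [bw // n] * n
    for i in range(bw % n):
        shares[i] += 1
    return shares

# e.g. even_split(10, 4) -> [3, 3, 2, 2]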
Example #21
0
    def uploads(self, incoming_requests, peers, history):
        """
        incoming_requests: list of Request objects.
        peers: list of PeerInfo objects.
        history: AgentHistory object.
        returns: list of Upload objects.
        uploads will be called after requests
        """
        current_round = history.current_round()

        # Initializing data
        if current_round == 0:
            self.expected_peer_download_rate = {peer.id: 0 for peer in peers}
            self.rounds_unchoked_by_peer = {peer.id: 0 for peer in peers}
            self.estimated_min_upload_rate_to_peer = {
                peer.id: self.initial_min_upload_rate
                for peer in peers
            }
        else:
            last_round_download_history = history.downloads[current_round - 1]
            last_round_upload_history = history.uploads[current_round - 1]

            self.receiver_peer_id_set = set(
                map(lambda upload: upload.to_id, last_round_upload_history))
            self.giver_peer_id_set = set(
                map(lambda download: download.from_id,
                    last_round_download_history))

            # Adjusting upload rates
            for receiver_peer_id in self.receiver_peer_id_set:
                # If they are choking us
                if receiver_peer_id not in self.giver_peer_id_set:
                    # Increase min upload speed
                    self.estimated_min_upload_rate_to_peer[
                        receiver_peer_id] *= self.bandwith_increasing_factor
                # If they're unchoking us
                else:
                    # Increase count of unchoked rounds
                    self.rounds_unchoked_by_peer[receiver_peer_id] += 1
                    if self.rounds_unchoked_by_peer[
                            receiver_peer_id] >= self.confidence_unchoked_periods:
                        # Decrease min upload speed
                        self.estimated_min_upload_rate_to_peer[
                            receiver_peer_id] *= self.bandwith_decreasing_factor

            # Accumulating total downloads for each peer
            downloaded_from_peer = {
                peer_id: 0
                for peer_id in self.giver_peer_id_set
            }
            for download in last_round_download_history:
                downloaded_from_peer[download.from_id] += download.blocks

            # Observed download flow
            for peer_id, blocks in downloaded_from_peer.items():
                self.expected_peer_download_rate[peer_id] = blocks

            # Estimated download flow for a single peer
            for peer in peers:
                # These are the peers who didn't upload to us
                if peer.id not in self.giver_peer_id_set:
                    peer_pieces_now = len(peer.available_pieces)

                    # If the peer has the same pieces, we shouldn't be interested in uploading to them
                    interest_in_peer = len(
                        set(self.needed_pieces_list())
                        & set(peer.available_pieces))

                    if interest_in_peer > 0:
                        # Estimating their rate based on the game structure
                        self.expected_peer_download_rate[
                            peer.
                            id] = self.conf.blocks_per_piece * peer_pieces_now / current_round / self.assumed_peer_slots
                    else:
                        # We don't care about this peer
                        self.expected_peer_download_rate[peer.id] = 0

        sorted_requester_id_list = []
        used_bandwidths = []

        if len(incoming_requests) > 0:
            # We don't want duplicates
            requester_id_list = list(
                {r.requester_id
                 for r in incoming_requests})

            # Random order
            random.shuffle(requester_id_list)

            # Sorts from largest to smallest ratio
            sorted_requester_id_list = sorted(
                requester_id_list,
                key=lambda peer_id: self.peer_ratio(peer_id),
                reverse=True)

            # Using up the bandwidth
            bandwidth_accumulator = 0
            for index, peer_id in enumerate(sorted_requester_id_list):
                bandwidth_accumulator += int(
                    self.estimated_min_upload_rate_to_peer[peer_id])
                if bandwidth_accumulator > self.up_bw:
                    # Don't include this one or the rest
                    sorted_requester_id_list = sorted_requester_id_list[:index]
                    break

            used_bandwidths = map(
                lambda pid: int(self.estimated_min_upload_rate_to_peer[pid]),
                sorted_requester_id_list)

        # Create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, pid, bw)
            for pid, bw in zip(sorted_requester_id_list, used_bandwidths)
        ]
        return uploads
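Example #21 calls a peer_ratio helper that is not shown. A hypothetical sketch consistent with the sort above (expected download rate per unit of estimated minimum upload rate; the name and fields are assumptions):

def peer_ratio(self, peer_id):
    # Hypothetical helper: expected download from this peer divided by the
    # minimum upload rate we think is needed to stay unchoked by it.
    min_rate = self.estimated_min_upload_rate_to_peer[peer_id]
    if min_rate <= 0:
        return 0.0
    return self.expected_peer_download_rate[peer_id] / float(min_rate)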
Example #22
0
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.

        # at the beginning, initialize all Taos to 1/4 of bandwidth
        if round == 0:
            for p in peers:
                self.tao[p.id] = self.up_bw * .26
                self.unchoked[
                    p.id] = -1  # -1 signifies they have never unchoked me
            return []

        # did I upload to them or download from them in the last turn
        uploaded_to = dict()
        last_turn = dict()
        for p in peers:
            last_turn[p.id] = False
            uploaded_to[p.id] = False

        for u in history.uploads[round - 1]:
            uploaded_to[u.to_id] = True

        # if I uploaded to them and they downloaded from me, set f_ji to the bandwidth they gave me
        # also track the number of turns they have unchoked me
        for download in history.downloads[round - 1]:
            if uploaded_to[download.from_id]:
                if last_turn[download.from_id]:
                    self.f_ji[download.from_id] += download.blocks
                else:
                    self.f_ji[download.from_id] = download.blocks
                    last_turn[download.from_id] = True
                    if self.unchoked[download.from_id] < 1:
                        self.unchoked[download.from_id] = 1
                    else:
                        self.unchoked[download.from_id] += 1

        # for each peer I unchoked last turn, fine-tune tao according to the procedure in the textbook
        for p in peers:
            if uploaded_to[p.id]:
                if not last_turn[p.id]:
                    self.tao[p.id] *= (1 + self.alpha)
                    if self.unchoked[p.id] > 0:
                        self.unchoked[p.id] = 0
                elif self.unchoked[p.id] > self.r:
                    self.tao[p.id] *= (1 - self.gamma)

        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []

        else:
            logging.debug("Still here: uploading best ROI peers")
            # change my internal state for no reason
            for p in peers:
                if self.unchoked[p.id] <= -1:
                    self.f_ji[p.id] = (float(
                        len(p.available_pieces) * self.conf.blocks_per_piece) /
                                       round) * .25

            # record how much each requester is requesting
            bw_requested = dict()
            blocks_per_piece = self.conf.blocks_per_piece
            for r in requests:
                if r.requester_id in bw_requested.keys():
                    bw_requested[r.requester_id] += (blocks_per_piece -
                                                     r.start)
                else:
                    bw_requested[r.requester_id] = (blocks_per_piece - r.start)

            requesters = bw_requested.keys()
            # sort requesters by ROI in descending order
            requesters.sort(reverse=True,
                            key=lambda r: self.f_ji[r] / self.tao[r])

            if self.id == "chandTyrant0":
                print requesters

            chosen = []
            bws = []

            # allocate all available bandwidth in order of descending ROI
            # allocate less than tao if they have requested less than tao
            # give remaining bandwidth to whoever is next in line
            remaining_bw = self.up_bw - 1  # had to make 1 less than total because it was giving an error otherwise
            j = 0
            while remaining_bw > 0 and j < len(requesters):
                to_allocate = min(self.tao[requesters[j]],
                                  bw_requested[requesters[j]], remaining_bw)
                # if to_allocate > remaining_bw:
                #     remaining_bw = 0
                if to_allocate > 0:
                    chosen.append(requesters[j])
                    bws.append(to_allocate)
                    remaining_bw -= to_allocate
                # advance even if nothing was allocated, so the loop cannot stall
                j += 1

            # optimism = 0.25
            # bws = map(lambda a: int((1-optimism)*a), bws)
            # print len(requesters), len(chosen)
            # chosen.append(random.choice(requesters))
            # bws.append(int(optimism*self.up_bw))

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
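Every example returns Upload objects from the simulator. A minimal stand-in, assuming Upload is just a (from_id, to_id, bw) record (field names inferred from how the code uses it):

from collections import namedtuple

# Assumed shape of the simulator's Upload record.
Upload = namedtuple('Upload', ['from_id', 'to_id', 'bw'])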
Example #23
0
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        current_round = history.current_round()
        logging.debug("%s again.  It's round %d." % (self.id, current_round))

        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            logging.debug("Still here: uploading to a random peer")

            n_rounds = 2
            prev_rounds = history.downloads[-n_rounds:]
            peer_blocks_downloaded = pd.DataFrame(
                data=np.zeros(len(peers)), index=[peer.id for peer in peers])
            for prev_round in prev_rounds:
                for download in prev_round:
                    peer_blocks_downloaded.loc[
                        download.from_id] = peer_blocks_downloaded.loc[
                            download.from_id] + download.blocks

            peers_with_pos_upload = peer_blocks_downloaded[
                peer_blocks_downloaded[0] > 0].index.values
            requesters = [request.requester_id for request in requests]

            requesters_with_pos_upload = [
                peer for peer in peers_with_pos_upload if peer in requesters
            ]
            filtered_peer_blocks_downloaded = peer_blocks_downloaded[
                peer_blocks_downloaded.index.isin(requesters_with_pos_upload)]
            filtered_peer_blocks_downloaded_percentage = filtered_peer_blocks_downloaded / filtered_peer_blocks_downloaded.sum(
            )

            # assign some bw to prop sharing, assign some bw to optimistic unchoking
            optimistic_bw_percentage = .1
            propshare_bw = self.up_bw * (1 - optimistic_bw_percentage)
            propshare_peer_bw = (filtered_peer_blocks_downloaded_percentage *
                                 propshare_bw).round()
            rounded_propshare_bw = propshare_peer_bw.sum().values[0]

            # only assign some BW to optimistic unchoking
            optimistic_bw = self.up_bw - rounded_propshare_bw if rounded_propshare_bw > 0 else round(
                self.up_bw * optimistic_bw_percentage)

            # filtered candidates for optimistic unchoking
            other_requesters = list(
                set([
                    requester for requester in requesters
                    if requester not in requesters_with_pos_upload
                ]))

            # OPTIMISTIC UNCHOKING: unchoke randomly every 3 stages
            optimistic_rounds = 3
            if len(other_requesters) > 0:
                if self.state["optimistic_spot"] is not None:
                    if self.state[
                            "round"] % optimistic_rounds == 0 or self.state[
                                "optimistic_spot"] in requesters_with_pos_upload:
                        optimistic_spot = [random.choice(other_requesters)]
                    else:
                        optimistic_spot = [self.state["optimistic_spot"]]
                else:
                    optimistic_spot = [random.choice(other_requesters)]

                self.state["optimistic_spot"] = optimistic_spot[0]

            else:
                optimistic_spot = []
                self.state["optimistic_spot"] = None

            # create chosen array
            chosen = requesters_with_pos_upload + optimistic_spot

            # Assign the proportional-share bandwidths plus the optimistic slot to the chosen peers
            if (len(chosen) > 0):
                bws = list(propshare_peer_bw.values.T[0]) + [optimistic_bw]
            else:
                bws = []

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        self.state["round"] += 1

        return uploads
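A standalone sketch of the proportional-share split used above, with hypothetical numbers and without pandas:

up_bw = 100                              # assumed upload capacity
received = {'peerA': 30, 'peerB': 15}    # blocks downloaded from each requester recently
optimistic_bw_percentage = 0.1

propshare_bw = up_bw * (1 - optimistic_bw_percentage)   # 90.0
total = float(sum(received.values()))                    # 45.0
split = {p: round(propshare_bw * b / total) for p, b in received.items()}
# split is roughly {'peerA': 60, 'peerB': 30}; the remaining ~10 goes to the optimistic slot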
Example #24
0
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """
        chosen = []
        bws = []

        if len(requests) != 0:
            round = history.current_round()
            last_downloads = history.downloads[
                round - self.state["len_history"]:round] if round != 0 else []
            request_ids = list(
                set([request.requester_id for request in requests]))

            # chosen_bw maps peer to proportion of upload bandwidth to receive
            chosen_bw = dict()

            # first, the proportional unchoking slots
            if last_downloads:
                # calculate total discounted downloads over last len_history rounds
                last_id_blocks = dict()
                for i, download in enumerate(reversed(last_downloads)):
                    discount = self.state["history_discount"]**i
                    for d in download:
                        if d.from_id in last_id_blocks:
                            last_id_blocks[d.from_id] += discount * d.blocks
                        else:
                            last_id_blocks[d.from_id] = discount * d.blocks

                # filter to only those who want to download
                last_id_blocks = {
                    peer: blocks
                    for peer, blocks in last_id_blocks.items()
                    if peer in request_ids
                }

                if last_id_blocks:
                    total_upload_bw = float(sum(last_id_blocks.values()))
                    chosen_bw_list = [
                        (from_id, (1 - self.state["frac_random_bw"]) *
                         (blocks / total_upload_bw))
                        for from_id, blocks in last_id_blocks.items()
                    ]

                    # combine if multiple downloads from same peer
                    for peer, bw in chosen_bw_list:
                        if peer in chosen_bw:
                            chosen_bw[peer] += bw
                        else:
                            chosen_bw[peer] = bw

            if not chosen_bw:
                # there are no previous round downloads to reference, or the previous uploaders
                # don't want to download, so allocate to a single peer randomly
                chosen_bw[random.choice(
                    request_ids)] = 1 - self.state["frac_random_bw"]

            # next, the random upload
            unchosen_requests = list(
                filter(lambda p: p not in chosen_bw.keys(), request_ids))

            if unchosen_requests:
                # get pieces needed
                needed = lambda i: self.pieces[i] < self.conf.blocks_per_piece
                needed_pieces = filter(needed, range(len(self.pieces)))
                np_set = set(needed_pieces)

                # prefer to choose randomly among the unchosen peers, weighting by how many pieces peer has that I want
                weighted_unchosen_requests = []
                for peer in peers:
                    if peer.id in unchosen_requests:
                        num_wanted_pieces = len(
                            np_set.intersection(set(peer.available_pieces)))
                        weighted_unchosen_requests += ([peer.id] *
                                                       (num_wanted_pieces + 1))
                chosen_bw[random.choice(
                    weighted_unchosen_requests)] = self.state["frac_random_bw"]
            else:
                # otherwise, choose randomly a peer to give more bandwidth
                chosen_bw[random.choice(
                    request_ids)] += self.state["frac_random_bw"]

            # total share of bandwidth should sum to 1
            tolerance = 0.0001
            assert abs(
                sum(chosen_bw.values()) -
                1) < tolerance, "total proportion of upload bandwidth is not 1"

            # split bw as calculated
            chosen = chosen_bw.keys()
            bws = [bw * self.up_bw for bw in chosen_bw.values()]

            # fix floating point imprecision
            if sum(bws) > self.up_bw:
                small_decrement = 0.0000001
                bws = list(map(lambda bw: max(0, bw - small_decrement), bws))

        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
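A worked sketch of the discounted-history weighting above, with assumed parameters (len_history = 2, history_discount = 0.5):

history_discount = 0.5
rounds_newest_first = [{'peerA': 12}, {'peerA': 8}]   # blocks per peer, most recent round first
weights = {}
for i, round_blocks in enumerate(rounds_newest_first):
    for peer, blocks in round_blocks.items():
        weights[peer] = weights.get(peer, 0) + (history_discount ** i) * blocks
# weights == {'peerA': 16.0}: 12 * 0.5**0 + 8 * 0.5**1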
Example #25
0
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()
        logging.debug("%s again.  It's round %d." % (self.id, round))
        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.
        # update beliefs by aggregating upload/download speeds for the first few rounds
        if round > 0:
            if round < 5:
                self.update_beliefs(peers,
                                    history,
                                    update_download_sum=True,
                                    update_upload_sum=True,
                                    update_beliefs=False)
                for key, value in self.download_nums.iteritems():
                    self.download_beliefs[key] = value
            else:
                self.update_beliefs(peers, history)
        if len(requests) == 0:
            logging.debug("No one wants my pieces!")
            chosen = []
            bws = []
        else:
            logging.debug("Still here: uploading using brain cells")
            # if it has been fewer than r rounds, we allocate evenly among everyone
            if round < 5:
                logging.debug('even split')
                chosen = [request.requester_id for request in requests]
                bws = even_split(self.up_bw, len(chosen))
            # if it has been r or more rounds, we can use the algorithm
            else:
                cap = self.up_bw
                # sort requesters by calculating ratios of download to upload beliefs and sorting by decreasing
                ratios = dict()
                requesters = [request.requester_id for request in requests]
                for requester in requesters:
                    logging.debug('download_beliefs' +
                                  str(self.download_beliefs))
                    logging.debug(self.download_beliefs[requester])
                    logging.debug(self.upload_beliefs[requester])
                    ratios[requester] = self.download_beliefs[
                        requester] * 1.0 / self.upload_beliefs[requester]

                ratios_sorted = sorted(ratios.items(),
                                       key=lambda x: x[1],
                                       reverse=True)
                logging.debug('ratios')
                logging.debug(str(ratios_sorted))
                bandwidth_used = 0
                chosen, bws = [], []
                for pid, ratio in ratios_sorted:
                    if self.upload_beliefs[pid] + bandwidth_used > self.up_bw:
                        break
                    else:
                        bws.append(self.upload_beliefs[pid])
                        bandwidth_used += self.upload_beliefs[pid]
                        chosen.append(pid)
        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]

        return uploads
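A standalone sketch of the ratio-ordered allocation above, with assumed beliefs:

up_bw = 50
download_beliefs = {'A': 20, 'B': 10, 'C': 5}    # assumed blocks/round we expect from each peer
upload_beliefs = {'A': 15, 'B': 20, 'C': 30}     # assumed blocks/round needed to stay unchoked

order = sorted(download_beliefs,
               key=lambda p: download_beliefs[p] / float(upload_beliefs[p]),
               reverse=True)                      # ['A', 'B', 'C']
chosen, bws, used = [], [], 0
for p in order:
    if used + upload_beliefs[p] > up_bw:
        break
    chosen.append(p)
    bws.append(upload_beliefs[p])
    used += upload_beliefs[p]
# chosen == ['A', 'B'], bws == [15, 20]; C's 30 would exceed the remaining 15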
Example #26
0
    def uploads(self, requests, peers, history):

        slots = 4
        round = history.current_round()


        if round >= 2:
            # get download histories of previous rounds
            dl_history1 = history.downloads[round-1]
            dl_history2 = history.downloads[round-2]
            dl_history = dict()

            # fill in dictionary with how many blocks each peer has contributed last 2 turns
            for down in dl_history1:
                source_id = down.from_id
                if source_id not in dl_history.keys():
                    dl_history[source_id] = down.blocks
                else:
                    dl_history[source_id] += down.blocks

            for down in dl_history2:
                source_id = down.from_id
                if source_id not in dl_history.keys():
                    dl_history[source_id] = down.blocks
                else:
                    dl_history[source_id] += down.blocks

        if len(requests) == 0:
            #logging.debug("No one wants my pieces!")
            chosen = []
            bws = []

        else:
            if round >= 2:
                # rank received requests by upload contribution
                all_requesters = []
                requesters_upload = []
                chosen = []

                # make list of all peers making requests
                for request in requests:
                    request_id = request.requester_id
                    if request_id not in all_requesters:
                        all_requesters.append(request_id)

                # make list of how much each peer requested
                for requester in all_requesters:
                    if requester not in dl_history.keys():
                        requesters_upload.append((0, requester))
                    else:
                        requesters_upload.append((dl_history[requester], requester))


                # sort from highest upload contribution to least, and take the top requesters;
                # hold one slot back for a random unchoke except on every third round
                requesters_upload.sort(key = lambda x:x[0], reverse=True)
                if round%3 != 0:
                    chosen = [x[1] for x in requesters_upload[:slots-1]]
                else:
                    chosen = [x[1] for x in requesters_upload[:slots]]
                
                # get rid of chosen requests from request list
                new_requests = [x for x in requests if not x.requester_id in chosen]
                requests = new_requests                

                # fill the held-back slot with a random requester (skipped every third round)
                if round%3 != 0:
                    if len(requests) > 0:
                        # optimistically unchoke random request
                        random_request = random.choice(requests)
                        chosen.append(random_request.requester_id)
                        requests.remove(random_request)

                # fill remaining spots with random requests
                while len(chosen) < slots and len(requests) > 0:
                    random_request = random.choice(requests)
                    chosen.append(random_request.requester_id)
                    requests.remove(random_request)

            else:
                # fewer than two rounds of history: fall back to up to `slots`
                # distinct requesters chosen at random
                requester_ids = list(set(r.requester_id for r in requests))
                random.shuffle(requester_ids)
                chosen = requester_ids[:slots]

            bws = even_split(self.up_bw, len(chosen))
        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [Upload(self.id, peer_id, bw)
                   for (peer_id, bw) in zip(chosen, bws)]


        return uploads
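A compact sketch of the contribution-ranked selection with a random slot, as used above (hypothetical numbers, slots = 4):

import random

slots = 4
dl_history = {'A': 9, 'B': 4, 'C': 0}             # blocks contributed over the last two rounds
requesters = ['A', 'B', 'C', 'D', 'E']

ranked = sorted(requesters, key=lambda p: dl_history.get(p, 0), reverse=True)
chosen = ranked[:slots - 1]                        # top contributors, one slot held back
rest = [p for p in requesters if p not in chosen]
if rest:
    chosen.append(random.choice(rest))             # random/optimistic slot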
Example #27
0
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        # If there are no requests, just terminate
        if len(requests) == 0:
            return []

        # Initialize a counter for keeping track of how much people let us download last round
        uploader_c = Counter()
        last = history.downloads[-1]

        # Get a set of all peers who requested pieces from us
        requester_ids = set(x.requester_id for x in requests)

        for download in last:
            if download.from_id in requester_ids:
                uploader_c.update({download.from_id: download.blocks})

        total = sum(uploader_c.values())

        id_and_bw = []
        # Variable used to add excess bandwidth from flooring
        not_used_float = 0
        # Calculate bandwidth distribution according to the strategy described in the pset
        for peer_id in uploader_c:
            bw = floor((uploader_c[peer_id] / total) * 0.9 * self.up_bw)
            not_used_float += (uploader_c[peer_id] /
                               total) * 0.9 * self.up_bw - bw
            id_and_bw.append([peer_id, bw])

        not_used_int = floor(not_used_float)

        chosen = set(uploader_c.keys())
        peer_ids = set(x.requester_id for x in requests).difference(chosen)

        # If there are peers to choose from, add one to optimistically unchoke
        if bool(peer_ids):
            id_and_bw.append(
                [random.choice(list(peer_ids)),
                 floor(self.up_bw * 0.1)])

        # If there is unused bw -> distribute it evenly across the peers we are unchoking
        id_and_bw = sorted(id_and_bw, key=itemgetter(1), reverse=True)
        index = 0
        while not_used_int > 0:
            id_and_bw[index % len(id_and_bw)][1] += 1
            index += 1
            not_used_int -= 1

        # Create uploads out of the list of peer ids and bandwidths
        uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in id_and_bw]

        return uploads
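A worked sketch of the floor-and-redistribute arithmetic above, with assumed numbers:

from math import floor

up_bw = 50
received = {'A': 17, 'B': 13}                      # blocks each requester gave us last round
total = sum(received.values())                     # 30

id_and_bw, leftover = [], 0.0
for peer_id, blocks in received.items():
    exact = (blocks / float(total)) * 0.9 * up_bw  # 25.5 for A, 19.5 for B
    id_and_bw.append([peer_id, floor(exact)])      # 25 and 19
    leftover += exact - floor(exact)               # accumulates 1.0
# floor(leftover) == 1 extra unit is handed back one block at a time, largest share first;
# the reserved 10% (5 units here) goes to the optimistic unchoke.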
Example #28
0
 def uploads(self, requests, peers, history):
     """
     requests -- a list of the requests for this peer for this round
     peers -- available info about all the peers
     history -- history for all previous rounds
     returns: list of Upload objects.
     In each round, this will be called after requests().
     """
     round = history.current_round()
     # parameters (taken from BitTyrant algorithm -- Chapter 5)
     alpha = 0.20
     gamma = 0.10
     r = 3
     bw_capacity = self.up_bw # initial bandwidth per round
     # resetting our unchoked peers set every time (since we only consider
     # last round)
     self.unchoked_peers = set()
     # get list of peers requesting pieces this round
     requesters = set() # set so we don't have duplicates
     for request in requests:
         requesters.add(request.requester_id)
     # final bandwidths and chosen peers to unchoke
     bws, chosen = [], []
     # round 0, initialization round
     if len(requests) == 0:
         chosen, bws = [], []
         # initialize our book-keeping parameters (to avoid KeyError later)
         for peer in peers:
             # arbitrary number of slots to begin, not super important here
             self.num_slots = int(math.sqrt(self.up_bw))
             # divide evenly across all slots, but really not so important
             self.estimate_download_rate[peer.id] = self.up_bw / float(self.num_slots)
             self.expected_threshold_rate[peer.id] = self.up_bw / float(self.num_slots)
             self.previously_unchoked_peers_counter[peer.id] = 0 # starting round
         # upload nothing, just for closure
         uploads = [Upload(self.id, peer_id, bw)
                for (peer_id, bw) in zip(chosen, bws)]
         return uploads # should always be []
     # round 1 or more
     else:
         # change my internal state for no reason
         self.dummy_state["cake"] = "pie"
         # first, we must update our book-keeping parameters before this
         # round (equivalent to end of last round)
         update_prev_unchoked_peers(self, round, history, alpha, gamma, r, peers)
         # excellent, now we can sort by decreasing ratio (f_ij / T_j)
         requesters_lst = list(requesters) # conversion for sorting
         random.shuffle(requesters_lst) # another symmetry breaking, always good before a big operation
         # decreasing order sorting, per the algorithm
         requesters_lst.sort(key=lambda i: float(self.estimate_download_rate[i] / self.expected_threshold_rate[i]), reverse=True)
         # loop over all our requests
         for requester in requesters_lst:
             # try to assign them a proportional bandwidth based on Algorithm 5.2
             curr_bw = self.expected_threshold_rate[requester]
             # if we have enough bandwidth left, unchoke them and reduce capacity left
             if curr_bw <= bw_capacity:
                 self.unchoked_peers.add(requester)
                 bw_capacity -= curr_bw
                 bws.append(curr_bw)
                 chosen.append(requester)
         # this is vacuously true, we just define the number of slots to be
         # the number of peers we could unchoke with this bandwidth scheme and
         # our maximum bandwidth
         self.num_slots = len(self.unchoked_peers)
         # upload everything and return
         uploads = [Upload(self.id, peer_id, bw)
                for (peer_id, bw) in zip(chosen, bws)]
     return uploads
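Both BitTyrant variants here call an update_prev_unchoked_peers helper that is not shown. A hypothetical sketch of what it is assumed to do, following the alpha/gamma/r rules described in the comments (names and bookkeeping are guesses, not the authors' code):

def update_prev_unchoked_peers(self, round, history, alpha, gamma, r, peers=None):
    # Peers we uploaded to last round, and how much each of them gave back.
    uploaded_to = set(u.to_id for u in history.uploads[round - 1])
    gave_back = {}
    for d in history.downloads[round - 1]:
        gave_back[d.from_id] = gave_back.get(d.from_id, 0) + d.blocks

    for peer_id in uploaded_to:
        if peer_id in gave_back:
            # Reciprocated: use the observed rate and count the streak.
            self.estimate_download_rate[peer_id] = gave_back[peer_id]
            self.previously_unchoked_peers_counter[peer_id] += 1
            if self.previously_unchoked_peers_counter[peer_id] >= r:
                # Unchoked for r straight rounds: probe a lower threshold.
                self.expected_threshold_rate[peer_id] *= (1 - gamma)
        else:
            # We uploaded but got nothing back: demand a higher rate next time.
            self.expected_threshold_rate[peer_id] *= (1 + alpha)
            self.previously_unchoked_peers_counter[peer_id] = 0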
Example #29
0
 def uploads(self, requests, peers, history):
     """
     requests -- a list of the requests for this peer for this round
     peers -- available info about all the peers
     history -- history for all previous rounds
     returns: list of Upload objects.
     In each round, this will be called after requests().
     """
     round = history.current_round()
     # parameters (taken from BitTyrant algorithm -- Chapter 5)
     alpha = 0.20
     gamma = 0.10
     r = 3
     # we make an altruistic BitTyrant, so we do BitTyrant for 90% of our
     # bandwidth and 10% for optimistic unchoking (to fight a swarm of
     # only-BitTyrant peers)
     bw_capacity = 0.90 * self.up_bw  # initial bandwidth per round
     # resetting our unchoked peers set every time (since we only consider
     # last round)
     self.unchoked_peers = set()
     # get list of peers requesting pieces this round, and which piece they
     # are requesting -- we don't want to give rarest pieces
     requesters = set()  # set so we don't have duplicates
     # we want to see the rarity of the pieces we are asked for, so that
     # we don't give rare pieces to keep the interest in us
     requested_pieces = []
     # to keep track of which requester wants which piece
     # format: {requester_id: piece_id}
     requester_pieces = dict()
     # fill everything
     for request in requests:
         requesters.add(request.requester_id)
         requested_pieces.append(request.piece_id)
         requester_pieces[request.requester_id] = request.piece_id
     # final bandwidths and chosen peers to unchoke
     bws, chosen = [], []
     # round 0, initialization round
     if len(requests) == 0:
         chosen, bws = [], []
         # initialize our book-keeping parameters (to avoid KeyError later)
         for peer in peers:
             # arbitrary number of slots to begin, not super important here
             self.num_slots = int(math.sqrt(self.up_bw))
             # divide evenly across all slots, but really not so important
             self.estimate_download_rate[peer.id] = self.up_bw / float(
                 self.num_slots)
             self.expected_threshold_rate[peer.id] = self.up_bw / float(
                 self.num_slots)
             self.previously_unchoked_peers_counter[
                 peer.id] = 0  # starting round
         # upload nothing, just for closure
         uploads = [
             Upload(self.id, peer_id, bw)
             for (peer_id, bw) in zip(chosen, bws)
         ]
         return uploads  # should always be []
     # round 1 or more
     else:
         # change my internal state for no reason
         self.dummy_state["cake"] = "pie"
         # first, we must update our book-keeping parameters before this
         # round (equivalent to end of last round)
         update_prev_unchoked_peers(self, round, history, alpha, gamma, r)
         # we use our function to count the pieces
         # first argument: needed_pieces, we set to all pieces requested so it
         # gives us the breakdown for all pieces that matter
         pieces_freq = count_pieces(requested_pieces, peers)
         # On Canvas we counted 27 groups with students in them. We won't
         # give the rarest ~10% of pieces, so we only give a piece if more
         # than 3 peers own it.
         rare_pieces = set()  # pieces we won't give
         # check all the pieces rarer than our ~10% benchmark
         for piece_count, piece_id_lst in pieces_freq:
             if piece_count <= 3:
                 for piece_id in piece_id_lst:
                     rare_pieces.add(piece_id)
         # excellent, now we can sort by decreasing ratio (f_ij / T_j)
         requesters_lst = list(requesters)  # conversion for sorting
         random.shuffle(
             requesters_lst
         )  # another symmetry breaking, always good before a big operation
         # decreasing order sorting, per the algorithm
         requesters_lst.sort(
             key=lambda i: float(self.estimate_download_rate[i] / self.
                                 expected_threshold_rate[i]),
             reverse=True)
         # loop over all our requests
         for requester in requesters_lst:
             # this is the piece this requester wants
             piece_wanted = requester_pieces[requester]
             # if it is not top ~10% of rarest pieces (in a ~27 peers game)
             # then we give it, otherwise we keep it
             if piece_wanted not in rare_pieces:
                 # try to assign them a proportional bandwidth based on Algorithm 5.2
                 curr_bw = self.expected_threshold_rate[requester]
                 # if we have enough bandwidth left, unchoke them and reduce capacity left
                 if curr_bw <= bw_capacity:
                     self.unchoked_peers.add(requester)
                     bw_capacity -= curr_bw
                     bws.append(curr_bw)
                     chosen.append(requester)
         # now we add optimistic unchoking to combat a BitTyrant-only
         # network --> 10% of our bandwidth
         potential_peers = requesters.difference(self.unchoked_peers)
         if len(potential_peers) > 0:
             optimistic_peer = random.sample(potential_peers, 1)
             bws.append(0.10 * self.up_bw)
             chosen.append(optimistic_peer[0])
         # this is vacuously true, we just define the number of slots to be
         # the number of peers we could unchoke with this bandwidth scheme and
         # our maximum bandwidth
         self.num_slots = len(chosen)
         # upload everything and return
         uploads = [
             Upload(self.id, peer_id, bw)
             for (peer_id, bw) in zip(chosen, bws)
         ]
     return uploads
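The second variant also uses a count_pieces helper that is not shown. A hypothetical sketch matching how its result is consumed, i.e. (count, [piece ids with that count]) pairs sorted from rarest to most common:

from collections import defaultdict

def count_pieces(piece_ids, peers):
    # Hypothetical helper: for each requested piece, count how many peers
    # already have it, then group piece ids by that count.
    owners = defaultdict(int)
    for piece_id in set(piece_ids):
        for peer in peers:
            if piece_id in peer.available_pieces:
                owners[piece_id] += 1
    by_count = defaultdict(list)
    for piece_id, count in owners.items():
        by_count[count].append(piece_id)
    return sorted(by_count.items())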
Example #30
0
    def uploads(self, requests, peers, history):
        """
        requests -- a list of the requests for this peer for this round
        peers -- available info about all the peers
        history -- history for all previous rounds

        returns: list of Upload objects.

        In each round, this will be called after requests().
        """

        round = history.current_round()

        # // bookkeeping for other peer's histories
        if round == 0:  # // could switch to check size of .peerHistory dictionary instead
            # // initialize the peer info
            for peer in peers:
                self.peerHistory[peer.id] = [set()]
                self.peerHistoryNum[peer.id] = [0]
                self.peerLastAvailable[peer.id] = set(peer.available_pieces)
                self.myDownloadsByPeer[peer.id] = []
                self.myDownloadBlocksByPeer[peer.id] = []
                self.unchokedPeers.add(peer.id)
        else:
            # // do updates for peer info
            for peer in peers:
                av_set = set(peer.available_pieces)
                new_pieces = av_set.difference(
                    av_set.intersection(self.peerLastAvailable[peer.id]))
                self.peerHistory[peer.id].append(new_pieces)
                self.peerHistoryNum[peer.id].append(len(new_pieces))

            # // update my download information
            # // initialize dictionary entries for this round
            for peer in peers:
                self.myDownloadsByPeer[peer.id].append([])
                self.myDownloadBlocksByPeer[peer.id].append(0)

                # // input info from downloads
            for download in history.downloads[history.last_round()]:
                assert (self.id == download.to_id)

                # // track the downloaded objects by each peer
                self.myDownloadsByPeer[download.from_id][round -
                                                         1].append(download)

                # // track the number of blocks downloaded by each peer
                self.myDownloadBlocksByPeer[download.from_id][
                    round - 1] += download.blocks

            # // estimate the download rates by filling self.peerDownloadRate
            for peer in peers:
                if self.myDownloadBlocksByPeer[peer.id][round - 1] > 0:
                    # // if currently unchoked, note the rate
                    self.peerDownloadRate[
                        peer.id] = self.myDownloadBlocksByPeer[peer.id][round -
                                                                        1]
                else:
                    # // otherwise, estimate split upload as the total download rate divided by the estimated slots
                    # // MAY NEED CASE WORK FOR ROUND 0
                    estimatedSlots = 4
                    avgDownload = sum(self.peerHistoryNum[peer.id]) / float(
                        len(self.peerHistoryNum[peer.id]))
                    self.peerDownloadRate[
                        peer.
                        id] = self.conf.blocks_per_piece * avgDownload / estimatedSlots

            # // estimate the upload rates by filling self.peerUploadRate
            if round <= 10:
                # // initialize the u_j for the first round
                self.peerUploadRate = dict(self.peerDownloadRate)  # copy, so later updates don't alias the download estimates
            else:
                for peer_id in self.unchokedPeers:
                    # // update the u_j
                    # // INCLUDE LOGIC FOR LAST R ROUNDS ???
                    i_r = min(round, self.R)
                    if all(self.myDownloadBlocksByPeer[peer_id][round - 1 -
                                                                i] > 0
                           for i in range(i_r)):
                        # // if unchoked, decrease estimated upload
                        self.peerUploadRate[peer_id] = self.peerUploadRate[
                            peer_id] * (1 - self.Gamma)
                    else:
                        # // else increase estimated u_j
                        self.peerUploadRate[peer_id] = self.peerUploadRate[
                            peer_id] * (1 + self.Alpha)

            # // compute scores for peers
            scores = {}
            for peer in peers:
                if self.peerUploadRate[peer.id] == 0:
                    scores[peer.id] = 0.
                else:
                    scores[peer.id] = float(self.peerDownloadRate[
                        peer.id]) / self.peerUploadRate[peer.id]
            sortedScores = sorted(scores.iteritems(),
                                  key=operator.itemgetter(1),
                                  reverse=True)
            sortedIDs = [x[0] for x in sortedScores]
        """ Commented out debugging statements DNY 2/10/2016
        logging.debug("%s again.  It's round %d." % (
            self.id, round))

        # One could look at other stuff in the history too here.
        # For example, history.downloads[round-1] (if round != 0, of course)
        # has a list of Download objects for each Download to this peer in
        # the previous round.
        """
        chosen = []
        bws = []
        if (len(requests) == 0 or round == 0):
            """ Commented out debugging statements DNY 2/10/2016
            logging.debug("No one wants my pieces!")
            """
            pass
        else:
            """ Commented out debugging statements DNY 2/10/2016
            logging.debug("Still here: uploading to a random peer")
            # change my internal state for no reason
            self.dummy_state["cake"] = "pie"
            """

            bw_available = self.up_bw
            """"""
            print
            print "start looking here"
            print "I am agent " + str(self.id)

            for ID in sortedIDs:
                """
                print "upload rate to " + str(ID)
                print self.peerUploadRate[ID]
                """
                bw_available -= int(self.peerUploadRate[ID])
                if bw_available <= 0:
                    break
                chosen.append(ID)
                bws.append(int(self.peerUploadRate[ID]))
            """
            print "here are my bws: " + str(bws)
            print "here is to whom: " + str(chosen)
            """
        # create actual uploads out of the list of peer ids and bandwidths
        uploads = [
            Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)
        ]
        """ debugging prints 
        print
        print "Agent " + str(self.id) + " here!"
        print "I'm uploading " + str(len(uploads)) + " times"
        for upload in uploads:
            print "here's an upload:"
            print upload
        print 
        """
        return uploads
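A worked sketch of the download-rate estimate used above for peers that are not currently uploading to us (assumed numbers):

blocks_per_piece = 16                     # assumed piece size in blocks
new_pieces_per_round = [2, 3, 1]          # observed growth of the peer's available_pieces
estimated_slots = 4                       # assumed number of upload slots per peer

avg_new_pieces = sum(new_pieces_per_round) / float(len(new_pieces_per_round))   # 2.0
estimated_rate = blocks_per_piece * avg_new_pieces / estimated_slots            # 8.0 blocks/round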