def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    # No requests received -> just terminate
    if len(requests) == 0:
        return []

    # Find the number of slots we want to allocate this round
    n_slots = min(self.slots, len(requests))

    # Choose n peers completely at random from the set of peers that requested pieces.
    # This again breaks symmetry and makes us somewhat strategy proof (more on that in the writeup).
    # It also makes the likelihood of being reciprocated very large,
    # since we give away so much bw to a few peers.
    chosen_requests = random.sample(requests, n_slots)
    chosen = [request.requester_id for request in chosen_requests]

    # Distribute the bw among the chosen peers
    bws = even_split(self.up_bw, len(chosen))

    # Create the upload objects out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
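# Note: every snippet in this section calls even_split(), which is not defined here.
# The sketch below is only an assumed, hypothetical stand-in -- not the assignment's
# actual helper -- illustrating the expected behavior: split an integer bandwidth
# into n near-equal non-negative integer shares that sum exactly to the total.
def even_split(bw, n):
    base, extra = divmod(bw, n)
    # the first `extra` shares get one additional unit so that sum(shares) == bw
    return [base + 1 if i < extra else base for i in range(n)]
# e.g. even_split(10, 3) -> [4, 3, 3]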
def uploads(self, requests, peers, history):
    if len(requests) == 0:
        return []

    rec_from = defaultdict(lambda: 0)  # how much we've received from peers in the last round
    if history.downloads:  # guard against round 0, when there is no download history yet
        for x in history.downloads[-1]:
            rec_from[x.from_id] += x.blocks

    requests = list(set([x.requester_id for x in requests]))
    total = sum([rec_from[x] for x in requests])

    if total == 0:  # nobody has uploaded to this agent yet
        if len(requests) <= 4:  # if four or fewer requests, just fill them all
            chosen = requests[:]
        else:  # just randomly take 4
            chosen = random.sample(requests, 4)
        bws = even_split(self.up_bw, len(chosen))
        random.shuffle(bws)
        uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]
    else:  # people have uploaded to this agent before
        zeros = [x for x in requests if rec_from[x] == 0]  # those who did not give to this agent last round
        ones = [[int(rec_from[x] / float(total) * self.up_bw * 0.9), x]
                for x in requests if rec_from[x] > 0]  # those who have given
        remain = self.up_bw - sum([x[0] for x in ones])
        if remain > 0:  # if there's leftover bandwidth
            if len(zeros):  # give to someone who didn't give last time
                ones.append([remain, random.choice(zeros)])
            else:  # if everyone gave last time, give someone the extra bandwidth
                random.choice(ones)[0] += remain
        uploads = [Upload(self.id, peer_id, bw) for (bw, peer_id) in ones]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    if len(requests) == 0:
        return []

    n = min(len(requests), 3)
    chosen_requests = random.sample(requests, n)
    chosen = [request.requester_id for request in chosen_requests]

    # Evenly "split" my upload bandwidth among the chosen requesters
    bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    # count how many blocks each peer has uploaded to me in the last two rounds
    download_counts = defaultdict(lambda: 0)
    for r in history.downloads[-2:]:
        for down in r:
            if down.to_id == self.id:
                download_counts[down.from_id] += down.blocks

    if len(requests) == 0:
        return []

    # sort all the requesters by how much they've given to me (ties broken randomly)
    chosen = list(set([x.requester_id for x in requests]))
    chosen.sort(reverse=True, key=lambda x: (download_counts[x], random.random()))

    if len(chosen) > 3:
        # keep one optimistic-unchoke slot: refresh it if it is unset, no longer among
        # the lower-ranked requesters, or has already been held for 3 rounds
        if self.optimistic_id is None or self.optimistic_id not in chosen[3:] or self.optimistic_count >= 3:
            self.optimistic_id = random.choice(chosen[3:])
            self.optimistic_count = 0
        else:
            self.optimistic_count += 1
        chosen = chosen[:3] + [self.optimistic_id]

    bws = even_split(self.up_bw, len(chosen))
    random.shuffle(bws)
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    # No need to waste compute if there are no requests
    if len(requests) == 0:
        return []

    # Initialize a counter to keep track of how much the agent downloaded from all other
    # peers over the last two rounds
    uploader_c = Counter()
    two_last = history.downloads[-2:]
    requester_ids = set(x.requester_id for x in requests)

    # Iterate over all download objects that we received in the previous two rounds
    for download in chain(*two_last):
        # Only count the downloads that are from a peer that actually requested a piece from us.
        # requester_ids is a set, so this membership test is O(1).
        if download.from_id in requester_ids:
            uploader_c.update({download.from_id: download.blocks})

    # Choose the n peers who uploaded most to us as the ones we are going to reciprocate.
    # n is the smaller of the number of regular slots and the number of requesters.
    chosen = set(x[0] for x in uploader_c.most_common(min(len(requester_ids), self.normal_slots)))
    peer_ids = set(x.requester_id for x in requests).difference(chosen)

    # If there still are peers left to unchoke,
    # select a peer to optimistically unchoke if we either have not unchoked anyone yet
    # or if the optimistic unchoke interval (e.g. 3 rounds) has passed
    if bool(peer_ids) and (len(history.downloads) % self.optimistic_unchoke_interval == 0
                           or not self.optimistic_unchoke):
        unchoke = random.choice(list(peer_ids))
        self.optimistic_unchoke = unchoke

    # Add the optimistically unchoked peer to the set
    if self.optimistic_unchoke:
        chosen.add(self.optimistic_unchoke)

    # No need to go any further if no peers were chosen
    if len(chosen) < 1:
        return []

    # Distribute the bw as evenly as possible across the chosen peer(s)
    bws = even_split(self.up_bw, len(chosen))

    # Create upload objects for the peer(s) with their respective allocated bandwidth(s)
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    S = 4  # total unchoke slots
    round = history.current_round()

    # look at past rounds to determine previously cooperative peers
    if round >= 2:
        last_round_dl = history.downloads[round-1]
        second_last_round_dl = history.downloads[round-2]
    else:
        last_round_dl = []
        second_last_round_dl = []

    # initialize list of chosen peers to unchoke
    chosen = []

    # check if no peers need to be unchoked
    if len(requests) == 0:
        bws = []
    else:
        # Step 1: Pick the S-1 most cooperative peers with requests to unchoke.
        # Count total downloaded blocks from each peer in the last 2 rounds.
        downloads_per_peer = {peer.id: 0 for peer in peers}
        for dl in last_round_dl:
            downloads_per_peer[dl.from_id] += dl.blocks
        for dl in second_last_round_dl:
            downloads_per_peer[dl.from_id] += dl.blocks

        # sort peers with requests by amount downloaded from them, most cooperative first
        requester_ids = set([r.requester_id for r in requests])
        cooperative_peers = sorted(requester_ids, key=lambda x: downloads_per_peer[x], reverse=True)

        # choose the S-1 most cooperative peers that have requests to unchoke
        chosen = cooperative_peers[:S-1]

        # Step 2: optimistically unchoke 1 peer every 3 rounds, store choice in class state
        if round % 3 == 0 or not self.optimistic_id:
            unchosen_requesters = set(requester_ids) - set(chosen)
            if len(unchosen_requesters) > 0:
                self.optimistic_id = random.choice(tuple(unchosen_requesters))
                chosen.append(self.optimistic_id)
        else:
            chosen.append(self.optimistic_id)

        # Evenly "split" upload bandwidth among the chosen requesters
        bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    max_upload = 4  # max num of peers to upload to at a time
    requester_ids = list(set(map(lambda r: r.requester_id, requests)))
    n = min(max_upload, len(requester_ids))
    if n == 0:
        return []
    bws = even_split(self.up_bw, n)
    uploads = [Upload(self.id, p_id, bw)
               for (p_id, bw) in zip(random.sample(requester_ids, n), bws)]
    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        # tally how many blocks each peer has uploaded to us over the whole history
        download_bandwidth = {}
        for peer in peers:
            download_bandwidth[peer.id] = 0
        for downloads in history.downloads:
            for d in downloads:
                download_bandwidth[d.from_id] += d.blocks

        # pick the top 3 as unchoked peers
        chosen = sorted(download_bandwidth, key=download_bandwidth.get, reverse=True)[:3]

        # every 3 rounds, randomly unchoke another peer
        if round % 3 == 0:
            chosen.append(random.choice(peers).id)

        # Evenly "split" my upload bandwidth among the chosen peers
        bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    ##############################################################################
    # The code and suggestions here will get you started for the standard client #
    # You'll need to change things for the other clients                         #
    ##############################################################################

    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        logging.debug("Still here: uploading to a random peer")
        #########################################################################
        # The dummy client picks a single peer at random to unchoke.            #
        # You should decide a set of peers to unchoke according to the protocol #
        #########################################################################
        request = random.choice(requests)
        chosen = [request.requester_id]

        # Now that we have chosen who to unchoke, the standard client evenly shares
        # its bandwidth among them
        bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    # You don't need to change this
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    # logging.debug("%s again. It's round %d." % (self.id, round))

    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    if len(requests) == 0:
        # logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        # logging.debug("Still here: uploading to a random peer")
        # change my internal state for no reason
        self.dummy_state["cake"] = "pie"

        request = random.choice(requests)
        chosen = [request.requester_id]

        # Evenly "split" my upload bandwidth among the one chosen requester
        bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        logging.debug("Still here: uploading to a random peer")
        # change my internal state for no reason
        self.dummy_state["cake"] = "pie"

        request = random.choice(requests)
        chosen = [request.requester_id]

        # Evenly "split" my upload bandwidth among the one chosen requester
        bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()

    # debugging statements (kept for reference):
    # print "my requests:"; print(requests)
    # print "my peers:"; print(peers)
    # print "my history:"; print(history)

    if len(requests) == 0:
        chosen = []
        bws = []
    else:
        # Score each requester
        requesters = []
        scores = {}
        for request in requests:
            # 'requesters' is a list of requester_id's
            requesters.append(request.requester_id)
        requesters = list(set(requesters))
        numRequesters = len(requesters)

        if numRequesters > 4 and round > 0:
            # loop through requesters and sum the downloaded blocks from the previous two rounds
            # dictionary of zero counts
            scores = dict(zip(requesters, [0] * numRequesters))
            if round > 1:
                for i in [round-1, round-2]:
                    for dl in history.downloads[i]:
                        if dl.from_id in requesters:
                            scores[dl.from_id] += dl.blocks
            else:
                for dl in history.downloads[round-1]:
                    if dl.from_id in requesters:
                        scores[dl.from_id] += dl.blocks

            # get the top three requesters
            sortedScores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
            chosen = [x[0] for x in sortedScores[:3]]

            # if round % 3 == 0, optimistically unchoke someone (self.optimisticID)
            if (round % 3) == 0:
                self.optimisticID = random.choice(sortedScores[3:])[0]
            # add the optimistically unchoked peer to chosen
            chosen.append(self.optimisticID)

            # create upload objects, probably using even split
            bws = even_split(self.up_bw, len(chosen))
        else:
            chosen = requesters
            bws = even_split(self.up_bw, numRequesters)

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    # take care of updating the trackers for your friendliest peers
    self.two_round_ago = self.one_round_ago.copy()
    self.one_round_ago = dict()
    if round != 0:
        for d in history.downloads[round-1]:
            # let's assume that we can just add blocks split among pieces
            # rather than average among pieces
            if d.from_id in self.one_round_ago.keys():
                self.one_round_ago[d.from_id] += d.blocks
            else:
                self.one_round_ago[d.from_id] = d.blocks

    logging.debug("Here are my peer histories from two rounds ago: %s" % self.two_round_ago)
    logging.debug("and from one round ago: %s" % self.one_round_ago)

    # and now add up the last two rounds
    c = Counter(self.two_round_ago)
    c.update(self.one_round_ago)
    best_friends = c.most_common()
    logging.debug("and my best friends!: %s" % best_friends)

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        logging.debug("Still here: uploading to my favorite peers")
        # change my internal state for no reason
        # No! Bad! Keep the cake!
        # self.dummy_state["cake"] = "pie"

        # **Reciprocal unchoking**
        # let's assume that we only want to give to our
        # mostest bestest friends, even if they don't request from us.
        # It promotes charity :)
        # Let's also do some handling to randomize if we have multiple best friends
        # of the same bestiness.
        # Most of this is just ugly handling of cases where we don't have enough friends.
        chosen = []

        # handle the bestest friends either being a clear best or also tied
        if len(best_friends) > 2:
            candidate_best_friends = [best_friends[2][0]]
            best_friend_counter = 3
            # handle best friends tied for bestiness
            while (best_friend_counter < len(best_friends) and
                   best_friends[best_friend_counter][1] == best_friends[2][1]):
                candidate_best_friends.append(best_friends[best_friend_counter][0])
                best_friend_counter += 1
            if best_friends[0][1] > best_friends[2][1]:
                chosen.append(best_friends[0][0])
            else:
                candidate_best_friends.append(best_friends[0][0])
            if best_friends[1][1] > best_friends[2][1]:
                chosen.append(best_friends[1][0])
            else:
                candidate_best_friends.append(best_friends[1][0])
        else:
            candidate_best_friends = []
            if len(best_friends) > 1:
                chosen.append(best_friends[1][0])
            if len(best_friends) > 0:
                chosen.append(best_friends[0][0])

        # finally, we can actually randomize
        random.shuffle(candidate_best_friends)
        for i in range(3 - len(chosen)):
            # let's assume we're okay leaving best friend slots empty
            if i < len(candidate_best_friends):
                chosen.append(candidate_best_friends[i])

        # **Optimistic unchoking**
        # Again, let's assume that our optimistic unchoke doesn't necessarily
        # have to be in the requests.
        # Let's also assume that we won't give to the optimistic unchoke
        # if they're already in our best friends -- we can wait until they're not,
        # or a new optimistic unchoke is set.
        if round % 3 == 0:
            self.optimistic = random.choice(peers).id
        if self.optimistic not in chosen:
            chosen.append(self.optimistic)

        logging.debug("And here are my chosen peers: %s", chosen)

        # Evenly "split" my upload bandwidth among the chosen requesters
        bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    curr_round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, curr_round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    past_downloads = {}
    if curr_round > 0:
        past_downloads = history.downloads[curr_round - 1]
        if curr_round > 1:
            temp_downloads = history.downloads[curr_round - 2]
            for i in temp_downloads:
                temp = False
                for j in past_downloads:
                    if i.from_id == j.from_id:
                        j.blocks = (j.blocks + i.blocks) / 2.0
                        temp = True
                if temp == False:
                    past_downloads.append(i)
    # past_downloads now holds all downloads from the last two rounds, averaged
    # if we got something from the same agent in both rounds

    # initialize a dictionary from the past download objects:
    # get the total speed from each peer, summed over different pieces
    download_dict = {}
    for download in past_downloads:
        if download.from_id in download_dict:
            download_dict[download.from_id] += download.blocks
        else:
            download_dict[download.from_id] = download.blocks

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        logging.debug("Still here: uploading to peers")

        # list of who we will upload to
        chosen = []
        dl_dict_items = list(download_dict.items())
        dl_dict_items.sort(key=lambda x: x[1], reverse=True)
        for tup in dl_dict_items:
            num = tup[0]
            speed = tup[1]
            double_count = False
            for request in requests:
                if num == request.requester_id and double_count == False and len(chosen) < self.num_slots:
                    chosen.append(num)
                    double_count = True

        # optimistic unchoking
        random_request = random.choice(requests)
        # boolean variable for whether we have already chosen all the requesters;
        # no need for unchoking in that case.
        unchoke = False
        for req in requests:
            if req.requester_id not in chosen:
                unchoke = True
        while random_request.requester_id in chosen and unchoke == True:
            random_request = random.choice(requests)
        if unchoke == True:
            chosen.append(random_request.requester_id)

        # Evenly "split" my upload bandwidth among the chosen requesters
        bws = even_split(self.up_bw, len(chosen))
        print(chosen)
        print(bws)

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    ##################################################################
    ########### updating d_js ########################################
    ##################################################################
    alpha = 0.2
    gamma = 0.1
    round = history.current_round()
    self.bandwidthHistory.append(self.up_bw)

    if round == 0:
        bw_list = even_split(self.up_bw, len(peers))
        for peer, i in zip(peers, range(len(peers))):
            self.downloadRate[peer.id] = (self.conf.max_up_bw - self.conf.min_up_bw) / (4 * 2)
            if bw_list[i] == 0:
                self.uploadRate[peer.id] = 0.5
            else:
                self.uploadRate[peer.id] = bw_list[i]
            self.slots[peer.id] = 4
            self.downloadUploadRatio[peer.id] = 1
    else:
        for peer in peers:
            self.downloadRate[peer.id] = 0
            for download in history.downloads[round-1]:
                if peer.id == download.from_id:
                    self.downloadRate[peer.id] += download.blocks
                    if download.blocks == 0:
                        print "!!!!!! %s uploaded %s block(s)" % (peer.id, download.blocks)
                    self.slots[peer.id] = mean(self.bandwidthHistory) / float(self.downloadRate[peer.id])
                    # Find out how to determine the max and min bw, or infer them from personal history
                    if round >= 3:
                        peer_download = 0
                        for download2 in history.downloads[round-2]:
                            if peer.id == download2.from_id:
                                for download3 in history.downloads[round-3]:
                                    if peer.id == download3.from_id:
                                        peer_download += 1
                        if peer_download > 0:
                            self.uploadRate[peer.id] *= 1 - gamma
                    break
            if len(peer.available_pieces) > 0:
                av_pieces = float(len(peer.available_pieces))
                rnd = float(round)
                slots = float(self.slots[peer.id])
                self.downloadRate[peer.id] = av_pieces / (rnd * self.conf.blocks_per_piece * slots)
                self.uploadRate[peer.id] *= 1 + alpha
            self.downloadUploadRatio[peer.id] = self.downloadRate[peer.id] / self.uploadRate[peer.id]

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
        uploads = []
    else:
        logging.debug("Still here: uploading to a random peer")
        # change my internal state for no reason
        self.dummy_state["cake"] = "pie"

        ##################################################################
        ########### Building upload list #################################
        ##################################################################
        sumUpload = 0
        chosen = {}
        downloadUploadRatio_tmp = {}
        # creating a dict with ratios for only the peers in requests
        for request in requests:
            downloadUploadRatio_tmp[request.requester_id] = self.downloadUploadRatio[request.requester_id]
        while sumUpload <= self.up_bw and len(downloadUploadRatio_tmp) > 0:
            peer_id = max(downloadUploadRatio_tmp, key=downloadUploadRatio_tmp.get)
            chosen[peer_id] = downloadUploadRatio_tmp.pop(peer_id)
            sumUpload += self.uploadRate[peer_id]

        # Calculate the total proportional BW allocated to other peers
        totalUploadBW = 0
        for choice in chosen:
            totalUploadBW += chosen[choice]

        # Make each BW a proportion of totalUploadBW
        if float(totalUploadBW) == 0:
            uploads = []
        else:
            for choice in chosen:
                chosen[choice] = 100 * float(chosen[choice]) / float(totalUploadBW)

            # Now divide our BW as integers according to the chosen vector
            peerWeights = [value for (key, value) in sorted(chosen.items())]
            peerNames = sorted(chosen)
            bws = proportional_split(self.up_bw, peerWeights)

            # create actual uploads out of the list of peer ids and bandwidths
            uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(peerNames, bws)]

    return uploads
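# Note: the snippet above calls proportional_split(), which is not shown in this
# section. The sketch below is an assumed, hypothetical stand-in -- not the actual
# helper -- illustrating the expected behavior: split an integer bandwidth across
# slots in proportion to the given weights, keeping the shares integral and giving
# any rounding remainder to the largest weight.
def proportional_split(bw, weights):
    total = float(sum(weights))
    if total == 0:
        return [0 for _ in weights]
    shares = [int(bw * w / total) for w in weights]
    # hand the leftover units to the slot with the largest weight
    shares[weights.index(max(weights))] += bw - sum(shares)
    return shares
# e.g. proportional_split(10, [60, 25, 15]) -> [7, 2, 1]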
def uploads(self, requests, peers, history):
    """
    ** must add up to no more than the peer's bandwidth cap **
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    # todo: check we don't upload more than self.up_bw?
    rnd = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, rnd))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    bws = []
    # get requester ids
    rids_set = set(map(lambda i: i.requester_id, requests))
    chosen_list_ids = []
    res = []
    logging.debug('RIDS %s' % (rids_set))

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        CUTOFF = 4

        # reciprocation:
        # definitely upload to those who we have recently downloaded from
        # (use similar logic for the PropShare client also)
        helpers = []
        empty_helpers = []
        for d in (history.downloads[rnd - 1] if rnd > 0 else []):
            if d.to_id == self.id and d.from_id in rids_set:
                helpers.append(d)
        # sort by highest value providers in the preceding round
        helpers.sort(key=lambda i: i.blocks, reverse=True)
        helper_ids = list(map(lambda k: k.from_id, helpers))

        # if peers provided 0 value at the beginning, append them
        if len(helper_ids) < CUTOFF:
            lazy_nodes = []
            for lazy_req in requests:
                if lazy_req.requester_id not in helper_ids:
                    lazy_nodes.append(lazy_req)
            amount_to_add = min(CUTOFF - len(helper_ids), len(lazy_nodes))
            for x in range(amount_to_add):
                empty_helpers.append(lazy_nodes[x])

        empty_ids = list(map(lambda l: l.requester_id, empty_helpers))

        # set complements
        chosen_list_ids = helper_ids + empty_ids
        chosen_set_ids = set(chosen_list_ids)
        not_chosen_ids = rids_set - chosen_set_ids

        # todo: kick them out after 3 rounds if not helping
        # optimistic unchoking
        if rnd % 3 == 0 and len(not_chosen_ids) != 0:
            request_id = random.choice(list(not_chosen_ids))
            res = chosen_list_ids[:CUTOFF - 1] + [request_id]
            self.dummy_state['unchoked_id'] = request_id
        else:
            try:
                res = chosen_list_ids[:CUTOFF - 1] + [self.dummy_state['unchoked_id']]
            except KeyError:
                res = chosen_list_ids[:CUTOFF]
        # todo: maybe add something to internal state to remember who we
        # optimistically unchoked so we can stop uploading to them after 3 rounds?

        logging.debug("Still here: uploading to a random peer")

        # Evenly "split" my upload bandwidth among the chosen requesters
        # (only call this when the chosen list isn't empty)
        if len(res) != 0:
            bws = even_split(self.up_bw, len(res))
        else:
            return []

        # change my internal state for no reason
        self.dummy_state["cake"] = "pie"

    # todo: decide how many requests to actually fill
    # (i.e. how many slots each peer should have -- maybe some function of self.up_bw)

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(res, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    ### CAPPED version of Tyrant, because we have a max bandwidth ###

    # initially assume 4 upload spots, with the same bw cap
    init_spots, bw_cap = 4, self.up_bw
    initializer = float(even_split(bw_cap, init_spots)[0])
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))

    # used for calculating the download rate
    for peer in peers:
        if round == 0:
            # initialize dictionaries
            self.state["blocks_downloaded_last_round"][peer.id] = 0
            self.state["conseq_rounds_unchoked_by"][peer.id] = 0
            # we are estimating the other peers have the same number
            # of upload spots and bw in the first round
            self.state["exp_down_bw"][peer.id] = initializer
            self.state["exp_min_up_bw"][peer.id] = initializer
        else:
            piece_delta = len(peer.available_pieces) - self.state["previously_available_pieces"][peer.id]
            self.state["blocks_downloaded_last_round"][peer.id] = piece_delta * self.conf.blocks_per_piece
        # update previously available pieces at "end of round" - this will not be adjusted after this
        self.state["previously_available_pieces"][peer.id] = len(peer.available_pieces)

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        prev_round_chokers = []
        if round != 0:
            prev_round_downloads = history.downloads[-1:]
            prev_round_unchokers = set([])

            # compute how many blocks you have downloaded from each peer j last round
            blocks_downloaded_last_round = pd.DataFrame(data=np.zeros(len(peers)),
                                                        index=[peer.id for peer in peers])
            for dl_round in prev_round_downloads:
                for download in dl_round:
                    blocks_downloaded_last_round.loc[download.from_id] += download.blocks
                    prev_round_unchokers.add(download.from_id)

            # if currently unchoked, the expected download rate is the number of blocks you downloaded
            for prev_round_unchoker in prev_round_unchokers:
                self.state["exp_down_bw"][prev_round_unchoker] = \
                    blocks_downloaded_last_round.loc[prev_round_unchoker].values[0]
                self.state["conseq_rounds_unchoked_by"][prev_round_unchoker] += 1
                # if you have been unchoked for more than 2 rounds, decrease your expected
                # required minimum upload rate for reciprocation
                if self.state["conseq_rounds_unchoked_by"][prev_round_unchoker] > 2:
                    u_j = self.state["exp_min_up_bw"][prev_round_unchoker]
                    gamma = .1
                    sigma = .5
                    self.state["exp_min_up_bw"][prev_round_unchoker] = \
                        (1 - gamma - sigma ** history.current_round()) * u_j

            # those not unchoking us
            prev_round_chokers = [x for x in [peer.id for peer in peers] if x not in prev_round_unchokers]
            for prev_round_choker in prev_round_chokers:
                # expect flow to be download_in/4
                self.state["exp_down_bw"][prev_round_choker] = \
                    self.state["blocks_downloaded_last_round"][prev_round_choker] / float(init_spots)
                # increase T_j for those who are choking us
                alpha = .2
                sigma = .5
                self.state["exp_min_up_bw"][prev_round_choker] = \
                    (1 + alpha + sigma ** history.current_round()) * self.state["exp_min_up_bw"][prev_round_choker]
                self.state["conseq_rounds_unchoked_by"][prev_round_choker] = 0

        # ratios
        ratios = dict()
        f_ji, t_j = self.state["exp_down_bw"], self.state["exp_min_up_bw"]
        for peer in peers:
            # if they have been doing no downloading, do not share
            if t_j[peer.id] == 0:
                ratios[peer.id] = 0.0
            # otherwise, look at the return on investment
            else:
                ratios[peer.id] = float(f_ji[peer.id]) / float(t_j[peer.id])
        sorted_ratios = sorted(ratios, key=ratios.get, reverse=True)

        # find the list of requesters who are choking us
        requesters = [request.requester_id for request in requests]
        requester_chokers = set(requesters).intersection(set(prev_round_chokers))

        # optimism bandwidth
        if history.current_round() < 15 and len(requester_chokers) > 0:
            beta = .25
            optimism_bw = math.floor(bw_cap * beta)
            reciprocation_bw_cap = bw_cap - optimism_bw
            # randomly choose a choker to unchoke
            random_requester_choker = random.choice(list(requester_chokers))
            # allocate exploration bw
            chosen, bws = [random_requester_choker], [optimism_bw]
        else:
            reciprocation_bw_cap = bw_cap
            chosen, bws = [], []
            random_requester_choker = None

        # iterate over requesters in order of sorted ratios
        total_t_j = 0.0
        for peer in sorted_ratios:
            if peer in requesters and peer != random_requester_choker:
                # round up -> going below the expected min threshold would waste BW
                peer_exp_min_up_bw = math.ceil(self.state["exp_min_up_bw"][peer])
                # check to see we don't exceed the upload capacity
                if peer_exp_min_up_bw + total_t_j < reciprocation_bw_cap:
                    chosen.append(peer)
                    bws.append(peer_exp_min_up_bw)
                    # increment
                    total_t_j += peer_exp_min_up_bw

        # randomly assign any extra bandwidth
        left_over_bw = reciprocation_bw_cap - total_t_j
        requesters_not_allocated_bw = [requester for requester in requesters if requester not in chosen]
        if len(requesters_not_allocated_bw) > 0:
            left_over_random_choice = random.choice(requesters_not_allocated_bw)
            chosen.append(left_over_random_choice)
            bws.append(left_over_bw)

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    peer_contribution = []
    if round >= 2:
        for hist in history.downloads[round - 1]:
            from_you = hist.from_id
            num_blocks = hist.blocks
            if hist.to_id == self.id and "Seed" not in from_you:
                peer_contribution.append([from_you, num_blocks])
    logging.debug("total peer contributions from last round %s" % peer_contribution)

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        logging.debug("Still here: uploading to the best peer")
        # change my internal state for no reason
        self.dummy_state["cake"] = "pie"

        ###### history based on the number of blocks they gave you that you needed
        n = min(len(requests), 3)
        contr_lst = [i for [i, j] in peer_contribution]
        if n == 0:
            chosen = []
        else:
            chosen = contr_lst[:n - 1]
        request_remaining = [i for i in contr_lst if i not in chosen]

        if len(contr_lst) == 0 or len(request_remaining) == 0:
            requested = random.choice(requests)
            chosen.append(requested.requester_id)
        else:
            requested = random.choice(request_remaining)
            chosen.append(requested)

        logging.debug("who is in chosen %s" % (chosen))
        logging.debug("who got in chosen because of request %s" % (requested))

        # Split my upload bandwidth among the chosen peers in proportion to their
        # contributions, reserving roughly 10% for the last (optimistic) slot
        total_contr = sum(j for [i, j] in peer_contribution if i in chosen)
        bws = [math.floor(float(j) / float(total_contr) * self.up_bw * 0.9)
               for [i, j] in peer_contribution if i in chosen]
        bws.append(0)
        if len(bws) <= 1:
            bws = even_split(self.up_bw, len(chosen))
        else:
            bws[-1] = math.floor(0.1 * self.up_bw)

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    round = history.current_round()

    ################################################################
    ########## Implementing optimistic unchoking ###################
    ################################################################
    S = 4  # Upload slots
    request_bwd_history = {}
    chosen = []

    if round % 3 == 0:
        id_list = []
        for peer_info in peers:
            id_list.append(peer_info.id)
        for peer_id in chosen:
            id_list.remove(peer_id)
        if len(id_list) > 0:
            new_opt_unchoke = random.choice(id_list)
            chosen.append(new_opt_unchoke)
            self.current_opt_unchoke = new_opt_unchoke
    else:
        chosen.append(self.current_opt_unchoke)

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        ##################################################################
        ########## Implementing reciprocation ############################
        ##################################################################
        for request in requests:
            request_bwd_history[request.requester_id] = 0

        # count the bw received from each requesting peer in the last round
        if round >= 1:
            for download in history.downloads[round-1]:
                if download.from_id in request_bwd_history:
                    request_bwd_history[download.from_id] += download.blocks

        # count the bw received from each requesting peer in the round before that
        if round >= 2:
            for download in history.downloads[round-2]:
                if download.from_id in request_bwd_history:
                    request_bwd_history[download.from_id] += download.blocks

        for i in range(0, S - 1):
            if len(request_bwd_history) > 0:
                peer_id = max(request_bwd_history, key=request_bwd_history.get)
                request_bwd_history.pop(peer_id)
                chosen.append(peer_id)

        #############################################################
        bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    # Assumption: Every round is 10 seconds of time.
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))

    num_slots = min(len(peers), self.max_slots)

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        # Figure out who requested blocks.
        request_ids = []
        for request in requests:
            request_ids.append(request.requester_id)
        request_ids = set(request_ids)

        # Use previous history to determine unchoke priority.
        # Determine who sent us a download request.
        prev_downloads = []
        if round != 0:
            # Assumption: for round 1, we use the previous round
            # to choose the agents to reciprocate.
            if round != 1:
                prev_downloads = history.downloads[round-2]
            prev_downloads += history.downloads[round-1]

        # Shuffle downloads to prevent symmetry.
        random.shuffle(prev_downloads)

        # Determine how many blocks we've downloaded from neighbors
        download_counts = {}
        for download in prev_downloads:
            if download.from_id in download_counts:
                download_counts[download.from_id] += download.blocks
            else:
                download_counts[download.from_id] = download.blocks

        # If we aren't finding a new peer to optimistically unchoke
        if (round % 3 != 0) and (self.optimistic != None):
            # We make sure not to repeat the peer currently optimistically unchoked
            if self.optimistic in download_counts:
                request_ids -= {self.optimistic}

        # Order peers in descending order by the amount we downloaded from them
        ordered_peers = [x for (x, y) in sorted(download_counts.items(),
                                                key=lambda pair: pair[1],
                                                reverse=True)]

        # Choose peers who have requested from us, based on the amount they have uploaded to us
        chosen = []
        slot_num = 0
        for peer in ordered_peers:
            if peer in request_ids:
                chosen.append(peer)
                # Prevent repeats.
                request_ids -= {peer}
                slot_num += 1
                if slot_num == num_slots - 1:
                    break

        # If we haven't used all our slots, we're done, since
        # we've handled all requests.
        if slot_num != num_slots - 1:
            # Last slot is optimistic, changing every 3rd round.
            if (self.optimistic == None) or ((round % 3) == 0):
                if request_ids:
                    optimistic = random.choice(list(request_ids))
                    chosen += [optimistic]
                    self.optimistic = optimistic
            else:
                chosen += [self.optimistic]

        # Evenly "split" my upload bandwidth among the number of slots we have
        bws = even_split(self.up_bw, num_slots)[0:len(chosen)]

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        if round == 0:
            # iterate through all requests and put all ids into chosen.
            chosen = [request.requester_id for request in requests]
            bws = even_split(self.up_bw, len(chosen))
        else:
            # tally blocks received last round from each peer that is requesting now
            prop_share_ids = {}
            total = 0
            prev_received = history.downloads[round-1]
            for dl in prev_received:
                for request in requests:
                    if dl.from_id == request.requester_id:
                        prop_share_ids[dl.from_id] = prop_share_ids.get(dl.from_id, 0) + dl.blocks
                        total += dl.blocks

            # requesters that did not upload to us last round
            requester_ids = set(request.requester_id for request in requests)
            chosen = list(prop_share_ids.keys())
            unshared = list(requester_ids - set(chosen))
            if len(unshared) > 0:
                # optimistically unchoke one of them
                chosen.append(random.choice(unshared))

            # give each proportional sharer its share of the non-optimistic bandwidth
            free_bw = 1.0 - self.unchoke_portion
            bws = []
            for peer_id in prop_share_ids:
                bws.append(int(math.floor(self.up_bw * free_bw * prop_share_ids[peer_id] / total)))
            # hand any rounding leftover to the last proportional sharer
            if math.floor(self.up_bw * free_bw) > sum(bws):
                if len(bws) == 0:
                    bws.append(int(math.floor(self.up_bw * free_bw) - sum(bws)))
                else:
                    bws[-1] += int(math.floor(self.up_bw * free_bw) - sum(bws))
            # the reserved portion goes to the optimistic unchoke
            if len(unshared) > 0:
                bws.append(int(math.floor(self.up_bw - math.floor(self.up_bw * free_bw))))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    # tally the blocks each non-seed peer uploaded to us over the last two rounds
    peer_contribution = dict()
    if round >= 2:
        for hist in (history.downloads[round - 1] + history.downloads[round - 2]):
            from_you = hist.from_id
            num_blocks = hist.blocks
            if hist.to_id == self.id and "Seed" not in from_you:
                if from_you not in peer_contribution.keys():
                    peer_contribution[from_you] = num_blocks
                else:
                    peer_contribution[from_you] += num_blocks

    # invert the dictionary: score -> list of peers with that score,
    # then rank the scores in ascending order (so ranked.pop() yields the best)
    byscore = {}
    for k, v in peer_contribution.iteritems():
        byscore.setdefault(v, []).append(k)
    ranked = byscore.items()
    ranked.sort(key=lambda t: t[0])

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        logging.debug("Still here: uploading to the best peer")

        ###### history based on the number of blocks they gave you that you needed
        n = min(len(requests), 3)
        contr_lst = peer_contribution.keys()
        if n == 0:
            chosen = []
        else:
            # fill up to n slots with the highest-scoring peers, breaking ties randomly
            chosen = []
            while n > 0 and ranked != []:
                score, candidates = ranked.pop()
                if n < len(candidates):
                    chosen.extend(random.sample(candidates, n))
                    n = 0
                else:
                    n -= len(candidates)
                    chosen.extend(candidates)

        request_remaining = [i for i in contr_lst if i not in chosen]

        # optimistic unchoking
        if len(contr_lst) == 0 or len(request_remaining) == 0:
            requested = random.choice(requests)
            chosen.append(requested.requester_id)
        else:
            requested = random.choice(request_remaining)
            chosen.append(requested)

        # Evenly "split" my upload bandwidth among the chosen requesters
        bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    """
    The reference client splits its upload bandwidth equally among the S slots,
    with each assigned the so-called equal-split rate. Optimistic unchoking helps
    a client to explore its neighborhood and find peers that will reciprocate and
    provide high upload bandwidth. Optimistic unchoking is also good for the
    system, helping peers that have just joined the swarm to obtain their first
    pieces before they have something they can reciprocate with.
    """
    # Symmetry breaking is good...
    random.shuffle(peers)

    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        logging.debug("Still here: uploading to a random peer")

        # Build a (peer id -> blocks received from that peer last round) mapping,
        # counting only peers that are requesting from us
        blocks_received = {}
        requester_list = [x.requester_id for x in requests]
        if round > 0:
            for br in history.downloads[round - 1]:
                if br.from_id not in blocks_received and br.from_id in requester_list:
                    blocks_received[br.from_id] = br.blocks
                elif br.from_id in requester_list:
                    blocks_received[br.from_id] += br.blocks

        chosen = []
        bws = []

        # Sort the dict in descending order of blocks received
        blocks_received = dict(sorted(blocks_received.items(), key=operator.itemgetter(1), reverse=True))

        # Take the 3 peers that uploaded the most to us
        chosen = list(blocks_received.keys())[:3]

        # randomly choose a peer for the optimistic slot
        request = random.choice(requests)
        # and add it to the chosen list
        chosen.append(request.requester_id)

        # Evenly "split" my upload bandwidth among the chosen requesters
        bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        logging.debug("Still here: uploading to a random peer")

        # Figure out who requested blocks.
        request_ids = []
        for request in requests:
            request_ids.append(request.requester_id)
        request_ids = set(request_ids)

        # Use previous history to determine unchoke priority.
        # Determine who uploaded blocks to us last round.
        prev_downloads = []
        if round != 0:
            prev_downloads += history.downloads[round-1]

        download_counts = {}
        for download in prev_downloads:
            if download.from_id in download_counts:
                download_counts[download.from_id] += download.blocks
            else:
                download_counts[download.from_id] = download.blocks

        # Determine allocation of 90% of bandwidth.
        prev_ids = set(download_counts)
        isect = request_ids.intersection(prev_ids)
        total = 0
        for i in isect:
            total += download_counts[i]

        chosen = []
        bws = []
        if isect:
            # Avoid possible issues when deleting from an iterated set
            isect_copy = set(isect)
            for i in isect:
                allocated_bw = int(0.9 * float(download_counts[i]) / total * self.up_bw)
                # If the allocation is too small
                if allocated_bw == 0:
                    isect_copy -= {i}
                else:
                    chosen.append(i)
                    bws.append(allocated_bw)

            # Randomly allocate the remaining 10% to a request id we haven't allocated bandwidth to
            remaining_requests = request_ids - isect_copy
            if remaining_requests:
                chosen.append(random.choice(list(remaining_requests)))
                bws.append(int(self.up_bw * 0.1))
            else:
                # Redistribute among the iterated set if we have no more remaining requests.
                for (index, i) in enumerate(chosen):
                    allocated_bw = int(0.1 * float(download_counts[i]) / total * self.up_bw)
                    bws[index] += allocated_bw
        else:
            # Even distribution across requests
            chosen = list(request_ids)
            bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, incoming_requests, peers, history):
    """
    incoming_requests: list of Request objects.
    peers: list of PeerInfo objects.
    history: AgentHistory object.

    returns: list of Upload objects.

    uploads will be called after requests
    """
    unchoked_peer_id_list = []
    current_round = history.current_round()

    cooperative_peers = {}
    if current_round > 1:
        # Since decisions are made every 10 secs, 20 seconds is best represented by two rounds.
        cooperative_peers = {d.from_id: d.blocks for d in history.downloads[current_round - 1]}
        for download in history.downloads[current_round - 2]:
            if download.from_id in cooperative_peers:
                cooperative_peers[download.from_id] += download.blocks
            else:
                cooperative_peers[download.from_id] = download.blocks

    # Nobody wants our pieces
    if len(incoming_requests) == 0:
        bandwidths = []
    else:
        requester_id_list = list({r.requester_id for r in incoming_requests})
        # Requesters shuffled for impartiality
        random.shuffle(requester_id_list)

        # Keeps the order of the cooperative peers from most to least cooperative.
        cooperative_peer_id_list = map(lambda x: x[0],
                                       sorted(cooperative_peers.iteritems(),
                                              key=lambda (k, v): (v, k),
                                              reverse=True))
        cooperative_requester_id_list = filter(lambda cp: cp in requester_id_list,
                                               cooperative_peer_id_list)

        # Number of slots not usually optimistically unchoked
        reciprocative_slots = self.upload_slots - self.optimistic_slots

        # Add at most 3 peers by download speed ranking
        unchoked_peer_id_list = cooperative_requester_id_list[:reciprocative_slots]

        # Use the rest of the reciprocative slots to unchoke randomly shuffled requesters
        for requester_id in requester_id_list:
            if len(unchoked_peer_id_list) >= reciprocative_slots:
                break
            elif requester_id not in unchoked_peer_id_list:
                unchoked_peer_id_list.append(requester_id)

        # If the optimistically unchoked peer is no longer requesting, replace it.
        if current_round % 3 == 0 or self.optimistically_unchoked_peer_id not in requester_id_list:
            # Optimistically unchoke a peer
            for requester_id in requester_id_list:
                if requester_id not in unchoked_peer_id_list:
                    unchoked_peer_id_list.append(requester_id)
                    self.optimistically_unchoked_peer_id = requester_id
                    break
        else:
            # Unchoke the same agent as in the previous round
            unchoked_peer_id_list.append(self.optimistically_unchoked_peer_id)

        # Evenly "split" my upload bandwidth among the unchoked requesters
        bandwidths = even_split(self.up_bw, len(unchoked_peer_id_list))

    # Create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw)
               for (peer_id, bw) in zip(unchoked_peer_id_list, bandwidths)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))
    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        logging.debug("Still here: uploading to a random peer")
        # change my internal state for no reason
        self.dummy_state["cake"] = "pie"

        if round == 0:
            chosen = [request.requester_id for request in requests][:4]
            bws = even_split(self.up_bw, len(chosen))
        else:
            # tally how many blocks each non-seed peer uploaded to us last round
            pasthist = history.downloads[round - 1]
            ndict = {}
            for item in pasthist:
                if item.to_id != self.id or 'Seed' in item.from_id:
                    continue
                pid = item.from_id
                if pid in ndict.keys():
                    ndict[pid] += item.blocks
                else:
                    ndict[pid] = item.blocks

            requestids = [request.requester_id for request in requests]
            totaluploads = sum([ndict.get(id, 0) for id in requestids])

            # now actually find the proportionality
            try:
                bws = [self.up_bw * self.for_sharing * (float(ndict.get(id, 0)) / float(totaluploads))
                       for id in requestids]
                # give any unallocated bandwidth (including the reserved optimistic share)
                # to a random requester
                randind = random.randint(0, len(requestids) - 1)
                to_share = min(self.up_bw - sum(bws), self.up_bw * self.for_sharing)
                bws[randind] += to_share
                logging.info((str(self.id) + " " + str(self.up_bw) + ": " + str(zip(requestids, bws))))
                chosen = requestids
            except ZeroDivisionError:
                # nobody uploaded to us last round; fall back to an even split over up to 4 requesters
                chosen = [request.requester_id for request in requests][:4]
                bws = even_split(self.up_bw, len(chosen))

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        chosen = []
        bws = []

        # Store peers who requested pieces from you
        request_ids = []
        for request in requests:
            request_ids.append(request.requester_id)

        if round == 0:
            random.shuffle(request_ids)
            chosen = request_ids[:self.unchoke_slots]
        else:
            # Tally how many blocks each peer uploaded to us last round
            download_speed = {}
            for download in history.downloads[round-1]:
                p_id = download.from_id
                if p_id not in download_speed:
                    download_speed[p_id] = download.blocks
                else:
                    download_speed[p_id] += download.blocks
            for peer in peers:
                if peer.id not in download_speed:
                    download_speed[peer.id] = 0

            # Sort peers from fastest to slowest uploader
            sorted_peers = []
            for key, value in sorted(download_speed.iteritems(), key=lambda (k, v): (v, k), reverse=True):
                sorted_peers.append(key)

            # Reciprocity
            index = 0
            i = 0
            while i < self.unchoke_slots and index < len(sorted_peers):
                if sorted_peers[index] in request_ids:
                    chosen.append(sorted_peers[index])
                    i += 1
                index += 1

        # Optimistic unchoking
        chosen.append(random.choice(request_ids))

        # Evenly "split" my upload bandwidth among the chosen requesters
        bws = even_split(self.up_bw, len(chosen))

    # Create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history): slots = 4 round = history.current_round() if round >= 2: # get download histories of previous rounds dl_history1 = history.downloads[round-1] dl_history2 = history.downloads[round-2] dl_history = dict() # fill in dictionary with how many blocks each peer has contributed last 2 turns for down in dl_history1: source_id = down.from_id if source_id not in dl_history.keys(): dl_history[source_id] = down.blocks else: dl_history[source_id] += down.blocks for down in dl_history2: source_id = down.from_id if source_id not in dl_history.keys(): dl_history[source_id] = down.blocks else: dl_history[source_id] += down.blocks if len(requests) == 0: #logging.debug("No one wants my pieces!") chosen = [] bws = [] else: if round >= 2: # rank received requests by upload contribution all_requesters = [] requesters_upload = [] chosen = [] # make list of all peers making requests for request in requests: request_id = request.requester_id if request_id not in all_requesters: all_requesters.append(request_id) # make list of how much each peer requested for requester in all_requesters: if requester not in dl_history.keys(): requesters_upload.append((0, requester)) else: requesters_upload.append((dl_history[requester], requester)) # sort from highest upload contribution to least, and take top 3 requesters # leave one slot open every third round for optimistic unchoking requesters_upload.sort(key = lambda x:x[0], reverse=True) if round%3 != 0: chosen = [x[1] for x in requesters_upload[:slots-1]] else: chosen = [x[1] for x in requesters_upload[:slots]] # get rid of chosen requests from request list new_requests = [x for x in requests if not x.requester_id in chosen] requests = new_requests # optimistic unchoke every 3 turns if round%3 != 0: if len(requests) > 0: # optimistically unchoke random request random_request = random.choice(requests) chosen.append(random_request.requester_id) requests.remove(random_request) # fill remaining spots with random requests while len(chosen) < slots and len(requests) > 0: random_request = random.choice(requests) chosen.append(random_request.requester_id) requests.remove(random_request) bws = even_split(self.up_bw, len(chosen)) # create actual uploads out of the list of peer ids and bandwidths uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)] return uploads
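# The hand-rolled dictionary bookkeeping above (and in several agents below)
# can be written more compactly with collections.defaultdict. A small sketch,
# assuming the same history/Download interface used throughout this collection
# (history.downloads is a per-round list of objects with from_id and blocks):
from collections import defaultdict

def blocks_received_recently(history, rounds=2):
    """Total blocks received from each peer over the last `rounds` rounds."""
    received = defaultdict(int)
    for round_downloads in history.downloads[-rounds:]:
        for down in round_downloads:
            received[down.from_id] += down.blocks
    return received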
def uploads(self, requests, peers, history): ################################################################## ########### updating d_js ######################################## ################################################################## alpha = 0.2 gamma = 0.1 round = history.current_round() self.bandwidthHistory.append(self.up_bw) if round == 0: bw_list = even_split(self.up_bw, len(peers)) for peer, i in zip(peers, range(len(peers))): self.downloadRate[peer.id] = 1 if bw_list[i] == 0: self.uploadRate[peer.id] = 0.5 else: self.uploadRate[peer.id] = bw_list[i] self.slots[peer.id] = 4 self.downloadUploadRatio[peer.id] = 1 else: for peer in peers: for download in history.downloads[round - 1]: if peer.id == download.from_id: self.downloadRate[peer.id] = download.blocks if download.blocks == 0: print "!!!!!! %s uploaded %s block(s)" % ( peer.id, download.blocks) self.slots[peer.id] = mean( self.bandwidthHistory ) / float( self.downloadRate[peer.id] ) # Find how to find out max and min bw or infer from personal history if round >= 3: peer_download = 0 for download2 in history.downloads[round - 2]: if peer.id == download2.from_id: for download3 in history.downloads[round - 3]: if peer.id == download3.from_id: peer_download += 1 if peer_download > 0: self.uploadRate[peer.id] *= 1 - gamma break if len(peer.available_pieces) > 0: av_pieces = float(len(peer.available_pieces)) rnd = float(round) slots = float(self.slots[peer.id]) self.downloadRate[peer.id] = av_pieces / ( rnd * self.conf.blocks_per_piece * slots) self.uploadRate[peer.id] *= 1 + alpha self.downloadUploadRatio[peer.id] = self.downloadRate[ peer.id] / self.uploadRate[peer.id] logging.debug("%s again. It's round %d." % (self.id, round)) ########### Dynamic Optimistic Unchoking ################################# ########### Eallocate each 3 rounds ###################################### ### Choose peer to uchoke every 3 round ### Choose # of peers divided by x: x = 3 if round % 3 == 0: self.optUnchokedPeers = random.sample(peers, len(peers) / x) ### Initial share to uchoke: a = 0.5 availPiecesShare = float(sum(self.pieces)) / float( self.conf.num_pieces * self.conf.blocks_per_piece) ### Allocate BW to opt unchoking bwToOptUnchoking = (a * self.up_bw - availPiecesShare * self.up_bw * a) + 0.001 ### Divide this BW among number of neighbors divided by x bwToOptUnchoking = bwToOptUnchoking / (len(peers) / x) optUnchokedAllocation = {} for peer in self.optUnchokedPeers: optUnchokedAllocation[peer.id] = float( 100 * bwToOptUnchoking) / (float( (a * self.up_bw - availPiecesShare * self.up_bw * a)) + 0.001) up_bw_available = self.up_bw - bwToOptUnchoking * (len(peers) / x) # Removing optimistically unchoked peers from consideration peers_tmp = list(peers) for peer in self.optUnchokedPeers: if peer in peers_tmp: peers_tmp.remove(peer) ######################################################################### ########### Building upload list ######################################## ######################################################################### if len(requests) == 0: logging.debug("No one wants my pieces!") chosen = [] bws = [] uploads = [] else: sumUpload = 0 chosen = {} downloadUploadRatio_tmp = {} # creating list with ratios for only peers in requests for request in requests: for peer in peers_tmp: if request.requester_id == peer.id: downloadUploadRatio_tmp[ request.requester_id] = self.downloadUploadRatio[ request.requester_id] for key in downloadUploadRatio_tmp: if downloadUploadRatio_tmp[key] in self.optUnchokedPeers: 
downloadUploadRatio_tmp.pop(key) needed = lambda i: self.pieces[i] < self.conf.blocks_per_piece needed_pieces = filter(needed, range(len(self.pieces))) while (sumUpload <= up_bw_available * 0.75 and len(downloadUploadRatio_tmp) > 0): peer_id = max(downloadUploadRatio_tmp, key=downloadUploadRatio_tmp.get) chosen[peer_id] = downloadUploadRatio_tmp.pop(peer_id) sumUpload += self.uploadRate[peer_id] """ Calculate the total proportional BW allocated to other peers """ totalUploadBW = 0 for choice in chosen: totalUploadBW += chosen[choice] """ Make each BW as a proportion of totalUploadBW """ if (float(totalUploadBW) * len(optUnchokedAllocation) == 0): uploads = [] else: for choice in chosen: chosen[choice] = 100 * float( chosen[choice]) / float(totalUploadBW) ### Connecting optimistic unchoking list to tyrant list """ Now need to divide our BW as integers according to chosen vector """ peerWeights = [ value for (key, value) in sorted(chosen.items()) ] peerNames = sorted(chosen) bws = proportional_split(int(math.floor(up_bw_available)), peerWeights) # create actual uploads out of the list of peer ids and bandwidths uploads = [ Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws) ] peerWeights = [ value for (key, value) in sorted(optUnchokedAllocation.items()) ] peerNames = sorted(optUnchokedAllocation) bws = proportional_split(self.up_bw - int(up_bw_available), peerWeights) uploads2 = [ Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(peerNames, bws) ] uploads = uploads + uploads2 if (round + 1) % 5 == 0: request = random.choice(requests) chosen = [request.requester_id] # Evenly "split" my upload bandwidth among the one chosen requester bws = even_split(self.up_bw, len(chosen)) return uploads
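# The tyrant-style agents in this collection call a proportional_split helper
# that is not included here. The sketch below is an assumption about its
# behavior (divide an integer budget across shares in proportion to a list of
# weights); the real helper may handle rounding differently.
def proportional_split(total, weights):
    """Split integer `total` into shares roughly proportional to `weights`."""
    weight_sum = float(sum(weights))
    if not weights or weight_sum == 0:
        return [0] * len(weights)
    shares = [int(total * w / weight_sum) for w in weights]
    # Give any units lost to truncation to the largest-weight share.
    shares[weights.index(max(weights))] += total - sum(shares)
    return shares

# Example (illustrative): proportional_split(10, [50, 30, 20]) -> [5, 3, 2]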
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    logging.debug("%s again. It's round %d." % (self.id, round))

    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    if history.current_round() == 0:
        for p in peers:
            self.unchoke_me_count[p.id] = 0
    else:
        # need to put here because if first round, history.downloads is empty
        # and you will get index out of bound error

        # peers j that I upload to and j downloads to this agent
        peers_unchoke_me = []
        for r in list(itertools.chain(*history.downloads[-1:])):
            peers_unchoke_me += [r.from_id]
        peers_I_unchoke = []
        for r in list(itertools.chain(*history.uploads[-1:])):
            peers_I_unchoke += [r.to_id]
        logging.debug("These peers unchoke me last round: " + str(peers_unchoke_me))

        for p in peers:
            # if p unchoked me last period
            if p.id in peers_unchoke_me:
                self.unchoke_me_count[p.id] += 1
            else:
                self.unchoke_me_count[p.id] = 0
        logging.debug("self.unchoke_me_count: " + str(self.unchoke_me_count))

        # START estimating d_j and u_j
        for r in list(itertools.chain(*history.downloads[-1:])):
            if r.from_id in self.download_rates:
                self.download_rates[r.from_id] = self.download_rates[r.from_id] + r.blocks
            else:
                self.download_rates[r.from_id] = r.blocks
        for p in peers:
            # for peers that don't unchoke me, use stale estimate from
            # block announcement rate
            # for peer j, if b pieces are available, r rounds so far, then
            # peer j's download rate is b*blocks_per_piece/r
            # assume that for peer j, upload rate and download rate are equal,
            # so this is an estimate for d_j
            if p.id not in peers_unchoke_me:
                self.download_rates[p.id] = len(p.available_pieces) * self.conf.blocks_per_piece / history.current_round()

    # Now, estimate u_j upload_rates
    # if first round (current_round==0) initialize with equal split capacities
    # if not first round, adjust bids based on last round's reciprocation
    if history.current_round() == 0:
        for p in peers:
            # self.upload_rates[p.id] = self.up_bw / self.NUM_SLOTS
            self.upload_rates[p.id] = 0.01
    else:
        for p in peers:
            # if peer p unchokes this agent for the last 3 periods
            # 3 periods - constant from Piatek et al
            if self.unchoke_me_count[p.id] >= 3 and p.id in peers_I_unchoke:
                self.upload_rates[p.id] *= (1 - self.GAMMA)
            if p.id not in peers_unchoke_me and p.id in peers_I_unchoke:
                self.upload_rates[p.id] *= (1 + self.ALPHA)
    # END estimating d_j and u_j

    logging.debug("self.download_rates: " + str(self.download_rates))
    logging.debug("self.upload_rates: " + str(self.upload_rates))

    # NOW start processing the requests
    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        # it used to be called "chosen". This name is more descriptive
        peers_to_unchoke = []
        bws = []
    else:
        requests_id = [request.requester_id for request in requests]
        logging.debug("")

        # use a list, because we want to sort by d_j/u_j
        # each element in the list is a (peer_id, d_j, u_j) triple
        triples = []
        for r_id in requests_id:
            triples += [(r_id, self.download_rates[r_id], self.upload_rates[r_id])]
        # sort by d_j/u_j in descending order
        triples = sorted(triples,
                         key=lambda x: 1. * x[1] / x[2] if x[2] != 0 else sys.float_info.max,
                         reverse=True)
        logging.debug("triples (id, d_j, u_j): " + str(triples))

        peers_to_unchoke = []
        sum_of_u = 0
        # This can be determined dynamically, but here we will just fix it
        cap = self.up_bw

        needed = lambda i: self.pieces[i] < self.conf.blocks_per_piece
        needed_pieces = filter(needed, range(len(self.pieces)))
        peer_available = {}
        for peer in peers:
            peer_available[peer.id] = list(set(peer.available_pieces).intersection(set(needed_pieces)))

        # keep adding peers to unchoke until the sum of u-s exceeds the cap
        for triple in triples:
            if len(peer_available[triple[0]]) != 0:
                sum_of_u += triple[2]
                if sum_of_u <= cap:
                    peers_to_unchoke += [triple[0]]

        # Evenly "split" my upload bandwidth among the chosen requesters
        if len(peers_to_unchoke) > 0:
            bws = even_split(self.up_bw, len(peers_to_unchoke))
        else:
            bws = []

        # if we have in peers_to_finish, Peer j: [7,5] means 7 needed to finish first request,
        # 5 more needed to finish second request, in total of 12 requested
        peers_to_finish = {}
        for request in requests:
            r_id = request.requester_id
            blocks_needed_to_finish = self.conf.blocks_per_piece - request.start
            if r_id in peers_to_finish.keys():
                # add number of blocks needed to finish this piece to that peer's list
                peers_to_finish[r_id] += [blocks_needed_to_finish]
            else:
                peers_to_finish[r_id] = [blocks_needed_to_finish]
        # print 'peers_to_finish: ' + str(peers_to_finish)

        # prevent finishing if they are close to finishing
        # (write the adjusted values back into bws; modifying the loop
        # variable alone would silently discard the adjustment)
        for slot, (peer_id, bw) in enumerate(zip(peers_to_unchoke, bws)):
            # if I give more bandwidth than they request, reduce given bandwidth
            if bw > sum(peers_to_finish[peer_id]):
                bw = sum(peers_to_finish[peer_id])
            # if what we provide will just make them finish, hold one block back so they don't finish
            if bw in cumsum(peers_to_finish[peer_id]):
                bw -= 1
            bws[slot] = bw

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw)
               for (peer_id, bw) in zip(peers_to_unchoke, bws)]

    return uploads
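# The agent above follows the BitTyrant-style bookkeeping from Piatek et al.:
# lower the estimated price u_j of a peer that has reciprocated for several
# consecutive rounds, raise it when the peer stops reciprocating, and unchoke
# peers in decreasing d_j/u_j order until the upload cap is spent. A minimal
# standalone sketch of that loop -- the function and parameter names here are
# illustrative, not taken from this codebase:
def tyrant_round(d, u, reciprocated, streak, cap, alpha=0.2, gamma=0.1, r=3):
    """One round of bookkeeping; d and u map peer ids to benefit/cost estimates."""
    for j in u:
        if j in reciprocated:
            streak[j] = streak.get(j, 0) + 1
            if streak[j] >= r:
                u[j] *= (1 - gamma)   # peer keeps unchoking us: bid less
        else:
            streak[j] = 0
            u[j] *= (1 + alpha)       # peer stopped unchoking us: bid more
    # Unchoke the best return-on-investment peers until the cap is exhausted.
    ranked = sorted(u, key=lambda j: float(d[j]) / u[j] if u[j] else float('inf'),
                    reverse=True)
    chosen, spent = [], 0
    for j in ranked:
        if spent + u[j] > cap:
            break
        chosen.append(j)
        spent += u[j]
    return chosen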
def uploads(self, requests, peers, history): slots = 4 round = history.current_round() requesters_ranked = [] needed = lambda i: self.pieces[i] < self.conf.blocks_per_piece needed_pieces = filter(needed, range(len(self.pieces))) np_set = set(needed_pieces) # After finishing all pieces, feed the peers with the least pieces blocks # This way other good algorithms will rank lower :) # Stop uploading here so other peers do worse if len(needed_pieces) == 0: chosen = [] bws = [] uploads = [ Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws) ] return uploads random.shuffle(peers) for peer in peers: av_set = set(peer.available_pieces) isect = list(av_set.intersection(np_set)) requesters_ranked.append([peer.id, len(isect)]) # ranks peers in order of most overlap pieces to least requesters_ranked.sort(key=lambda x: x[1], reverse=True) all_requesters = [] requesters_upload = [] chosen = [] # make list of all peers making requests for request in requests: request_id = request.requester_id if request_id not in all_requesters: all_requesters.append(request_id) #fulfill requests in order of how many pieces they have that we need final_requesters = [] for requester in requesters_ranked: if requester[0] in all_requesters: final_requesters.append(requester[0]) approved = [] #END PASTE if round >= 2: # get download histories of previous rounds dl_history1 = history.downloads[round - 1] dl_history2 = history.downloads[round - 2] dl_history = dict() # fill in dictionary with how many blocks each peer has contributed last 2 turns for down in dl_history1: source_id = down.from_id if source_id not in dl_history.keys(): dl_history[source_id] = down.blocks else: dl_history[source_id] += down.blocks for down in dl_history2: source_id = down.from_id if source_id not in dl_history.keys(): dl_history[source_id] = down.blocks else: dl_history[source_id] += down.blocks if len(requests) == 0: #logging.debug("No one wants my pieces!") chosen = [] bws = [] else: if round >= 2: # rank received requests by upload contribution all_requesters = [] requesters_upload = [] chosen = [] # make list of all peers making requests for request in requests: request_id = request.requester_id if request_id not in all_requesters: all_requesters.append(request_id) # random shuffle? while len(all_requesters) > 0: random_request = random.choice(all_requesters) chosen.append(random_request) all_requesters.remove(random_request) bws = even_split(self.up_bw, len(chosen)) # create actual uploads out of the list of peer ids and bandwidths uploads = [ Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws) ] return uploads
def uploads(self, requests, peers, history):
    """
    requests -- a list of the requests for this peer for this round
    peers -- available info about all the peers
    history -- history for all previous rounds

    returns: list of Upload objects.

    In each round, this will be called after requests().
    """
    round = history.current_round()
    #logging.debug("%s again. It's round %d." % (
    #    self.id, round))

    # One could look at other stuff in the history too here.
    # For example, history.downloads[round-1] (if round != 0, of course)
    # has a list of Download objects for each Download to this peer in
    # the previous round.

    if len(requests) == 0:
        logging.debug("No one wants my pieces!")
        chosen = []
        bws = []
    else:
        ##################################################################
        ########## Implementing reciprocation ############################
        ##################################################################
        S = 4  # Upload slots
        request_bwd_history = {}
        chosen = []

        logging.debug("Requests, all:")
        logging.debug(requests)

        for request in requests:
            request_bwd_history[request.requester_id] = 0
        if round >= 1:
            for download in history.downloads[round - 1]:
                if download.from_id in request_bwd_history:
                    request_bwd_history[download.from_id] += download.blocks
        if round >= 2:
            for download in history.downloads[round - 2]:
                if download.from_id in request_bwd_history:
                    request_bwd_history[download.from_id] += download.blocks

        # reciprocate with the requesters who uploaded the most to us
        # over the last two rounds
        for i in range(0, S - 2):
            if len(request_bwd_history) > 0:
                peer_id = max(request_bwd_history, key=request_bwd_history.get)
                request_bwd_history.pop(peer_id)
                chosen.append(peer_id)
        #if len(request_bwd_history) == 0:
        #    chosen.append(random.choice(request_bwd_history.keys()))
        ################################################################

        ################################################################
        ########## Implementing optimistic unchoking ###################
        ################################################################
        if (round % 3 == 0 and len(request_bwd_history) > 0):
            new_opt_unchock = random.choice(request_bwd_history.keys())
            chosen.append(new_opt_unchock)
            self.current_opt_unchock = new_opt_unchock
        else:
            chosen.append(self.current_opt_unchock)
        #############################################################

        # request = random.choice(requests)
        # chosen = [request.requester_id]

        # Evenly "split" my upload bandwidth among the chosen requesters
        bws = even_split(self.up_bw, len(chosen))
        # print bws
        # print len(chosen)

    # create actual uploads out of the list of peer ids and bandwidths
    uploads = [Upload(self.id, peer_id, bw)
               for (peer_id, bw) in zip(chosen, bws)]

    return uploads
def uploads(self, requests, peers, history): """ requests -- a list of the requests for this peer for this round peers -- available info about all the peers history -- history for all previous rounds returns: list of Upload objects. In each round, this will be called after requests(). """ round = history.current_round() logging.debug("%s again. It's round %d." % (self.id, round)) # One could look at other stuff in the history too here. # For example, history.downloads[round-1] (if round != 0, of course) # has a list of Download objects for each Download to this peer in # the previous round. if len(requests) == 0: logging.debug("No one wants my pieces!") chosen = [] bws = [] else: logging.debug("Still here: uploading to a random peer") # change my internal state for no reason # on first round, evenly split bandwidth speed between 4 random requesters self.dummy_state["cake"] = "pie" if round == 0: random.shuffle(requests) chosen = [request.requester_id for request in requests[0:4]] bws = even_split(self.up_bw, len(chosen)) else: # optimistic unchoking every 3 periods if "unchoked_agent" not in self.dummy_state or round % 3 == 1: self.dummy_state["unchoked_agent"] = random.choice( peers).id pasthist = history.downloads[round - 1] # create a dictionary mapping agents to the number of blocks they gave you last round ndict = {} for item in pasthist: if item.to_id != self.id: continue pid = item.from_id if pid in ndict.keys(): ndict[pid] += item.blocks else: ndict[pid] = item.blocks top = sorted(ndict.items(), key=lambda x: x[1], reverse=True) reqset = set(request.requester_id for request in requests) logging.debug(repr(top)) # get 3 most generous bots top3 = [] for id in top: if id[0] in reqset: logging.debug(id) top3.append(id[0]) if len(top3) >= 3: break chosen = [item for item in top3] logging.debug(chosen) while len(chosen) < 4: chosen.append(self.dummy_state["unchoked_agent"]) bws = even_split(self.up_bw, len(chosen)) # Evenly "split" my upload bandwidth among the one chosen requester # create actual uploads out of the list of peer ids and bandwidths uploads = [ Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws) ] return uploads
def uploads(self, requests, peers, history): """ requests -- a list of the requests for this peer for this round peers -- available info about all the peers history -- history for all previous rounds returns: list of Upload objects. In each round, this will be called after requests(). """ round = history.current_round() logging.debug("%s again. It's round %d." % (self.id, round)) # One could look at other stuff in the history too here. # For example, history.downloads[round-1] (if round != 0, of course) # has a list of Download objects for each Download to this peer in # the previous round. # update beliefs by aggregating upload/download speeds for the first few rounds if round > 0: if round < 5: self.update_beliefs(peers, history, update_download_sum=True, update_upload_sum=True, update_beliefs=False) for key, value in self.download_nums.iteritems(): self.download_beliefs[key] = value else: self.update_beliefs(peers, history) if len(requests) == 0: logging.debug("No one wants my pieces!") chosen = [] bws = [] else: logging.debug("Still here: uploading using brain cells") # if it has been fewer than r rounds, we allocate evenly among everyone if round < 5: logging.debug('even split') chosen = [request.requester_id for request in requests] bws = even_split(self.up_bw, len(chosen)) # if it has been r or more rounds, we can use the algorithm else: cap = self.up_bw # sort requesters by calculating ratios of download to upload beliefs and sorting by decreasing ratios = dict() requesters = [request.requester_id for request in requests] for requester in requesters: logging.debug('download_beliefs' + str(self.download_beliefs)) logging.debug(self.download_beliefs[requester]) logging.debug(self.upload_beliefs[requester]) ratios[requester] = self.download_beliefs[ requester] * 1.0 / self.upload_beliefs[requester] ratios_sorted = sorted(ratios.items(), key=lambda x: x[1], reverse=True) logging.debug('ratios') logging.debug(str(ratios_sorted)) bandwidth_used = 0 chosen, bws = [], [] for pid, ratio in ratios_sorted: if self.upload_beliefs[pid] + bandwidth_used > self.up_bw: break else: bws.append(self.upload_beliefs[pid]) bandwidth_used += self.upload_beliefs[pid] chosen.append(pid) # create actual uploads out of the list of peer ids and bandwidths uploads = [ Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws) ] return uploads
def uploads(self, requests, peers, history): ################################################################## ########### updating d_js ######################################## ################################################################## alpha = 0.2 gamma = 0.1 round = history.current_round() self.bandwidthHistory.append(self.up_bw) if round == 0: bw_list = even_split(self.up_bw, len(peers)) for peer, i in zip(peers, range(len(peers))): self.downloadRate[peer.id] = (self.conf.max_up_bw - self.conf.min_up_bw) / (4 * 2) if bw_list[i] == 0: self.uploadRate[peer.id] = 0.5 else: self.uploadRate[peer.id] = bw_list[i] self.slots[peer.id] = 4 self.downloadUploadRatio[peer.id] = 1 else: for peer in peers: self.downloadRate[peer.id] = 0 for download in history.downloads[round - 1]: if peer.id == download.from_id: self.downloadRate[peer.id] += download.blocks if download.blocks == 0: print "!!!!!! %s uploaded %s block(s)" % ( peer.id, download.blocks) self.slots[peer.id] = mean( self.bandwidthHistory ) / float( self.downloadRate[peer.id] ) # Find how to find out max and min bw or infer from personal history if round >= 3: peer_download = 0 for download2 in history.downloads[round - 2]: if peer.id == download2.from_id: for download3 in history.downloads[round - 3]: if peer.id == download3.from_id: peer_download += 1 if peer_download > 0: self.uploadRate[peer.id] *= 1 - gamma break if len(peer.available_pieces) > 0: av_pieces = float(len(peer.available_pieces)) rnd = float(round) slots = float(self.slots[peer.id]) self.downloadRate[peer.id] = av_pieces / ( rnd * self.conf.blocks_per_piece * slots) self.uploadRate[peer.id] *= 1 + alpha self.downloadUploadRatio[peer.id] = self.downloadRate[ peer.id] / self.uploadRate[peer.id] if len(requests) == 0: logging.debug("No one wants my pieces!") chosen = [] bws = [] uploads = [] else: logging.debug("Still here: uploading to a random peer") # change my internal state for no reason self.dummy_state["cake"] = "pie" ################################################################## ########### Building upload list ################################# ################################################################## sumUpload = 0 chosen = {} downloadUploadRatio_tmp = {} # creating list with ratios for only peers in requests for request in requests: downloadUploadRatio_tmp[ request.requester_id] = self.downloadUploadRatio[ request.requester_id] while (sumUpload <= self.up_bw and len(downloadUploadRatio_tmp) > 0): peer_id = max(downloadUploadRatio_tmp, key=downloadUploadRatio_tmp.get) chosen[peer_id] = downloadUploadRatio_tmp.pop(peer_id) sumUpload += self.uploadRate[peer_id] """ Calculate the total proportional BW allocated to other peers """ totalUploadBW = 0 for choice in chosen: totalUploadBW += chosen[choice] """ Make each BW as a proportion of totalUploadBW """ if float(totalUploadBW) == 0: uploads = [] else: for choice in chosen: chosen[choice] = 100 * float( chosen[choice]) / float(totalUploadBW) """ Now need to divide our BW as integers according to chosen vector """ peerWeights = [ value for (key, value) in sorted(chosen.items()) ] peerNames = sorted(chosen) bws = proportional_split(self.up_bw, peerWeights) # create actual uploads out of the list of peer ids and bandwidths uploads = [ Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws) ] return uploads
def uploads(self, requests, peers, history): """ requests -- a list of the requests for this peer for this round peers -- available info about all the peers history -- history for all previous rounds returns: list of Upload objects. In each round, this will be called after requests(). """ round = history.current_round() logging.debug("%s again. It's round %d." % (self.id, round)) # One could look at other stuff in the history too here. # For example, history.downloads[round-1] (if round != 0, of course) # has a list of Download objects for each Download to this peer in # the previous round. # take care of updating the trackers for your friendliest peers self.two_round_ago = self.one_round_ago.copy() self.one_round_ago = dict() if round != 0: for d in history.downloads[round - 1]: # let's assume that we can just add blocks split among pieces # rather than average among pieces if d.from_id in self.one_round_ago.keys(): self.one_round_ago[d.from_id] += d.blocks else: self.one_round_ago[d.from_id] = d.blocks logging.debug("Here are my peer histories from two round ago: %s" % self.two_round_ago) logging.debug("and from one round ago: %s" % self.one_round_ago) # and now add up the last two rounds c = Counter(self.two_round_ago) c.update(self.one_round_ago) best_friends = c.most_common() logging.debug("and my best friends!: %s" % best_friends) if len(requests) == 0: logging.debug("No one wants my pieces!") chosen = [] bws = [] else: logging.debug("Still here: uploading to my favorite peers") # change my internal state for no reason # No! Bad! Keep the cake! # self.dummy_state["cake"] = "pie" # **Reciprocal unlocking** # let's assume that we only want to give to our # mostest bestest friends, even if they don't request from us # It promotes charity :) # Let's also do some handling to randomize if we have multiple best friends # of same bestiness # most of this is just ugly handling of cases where we don't have enough friends chosen = [] # handle bestest friends either being clear best or also tied if len(best_friends) > 2: candidate_best_friends = [best_friends[2]] best_friend_counter = 3 # handle best friends tied for bestinees while (best_friend_counter < len(best_friends) and best_friends[best_friend_counter][1] == best_friends[2][1]): candidate_best_friends.append( best_friends[best_friend_counter][0]) best_friend_counter += 1 if best_friends[0][1] > best_friends[2][1]: chosen.append(best_friends[0][0]) else: candidate_best_friends.append(best_friends[0][0]) if best_friends[1][1] > best_friends[2][1]: chosen.append(best_friends[1][0]) else: candidate_best_friends.append(best_friends[1][0]) else: candidate_best_friends = [] if len(best_friends) > 1: chosen.append(best_friends[1][0]) if len(best_friends) > 0: chosen.append(best_friends[0][0]) # finally, we can actually randomize random.shuffle(candidate_best_friends) for i in xrange(3 - len(chosen)): # let's assume we're okay leaving best friend slots empty if i < len(candidate_best_friends): chosen.append(candidate_best_friends[i]) # **Optimistic unlocking** # Again, let's assume that our optimistic doesn't necessarily # have to be in the requests # Let's also assume that we won't give to the optimistic # if they're already in our best friends--we can wait until they're not, # or a new optimistic is set if round % 3 == 0: self.optimistic = random.choice(peers).id if self.optimistic not in chosen: chosen.append(self.optimistic) logging.debug("And here are my chosen peers: %s", chosen) # request = random.choice(requests) # chosen = 
[request.requester_id] # Evenly "split" my upload bandwidth among the chosen requesters bws = even_split(self.up_bw, len(chosen)) # create actual uploads out of the list of peer ids and bandwidths uploads = [ Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws) ] return uploads
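# A small usage sketch of the Counter bookkeeping used above, with purely
# illustrative peer ids and block counts (not taken from the simulator):
from collections import Counter

two_rounds_ago = Counter({'peer_a': 12, 'peer_b': 4})
one_round_ago = {'peer_b': 9, 'peer_c': 6}
merged = Counter(two_rounds_ago)
merged.update(one_round_ago)   # counts add up, so peer_b becomes 13
# merged.most_common() -> [('peer_b', 13), ('peer_a', 12), ('peer_c', 6)]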
def uploads(self, requests, peers, history): """ requests -- a list of the requests for this peer for this round peers -- available info about all the peers history -- history for all previous rounds returns: list of Upload objects. In each round, this will be called after requests(). """ ################################################################## ########### updating d_js ######################################## ################################################################## alpha = 0.2 gamma = 0.1 round = history.current_round() self.bandwidthHistory.append(self.up_bw) if round == 0: bw_list = even_split(self.up_bw,len(peers)) for peer,i in zip(peers,range(len(peers))): self.downloadRate[peer.id] = 1 if bw_list[i] == 0: self.uploadRate[peer.id] = 0.5 else: self.uploadRate[peer.id] = bw_list[i] self.slots[peer.id] = 4 self.downloadUploadRatio[peer.id] = 1 else: for peer in peers: for download in history.downloads[round-1]: if peer.id == download.from_id: self.downloadRate[peer.id] = download.blocks if download.blocks == 0: print "!!!!!! %s uploaded %s block(s)" % (peer.id, download.blocks) self.slots[peer.id] = mean(self.bandwidthHistory)/float(self.downloadRate[peer.id]) # Find how to find out max and min bw or infer from personal history if round >= 3: peer_download = 0 for download2 in history.downloads[round-2]: if peer.id == download2.from_id: for download3 in history.downloads[round-3]: if peer.id == download3.from_id: peer_download += 1 if peer_download > 0: self.uploadRate[peer.id] *= 1 - gamma break if len(peer.available_pieces) > 0: av_pieces = float(len(peer.available_pieces)) rnd = float(round) slots = float(self.slots[peer.id]) self.downloadRate[peer.id] = av_pieces/(rnd * self.conf.blocks_per_piece * slots) self.uploadRate[peer.id] *= 1 + alpha #if self.downloadRate[peer.id] == 0: # print str(peer.id) + ": " + str(peer.available_pieces) # print "Peer %s has %s available pieces" % (peer.id, len(peer.available_pieces)) self.downloadUploadRatio[peer.id] = self.downloadRate[peer.id]/self.uploadRate[peer.id] logging.debug("%s again. It's round %d." 
% ( self.id, round)) ########### Dynamic Optimistic Unchoking ################################# ########### Eallocate each 3 rounds ###################################### ### Choose peer to uchoke every 3 round ### Choose # of peers divided by x: x = 3 if round % 3 ==0: self.optUnchokedPeers = random.sample(peers, len(peers)/x) ### Initial share to uchoke: a = 0.5 availPiecesShare = float(sum(self.pieces))/float(self.conf.num_pieces*self.conf.blocks_per_piece) ### Allocate BW to opt unchoking bwToOptUnchoking = (a*self.up_bw - availPiecesShare * self.up_bw * a) + 0.001 ### Divide this BW among number of neighbors divided by x bwToOptUnchoking = bwToOptUnchoking/(len(peers)/x) optUnchokedAllocation = {} for peer in self.optUnchokedPeers: optUnchokedAllocation[peer.id] = float(100 * bwToOptUnchoking) /(float((a*self.up_bw - availPiecesShare * self.up_bw * a)) +0.001) up_bw_available = self.up_bw - bwToOptUnchoking*(len(peers)/x) # Removing optimistically unchoked peers from consideration peers_tmp = list(peers) for peer in self.optUnchokedPeers: if peer in peers_tmp: peers_tmp.remove(peer) ######################################################################### ########### Building upload list ######################################## ######################################################################### if len(requests) == 0: logging.debug("No one wants my pieces!") chosen = [] bws = [] uploads = [] else: sumUpload = 0 chosen = {} downloadUploadRatio_tmp = {} # creating list with ratios for only peers in requests for request in requests: for peer in peers_tmp: if request.requester_id == peer.id: downloadUploadRatio_tmp[request.requester_id] = self.downloadUploadRatio[request.requester_id] for key in downloadUploadRatio_tmp: if downloadUploadRatio_tmp[key] in self.optUnchokedPeers: downloadUploadRatio_tmp.pop(key) needed = lambda i: self.pieces[i] < self.conf.blocks_per_piece needed_pieces = filter(needed, range(len(self.pieces))) while (sumUpload <= up_bw_available * 0.75 and len(downloadUploadRatio_tmp) > 0): peer_id = max(downloadUploadRatio_tmp, key = downloadUploadRatio_tmp.get) chosen[peer_id] = downloadUploadRatio_tmp.pop(peer_id) sumUpload += self.uploadRate[peer_id] """ Calculate the total proportional BW allocated to other peers """ totalUploadBW = 0 for choice in chosen: totalUploadBW += chosen[choice] """ Make each BW as a proportion of totalUploadBW """ if (float(totalUploadBW) * len(optUnchokedAllocation) == 0): uploads = [] else: for choice in chosen: chosen[choice] = 100 * float(chosen[choice]) / float(totalUploadBW) ### Connecting optimistic unchoking list to tyrant list # chosen.update(optUnchokedAllocation) # print "Vector of choices for this round:" # print chosen """ Now need to divide our BW as integers according to chosen vector """ peerWeights = [value for (key, value) in sorted(chosen.items())] peerNames = sorted(chosen) # print "original chosen: %s" % (chosen) # print "names: %s" % (peerNames) # print "weights: %s" % (peerWeights) bws = proportional_split(int(math.floor(up_bw_available)), peerWeights) # create actual uploads out of the list of peer ids and bandwidths uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws)] peerWeights = [value for (key, value) in sorted(optUnchokedAllocation.items())] peerNames = sorted(optUnchokedAllocation) bws = proportional_split(self.up_bw - int(up_bw_available), peerWeights) uploads2 = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(peerNames, bws)] uploads = uploads + uploads2 if (round + 1) % 
5 == 0: request = random.choice(requests) chosen = [request.requester_id] # Evenly "split" my upload bandwidth among the one chosen requester bws = even_split(self.up_bw, len(chosen)) #uploads = uploads.append([Upload(self.id, peer_id, bw) # for (peer_id, bw) in zip(optUnchokedAllocation, bws)]) return uploads
def uploads(self, requests, peers, history): """ requests -- a list of the requests for this peer for this round peers -- available info about all the peers history -- history for all previous rounds returns: list of Upload objects. In each round, this will be called after requests(). """ round = history.current_round() logging.debug("%s again. It's round %d." % ( self.id, round)) # get top 3 agents from last 2 rounds and respond to their requests agents = {} for r in list(itertools.chain(*history.downloads[-2:])): if r.from_id in agents: agents[r.from_id] = agents[r.from_id] + r.blocks else: agents[r.from_id] = r.blocks neighbor_list = sorted(agents, key=agents.get, reverse=True) logging.debug("Downloads: " + str(agents)) bws = [] if len(requests) == 0: logging.debug("No one wants my pieces!") else: logging.debug("Still here: uploading to my 3 best peers") #choose favorite neighbors to fill my 3 slots!! count = 0 i = 0 while count != 3 and i != len(neighbor_list): if neighbor_list[i] in [x.requester_id for x in requests]: self.chosen[count] = neighbor_list[i] count += 1 i += 1 #if fewer than 3 people have been interested, fill in my other slots optimistically (randomly) if count < 3: unchosen = [x.requester_id for x in requests if x.requester_id not in self.chosen] needed_random = min(4 - count, len(unchosen)) if len(unchosen) >= needed_random: self.chosen[count:count + needed_random] = random.sample(unchosen, needed_random) #if all slots filled, every 3 rounds, optimistically unchoke last slot elif round % 3 == 0: unchosen = [x.requester_id for x in requests if x.requester_id not in self.chosen] if len(unchosen) > 0: unchoke = random.choice(unchosen) logging.debug("round : " + `round` + ". Optimistically unchoking " + `unchoke`) self.chosen[3] = unchoke logging.debug("Chosen to upload to: " + str(self.chosen)) # Evenly "split" my upload bandwidth among the one chosen requester bws = even_split(self.up_bw, len(self.chosen)) # create actual uploads out of the list of peer ids and bandwidths uploads = [Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(self.chosen, bws)] return uploads
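# Several agents above keep a single optimistic-unchoke slot and refresh it
# every third round. A minimal standalone sketch of that rotation; the
# function name and arguments are illustrative, not part of this codebase:
import random

def rotate_optimistic(current, round, candidates):
    """Keep the current optimistic peer for two more rounds, then re-roll."""
    if candidates and (current is None or round % 3 == 0):
        return random.choice(candidates)
    return current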
def uploads(self, requests, peers, history): """ requests -- a list of the requests for this peer for this round peers -- available info about all the peers history -- history for all previous rounds returns: list of Upload objects. In each round, this will be called after requests(). """ chosen = [] bws = [] if len(requests) != 0: round = history.current_round() # first, the reciprocal unchoking slots last_downloads = history.downloads[round - 1] if round != 0 else [] request_ids = [request.requester_id for request in requests] if last_downloads: # filter to only those who want to download, then sort by previous upload bandwith last_id_blocks = [(download.from_id, download.blocks) for download in last_downloads] last_id_blocks = list( filter(lambda p: p[0] in request_ids, last_id_blocks)) last_id_blocks.sort(key=lambda download: download[1], reverse=True) last_id = [id for (id, _) in last_id_blocks] # select at most top three peers chosen += last_id[:(self.state["num_slots"] - 1)] # next, the optimistic unchoking slot if (round % 3) == 0 or self.state["optimistic_unchoke"] is None: # every third round, unchoke new agent unchosen_requests = list( filter(lambda p: p not in chosen, request_ids)) if unchosen_requests: opt_peer = random.choice(unchosen_requests) chosen += [opt_peer] self.state["optimistic_unchoke"] = opt_peer else: unchosen_peers = list( filter(lambda p: p.id not in chosen, peers)) if unchosen_peers: opt_peer = random.choice(unchosen_peers).id chosen += [opt_peer] self.state["optimistic_unchoke"] = opt_peer else: # unchoke agent optimistically unchoked previously if self.state["optimistic_unchoke"]: chosen += [self.state["optimistic_unchoke"]] # now try to fill in remaining slots randomly if history is insufficient unchosen_requests = [r for r in request_ids if r not in chosen] randomly_chosen = random.sample( unchosen_requests, max( min(len(unchosen_requests), self.state["num_slots"] - len(chosen)), 0)) chosen += randomly_chosen # evenly split upload bandwidth among the chosen peeres bws = even_split(self.up_bw, len(chosen)) # create actual uploads out of the list of peer ids and bandwidths uploads = [ Upload(self.id, peer_id, bw) for (peer_id, bw) in zip(chosen, bws) ] return uploads