def moderate_the_team(self):
    """Run the team-moderation loop of the splitter.

    Listens on self.team_socket and dispatches the control messages
    sent by the peers: [goodbye], [lost chunk] and [hello].  Runs
    until self.alive becomes False.
    """
    while self.alive:
        packed_msg, sender = self.team_socket.recvfrom(100)
        # All fields of a control message are signed 32-bit ints.
        msg_format = '!' + 'i' * (len(packed_msg) // 4)
        try:
            msg = struct.unpack(msg_format, packed_msg)
        except struct.error:
            stderr.write(
                f"{self.id}: unexpected message {packed_msg} with length={len(packed_msg)} received from {sender}"
            )
            # Fix: without this "continue" the dispatch below read the
            # unbound (or stale) "msg" and crashed/misbehaved.
            continue
        if not msg:
            # Fix: a datagram shorter than 4 bytes unpacks to an empty
            # tuple; skip it instead of raising IndexError on msg[0].
            continue
        if msg[0] == Messages.GOODBYE:
            # Message sent by all peers when they leave the team.
            self.process_goodbye(sender)
        elif msg[0] == Messages.LOST_CHUNK:
            # Message sent only by monitors when they lost a chunk.
            lost_chunk_number = msg[1]
            self.process_lost_chunk(lost_chunk_number)
            self.lg.debug(
                f"{self.id}: received [lost chunk {msg[1]}] from {sender}")
        elif msg[0] == Messages.HELLO:
            # Message sent by peers to create a translation entry
            # in their NATs.  No extra functionality by now.
            self.lg.info(f"{self.id}: received [hello] from {sender}")
        else:
            stderr.write(
                f"{self.id}: unexpected message {packed_msg} with length={len(packed_msg)} decoded as {msg} received from {sender}"
            )
def process_lost_chunk(self, lost_chunk_number):
    """Account a chunk reported as lost by a monitor.

    Finds the peer that the lost chunk was sent to and, when that
    peer is a regular (non-monitor) team member, increases its
    unsupportivity counter.
    """
    self.total_losses += 1
    culprit = self.get_losser(lost_chunk_number)
    # Monitors occupy the first self.number_of_monitors slots of the
    # team list; only regular peers are charged for losses.
    if culprit in self.team and self.team.index(culprit) >= self.number_of_monitors:
        self.increment_unsupportivity_of_peer(culprit)
    stderr.write(f" {colorama.Fore.RED}{lost_chunk_number}{colorama.Style.RESET_ALL}")
def process_unpacked_message(self, message, sender):
    """Dispatch an already-unpacked message.

    A non-negative first field is a chunk number (the message carries
    a chunk); a negative one is a control code.  Returns the tuple
    (chunk_number, sender).
    """
    chunk_number = message[ChunkStructure.CHUNK_NUMBER]
    if chunk_number >= 0:
        # We have received a chunk.
        self.lg.debug(
            f"{self.ext_id}: received chunk {message} from {sender}")
        self.received_chunks += 1
        if __debug__:
            if sender == self.splitter:
                if self.played > 0 and self.played >= self.number_of_peers:
                    # Chunk Loss Ratio (computed only for debugging).
                    CLR = self.number_of_lost_chunks / (
                        self.played + self.number_of_lost_chunks
                    )  # Chunk Loss Ratio
        self.process_chunk(message, sender)
        self.send_chunks_to_neighbors()
    else:  # message[ChunkStructure.CHUNK_NUMBER] < 0
        if chunk_number == Messages.REQUEST:
            self.process_request(message[1], sender)
        elif chunk_number == Messages.PRUNE:
            self.process_prune(message[1], sender)
        elif chunk_number == Messages.HELLO:
            self.process_hello(sender)
        elif chunk_number == Messages.GOODBYE:
            self.process_goodbye(sender)
        else:
            # Fix: this string was missing the f-prefix, so the
            # placeholders were printed literally.
            stderr.write(
                f"{self.ext_id}: process_unpacked_message: unexpected control chunk of index={chunk_number}"
            )
    return (chunk_number, sender)
def insert_peer(self, peer):
    """Add *peer* to the team (idempotent) and reset its loss counter."""
    if peer in self.team:
        # Already a member: nothing to do.
        return
    self.team.append(peer)
    self.losses[peer] = 0
    self.lg.debug(f"{self.id}: {peer} inserted in the team")
    stderr.write(
        f" {colorama.Fore.MAGENTA}{len(self.team)}{colorama.Style.RESET_ALL}"
    )
def process_goodbye(self, sender):
    """Handle a [goodbye] message received from *sender*.

    A goodbye from the splitter shuts the peer down; a goodbye from a
    regular peer removes it from every forwarding list.
    """
    self.lg.debug(f"{self.ext_id}: received [goodbye] from {sender}")
    if sender != self.splitter:
        # A peer is leaving: drop it from all forwarding lists.
        for neighbors in self.forward.values():
            if sender in neighbors:
                neighbors.remove(sender)
            else:
                stderr.write(
                    f"{self.ext_id}: failed to remove peer {sender} from {neighbors}"
                )
    else:
        # The splitter is going away: stop waiting and disconnect.
        self.waiting_for_goodbye = False
        self.player_connected = False
def process_request_origin(self, origin, sender):
    """Handle a [request_origin] control message.

    Starts forwarding the chunks originated at *origin* towards
    *sender*, unless *sender* is the origin itself.
    """
    stderr.write(
        f" {colorama.Fore.CYAN}{origin[1]}{colorama.Style.RESET_ALL}")
    self.lg.debug(
        f"{self.ext_id}: received [request_origin {origin}] from {sender}")
    if origin == sender:
        # Never forward a peer its own chunks.
        self.lg.debug(
            f"{self.ext_id}: process_request: origin {origin} is the sender of the request"
        )
    else:
        self.update_forward(origin, sender)
        self.lg.debug(
            f"{self.ext_id}: process_request: forwarding chunk from {origin} to {sender}"
        )
def increment_unsupportivity_of_peer(self, peer):
    """Charge one more chunk loss to *peer*.

    When the team-wide loss count exceeds self.max_chunk_loss, the
    worst offender is expelled and all counters are reset.
    """
    stderr.write(f" {colorama.Fore.RED}({self.team.index(peer)}){colorama.Style.RESET_ALL}")
    if peer not in self.losses:
        self.lg.debug(f"{self.id}: the unsupportive peer {peer} does not exist in {self.losses}")
        return
    self.losses[peer] += 1
    if self.total_losses > self.max_chunk_loss:
        # Expel the most unsupportive peer and start counting afresh.
        worst_peer = max(self.losses, key=self.losses.get)
        self.remove_peer(worst_peer)
        for counted_peer in self.losses:
            self.losses[counted_peer] = 0
        self.total_losses = 0
def unpack_message(self, packet, sender):
    """Decode *packet* and dispatch it.

    The leading signed int distinguishes chunks (>= 0) from control
    messages (< 0).  Returns (chunk_number, sender).
    """
    msg_format = "!i" + 's' * (len(packet) - 4)
    chunk_number = struct.unpack(msg_format, packet)[0]
    if chunk_number < 0:
        # Control message.
        if chunk_number == Messages.HELLO:
            self.process_hello(sender)
        elif chunk_number == Messages.GOODBYE:
            self.process_goodbye(sender)
        else:
            stderr.write(
                f"{self.ext_id}: unexpected control chunk with code={chunk_number}"
            )
    else:
        # A chunk of media.
        self.unpack_chunk(packet, sender)
    return (chunk_number, sender)
def run(self):
    """Main loop of the splitter.

    Starts the arrival/moderation threads, waits until at least one
    peer joins, then sends chunks round-robin to the team until the
    team empties or self.alive is cleared, and finally sends up to 10
    rounds of [goodbye]s to the remaining peers.
    """
    chunk_number = 0
    total_peers = 0  # Accumulated team size, one sample per round.
    self.current_round = 0
    Thread(target=self.handle_arrivals).start()
    Thread(target=self.moderate_the_team).start()
    #Thread(target=self.reset_counters_thread).start()
    # Wait for the first peer (the monitor) to join the team.
    while len(self.team) == 0:
        time.sleep(1)
    #self.on_round_beginning()
    print()
    while (len(self.team) > 0) and self.alive:
        chunk = self.retrieve_chunk()
        # A round starts each time the first peer of the list is served.
        if self.peer_number == 0:
            self.current_round += 1
            total_peers += len(self.team)
            self.on_round_beginning()
            stderr.write(f" {colorama.Fore.YELLOW}{self.current_round}{colorama.Style.RESET_ALL}")
        try:
            peer = self.team[self.peer_number]
        except IndexError:
            # NOTE(review): if this fires on the very first iteration,
            # "peer" is unbound and the next statement raises; on later
            # iterations the previous peer is silently reused — confirm
            # this recovery is intentional.
            stderr.write(f"{self.id}: the peer with index {self.peer_number} does not exist. peers_list={self.team} peer_number={self.peer_number}")
        self.destination_of_chunk[chunk_number % (self.buffer_size)] = peer  # self.team.index(peer)
        self.send_chunk(chunk_number, chunk, peer)
        chunk_number = (chunk_number + 1) % Limits.MAX_CHUNK_NUMBER
        try:
            self.peer_number = (self.peer_number + 1) % len(self.team)
        except ZeroDivisionError:
            # The team became empty while it was being served.
            pass
        self.is_alive()
    # Say "goodbye" to the peers and wait for their "goodbye"s
    counter = 0
    while (len(self.team) > 0) and (counter < 10):
        #while len(self.team) > 0:
        self.lg.debug("{}: waiting for [goodbye]s from peers (peers_list={})".format(self.id, self.team))
        time.sleep(0.1)
        for p in self.team:
            self.say_goodbye(p)
        counter += 1
def connect_to_the_splitter(self, peer_port):
    """Open the TCP connection with the splitter.

    Binds the local end to (local address, peer_port) and connects to
    self.splitter.  On success self.splitter is replaced by the
    address actually seen by the socket layer and True is returned;
    on connection failure, False.
    """
    self.lg.debug(
        f"{self.public_endpoint}: connecting to the splitter at {self.splitter}"
    )
    host_name = socket.gethostname()
    # Resolve the local address, retrying while the resolver fails.
    # NOTE(review): the outer loop re-resolves up to 3 times even
    # after a success; kept as-is to preserve the original behavior.
    for _ in range(3):
        while True:
            try:
                address = socket.gethostbyname(host_name)
            except socket.gaierror:
                continue
            break
    # Fix: the module "socket" is not callable — the original
    # "socket(socket.AF_INET, ...)" raised TypeError; the class is
    # socket.socket (consistent with socket.gethostname() above).
    self.splitter_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.splitter_socket.bind((address, peer_port))
    try:
        self.splitter_socket.connect(self.splitter)
    except (ConnectionRefusedError, ConnectionResetError) as error:
        # Both failure modes were handled identically; merged.
        stderr.write(
            f"{self.public_endpoint}: {error} when connecting to the splitter {self.splitter}"
        )
        return False
    # Be careful, not "127.0.1.1 hostname" in /etc/hosts.
    self.splitter = self.splitter_socket.getpeername()
    #self.private_endpoint = self.splitter_socket.getsockname()
    self.lg.debug(
        f"{self.public_endpoint}: connected to the splitter at {self.splitter}"
    )
    return True
def unpack_message(self, packet, sender):
    """Unpack *packet* and dispatch it.

    Chunks (leading int >= 0) are converted to the buffer layout,
    latency-accounted, buffered and relayed; negative leading ints
    select the control-message handlers.  Returns (chunk_number, sender).
    """
    msg_format = "!i" + (len(packet) - 4) * 's'
    chunk_number, *i_dont_know = struct.unpack(msg_format, packet)
    if chunk_number >= 0:
        self.received_chunks += 1
        chunk = list(struct.unpack(self.packet_format, packet))
        chunk[ChunkStructure.ORIGIN_ADDR] = IP_tools.int2ip(
            chunk[ChunkStructure.ORIGIN_ADDR])
        chunk[ChunkStructure.HOPS] += 1
        # Latency accounting for the current round.
        transmission_time = time.time() - chunk[ChunkStructure.TIME]
        self.accumulated_latency_in_the_round += transmission_time
        #stderr.write(f" <-{transmission_time}->")
        self.lg.debug(
            f"{self.ext_id}: transmission time={transmission_time}")
        self.lg.debug(
            f"{self.ext_id}: received chunk {chunk} from {sender}")
        self.process_chunk(chunk, sender)
        self.send_chunks_to_the_next_neighbor()
    else:
        if chunk_number == Messages.HELLO:
            self.process_hello(sender)
        elif chunk_number == Messages.GOODBYE:
            self.process_goodbye(sender)
        elif chunk_number == Messages.REQUEST:
            _, requested_chunk = struct.unpack('!ii', packet)
            self.process_request(requested_chunk, sender)
        elif chunk_number == Messages.PRUNE:
            _, origin_ip, origin_port = struct.unpack('!iIi', packet)
            self.process_prune(
                (IP_tools.int2ip(origin_ip), origin_port), sender)
        elif chunk_number == Messages.REQUEST_ORIGIN:
            _, origin_ip, origin_port = struct.unpack('!iIi', packet)
            self.process_request_origin(
                (IP_tools.int2ip(origin_ip), origin_port), sender)
        else:
            # Fix: this string was missing the f-prefix, so the
            # placeholders were printed literally.
            stderr.write(
                f"{self.ext_id}: unexpected control chunk with code={chunk_number}"
            )
    return (chunk_number, sender)
def buffer_data(self):
    """Fill half of the (circular) buffer before playback starts.

    Receives chunks until half of the buffer positions after the
    first received chunk are populated.  Aborts early whenever the
    player disconnects.
    """
    if __debug__:
        self.lg.debug(f"{self.ext_id}: buffering")
        start_time = time.time()
    # Receive a chunk.
    (chunk_number, sender) = self.process_next_message()
    self.prev_received_chunk = chunk_number
    # <-----
    self.delta = chunk_number
    # Skip control messages (negative chunk numbers).
    while (chunk_number < 0):
        (chunk_number, sender) = self.process_next_message()
        if self.player_connected == False:
            break
    # The first chunk to play is the firstly received chunk (which
    # probably will not be the received chunk with the smallest
    # index).
    self.chunk_to_play = chunk_number
    self.lg.debug(
        f"{self.ext_id}: position in the buffer of the first chunk to play={self.chunk_to_play}"
    )
    # Keep receiving until half of the circular buffer (counted from
    # self.chunk_to_play) is filled.
    while (chunk_number < self.chunk_to_play) or ((
            (chunk_number - self.chunk_to_play) % self.buffer_size) <
            (self.buffer_size // 2)):
        (chunk_number, _) = self.process_next_message()
        if self.player_connected == False:
            break
        # Discard anything older than the first chunk to play.
        while (chunk_number < self.chunk_to_play):
            (chunk_number, _) = self.process_next_message()
            if self.player_connected == False:
                break
    self.prev_received_chunk = chunk_number
    if __debug__:
        buffering_time = time.time() - start_time
        self.lg.debug(f"{self.ext_id}: buffering time={buffering_time}")
        stderr.write(f" {buffering_time:.2f}")
def process_request(self, chunk_number, sender): stderr.write( f" {colorama.Back.CYAN}{colorama.Fore.BLACK}{chunk_number}{colorama.Style.RESET_ALL}" ) #stderr.write(f" {colorama.Fore.CYAN}{chunk_number}{colorama.Style.RESET_ALL}") self.lg.debug( f"{self.ext_id}: received [request {chunk_number}] from {sender}") #stderr.write(f" R{self.ext_id}/{chunk_number}/{sender}") position = chunk_number % self.buffer_size buffer_box = self.buffer[position] if buffer_box[ChunkStructure.CHUNK_DATA] != b'L': origin = buffer_box[ChunkStructure.ORIGIN_ADDR], buffer_box[ ChunkStructure.ORIGIN_PORT] if origin != sender: self.update_forward(origin, sender) else: self.lg.debug( f"{self.ext_id}: update_forward: origin {origin} is the sender of the request" ) else: # I haven't the chunk pass '''
def run(self):
    """Drive a whole simulation.

    Launches the drawing/store processes, the splitter and a monitor,
    then spawns peers at random while listening to the status queue,
    and finally kills every child process.
    """
    self.lg.debug("simulator: platform.system()={}".format(
        platform.system()))
    # if __debug__:
    #     if platform.system() == 'Linux':
    #         plt.switch_backend("TkAgg")
    #     elif platform.system() == 'Darwin':
    #         plt.switch_backend("macosx")
    #     plt.style.use("seaborn-white")

    # Removing temporal socket files
    for pattern in ['*_udp', '*_tcp']:
        for tmp_file in glob(pattern):
            os.remove(tmp_file)

    # Listen to the team for drawing
    sim.FEEDBACK["DRAW"] = Queue()
    Process(target=self.store).start()
    if self.gui is True:
        Process(target=self.draw).start()

    # Listen to the team for simulation life
    sim.FEEDBACK["STATUS"] = Queue()

    # Create shared list for CIS set of rules (only when cis is choosen?)
    manager = Manager()
    sim.SHARED_LIST["faulty"] = manager.list()
    sim.SHARED_LIST["regular"] = manager.list()
    sim.SHARED_LIST["attacked"] = manager.list()

    # Automatic bitrate control only for CIS-SSS
    sim.RECV_LIST = manager.dict()
    # sim.LOCK = Semaphore()

    # Share splitter (ip address,port) with peers
    self.splitter_id = manager.dict()

    # Run splitter
    p = Process(target=self.run_a_splitter, args=[self.splitter_id])
    p.start()
    self.processes["S"] = p.pid
    self.attended_monitors = 0
    self.attended_peers = 0
    self.attended_faulty_peers = 0
    time.sleep(1)

    # run a monitor
    p = Process(target=self.run_a_peer,
                args=[
                    self.splitter_id['address'], "monitor",
                    "M" + str(self.attended_monitors + 0), True
                ])
    p.start()
    # NOTE(review): the process is registered under index +1 but the
    # monitor is named with index +0 above — confirm the off-by-one
    # is intentional.
    self.processes["M" + str(self.attended_monitors + 1)] = p.pid
    self.attended_monitors += 1

    queue = sim.FEEDBACK["STATUS"]
    m = queue.get()
    # Main event loop: track rounds and spawn peers with prob. P_in.
    while m[0] != "Bye" and self.current_round < self.number_of_rounds:
        if m[0] == "R":
            self.current_round = m[1]
        r = np.random.uniform(0, 1)
        if (r <= Simulator.P_in) and (self.current_round < self.number_of_rounds):
            self.addPeer()
        m = queue.get()

    # Shut everything down.
    sim.FEEDBACK["DRAW"].put(("Bye", "Bye"))
    sim.FEEDBACK["STATUS"].put(("Bye", "Bye"))
    for name, pid in self.processes.items():
        self.lg.debug("Killing {}, ...".format(name))
        os.system("kill -9 " + str(pid))
        self.lg.debug("{} killed".format(name))
    stderr.write("\n")
def __init__(
        self,
        drawing_log="/tmp/drawing_log.txt",
        set_of_rules="DBS2",
        number_of_monitors=1,
        number_of_peers=7,  # Monitor apart
        number_of_rounds=100,
        number_of_faulty=0,
        buffer_size=32,
        chunk_cadence=0.01,
        min_activity=-5,  # rounds
        max_chunk_loss=16,
        speed=1000.0,
        seed=None,
        horizon=0,  #buffer_size - number_of_peers*4,
        optimal_neighborhood_degree=3,
        gui=False):
    """Store the simulation parameters and print the output legend.

    Parameters mirror the attributes they initialize (chunk_cadence
    is currently accepted but not stored).  Also configures logging
    and seeds NumPy's RNG with *seed*.
    """
    #logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logging.basicConfig(
        stream=sys.stdout,
        format=
        "%(asctime)s.%(msecs)03d %(message)s %(levelname)-8s %(name)s %(pathname)s:%(lineno)d",
        datefmt="%H:%M:%S")
    self.lg = logging.getLogger(__name__)
    if __debug__:
        self.lg.setLevel(logging.DEBUG)
    else:
        self.lg.setLevel(logging.ERROR)

    self.set_of_rules = set_of_rules
    self.number_of_peers = int(number_of_peers)
    self.number_of_monitors = int(number_of_monitors)
    self.drawing_log = drawing_log
    self.number_of_rounds = int(number_of_rounds)
    self.number_of_faulty = number_of_faulty
    self.buffer_size = int(buffer_size)
    self.min_activity = int(min_activity)
    self.max_chunk_loss = float(max_chunk_loss)
    self.current_round = 0
    self.speed = float(speed)
    self.seed = seed
    self.horizon = horizon
    self.optimal_neighborhood_degree = optimal_neighborhood_degree
    self.gui = gui
    self.processes = {}

    stderr.write(f"Parameters:\n")
    # Improvement: the original repeated an identical debug+stderr
    # pair per parameter; drive both outputs from one table (output
    # bytes are unchanged).
    for name, value in (
            ("set_of_rules", f"\"{self.set_of_rules}\""),
            ("number_of_peers", self.number_of_peers),
            ("number_of_monitors", self.number_of_monitors),
            ("number_of_rounds", self.number_of_rounds),
            ("number_of_faulty", self.number_of_faulty),
            ("buffer_size", self.buffer_size),
            ("min_activity", self.min_activity),
            ("max_chunk_loss", self.max_chunk_loss),
            ("speed", self.speed),
            ("seed", self.seed),
            ("horizon", self.horizon),
            ("optimal_neighborhood_degree",
             self.optimal_neighborhood_degree),
    ):
        self.lg.debug(f"{name}={value}")
        stderr.write(f"| {name}={value}\n")
    stderr.write("\n")

    # Legend of the symbols printed during the simulation.
    stderr.write(f"Output synopsis:\n")
    stderr.write(f"| CPU_usage\n")
    stderr.write(f"| Buffering.time\n")
    stderr.write(
        f"| {colorama.Fore.GREEN}Chunk number{colorama.Style.RESET_ALL}\n")
    stderr.write(
        f"| {colorama.Fore.MAGENTA}Team size{colorama.Style.RESET_ALL}\n")
    stderr.write(
        f"| {colorama.Fore.YELLOW}Round{colorama.Style.RESET_ALL}\n")
    stderr.write(
        f"| {colorama.Fore.RED}Peer/lost chunk{colorama.Style.RESET_ALL}\n"
    )
    stderr.write(
        f"| {colorama.Fore.BLUE}Deleted peer{colorama.Style.RESET_ALL}\n")
    if __debug__:
        stderr.write(
            f"| {colorama.Back.RED}{colorama.Fore.BLACK}Max hops{colorama.Style.RESET_ALL}\n"
        )
        stderr.write(
            f"| {colorama.Fore.CYAN}Requested chunk{colorama.Style.RESET_ALL}\n"
        )
        #stderr.write(f"| {colorama.Fore.CYAN}Sender/Requested chunk/Receiver{colorama.Back.CYAN} {colorama.Fore.BLACK}Requested chunk/requesting peer{colorama.Style.RESET_ALL}\n")
        stderr.write(
            f"| {colorama.Back.CYAN}{colorama.Fore.BLACK}Prunned origin{colorama.Style.RESET_ALL}\n"
        )
    stderr.write("\n")
    np.random.seed(self.seed)
def __init__(
        self,
        drawing_log="/tmp/drawing_log.txt",
        set_of_rules="DBS2",
        number_of_monitors=1,
        number_of_peers=7,  # Monitor apart
        number_of_rounds=100,
        number_of_faulty=0,
        buffer_size=32,
        chunk_cadence=0.01,
        max_chunk_loss_at_peers=10,  # chunks/secon
        max_chunk_loss_at_splitter=16,
        speed=1000.0,
        gui=False):
    """Store the simulation parameters and print the output legend.

    NOTE(review): chunk_cadence and number_of_peers are accepted but
    only number_of_peers is stored; chunk_cadence is unused here —
    confirm whether it should be kept.
    """
    #logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logging.basicConfig(
        stream=sys.stdout,
        format=
        "%(asctime)s.%(msecs)03d %(message)s %(levelname)-8s %(name)s %(pathname)s:%(lineno)d",
        datefmt="%H:%M:%S")
    self.lg = logging.getLogger(__name__)
    if __debug__:
        self.lg.setLevel(logging.DEBUG)
    else:
        self.lg.setLevel(logging.ERROR)

    self.set_of_rules = set_of_rules
    self.number_of_peers = int(number_of_peers)
    self.number_of_monitors = int(number_of_monitors)
    self.drawing_log = drawing_log
    self.number_of_rounds = int(number_of_rounds)
    self.number_of_faulty = number_of_faulty
    self.buffer_size = int(buffer_size)
    self.max_chunk_loss_at_peers = int(max_chunk_loss_at_peers)
    self.max_chunk_loss_at_splitter = float(max_chunk_loss_at_splitter)
    self.current_round = 0
    self.speed = float(speed)
    self.gui = gui
    self.processes = {}

    # Echo every parameter both to the logger and to stderr.
    self.lg.debug(f"set_of_rules=\"{self.set_of_rules}\"")
    stderr.write(f"set_of_rules=\"{self.set_of_rules}\"\n")
    self.lg.debug(f"number_of_peers={self.number_of_peers}")
    stderr.write(f"number_of_peers={self.number_of_peers}\n")
    self.lg.debug(f"number_of_monitors={self.number_of_monitors}")
    stderr.write(f"number_of_monitors={self.number_of_monitors}\n")
    self.lg.debug(f"number_of_rounds={self.number_of_rounds}")
    stderr.write(f"number_of_rounds={self.number_of_rounds}\n")
    self.lg.debug(f"number_of_faulty={self.number_of_faulty}")
    stderr.write(f"number_of_faulty={self.number_of_faulty}\n")
    self.lg.debug(f"buffer_size={self.buffer_size}")
    stderr.write(f"buffer_size={self.buffer_size}\n")
    self.lg.debug(
        f"max_chunk_loss_at_peers={self.max_chunk_loss_at_peers}")
    # NOTE(review): only this line carries a "simulator: " prefix —
    # confirm the inconsistency is intentional.
    stderr.write(
        f"simulator: max_chunk_loss_at_peers={self.max_chunk_loss_at_peers}\n"
    )
    self.lg.debug(
        f"max_chunk_loss_at_splitter={self.max_chunk_loss_at_splitter}")
    stderr.write(
        f"max_chunk_loss_at_splitter={self.max_chunk_loss_at_splitter}\n")
    self.lg.debug(f"speed={self.speed}")
    stderr.write(f"speed={self.speed}\n")

    # Legend of the symbols printed during the simulation.
    stderr.write(f"CPU usage\n")
    stderr.write(
        f"{colorama.Fore.MAGENTA}Team size{colorama.Style.RESET_ALL}\n")
    stderr.write(
        f"{colorama.Fore.RED}Lost chunk{colorama.Style.RESET_ALL}\n")
    stderr.write(
        f"{colorama.Fore.RED}(Unsupportive peer){colorama.Style.RESET_ALL}\n"
    )
    stderr.write(
        f"{colorama.Fore.BLUE}Deleted peer{colorama.Style.RESET_ALL}\n")
    stderr.write(
        f"{colorama.Fore.YELLOW}Round{colorama.Style.RESET_ALL}\n")
    if __debug__:
        stderr.write(
            f"{colorama.Back.RED}{colorama.Fore.BLACK}Max hops{colorama.Style.RESET_ALL}\n"
        )
        stderr.write(
            f"{colorama.Back.CYAN}{colorama.Fore.BLACK}Requested chunk{colorama.Style.RESET_ALL}\n"
        )
        stderr.write(
            f"{colorama.Fore.CYAN}Prunned chunk{colorama.Style.RESET_ALL}\n")
def del_peer(self, peer_index):
    """Drop the peer at *peer_index* from the team and report it."""
    self.team.pop(peer_index)
    stderr.write(
        f" {colorama.Fore.BLUE}{peer_index}({len(self.team)}){colorama.Style.RESET_ALL}"
    )
def process_prune(self, chunk_number, sender):
    """Handle a [prune chunk_number] message from *sender*.

    *sender* is receiving duplicates of the chunks originated at the
    origin of chunk_number, so it is removed from that origin's
    forwarding list (only when both the chunk and the entry exist).
    """
    stderr.write(
        f" {colorama.Fore.CYAN}{chunk_number}{colorama.Style.RESET_ALL}")
    self.lg.debug(
        f"{self.ext_id}: received [prune {chunk_number}] from {sender}")

    def remove_sender(origin, sender):
        # Caller guarantees sender is in forward[origin]; asserted here.
        assert sender in self.forward[
            origin], f"{self.ext_id}: {sender} is not in self.forward[{origin}]={self.forward[origin]}"
        self.forward[origin].remove(sender)
        self.lg.debug(
            f"{self.ext_id}: process_prune: sender={sender} has been removed from forward[{origin}]={self.forward[origin]}"
        )
        # Previous implementation, kept (disabled) for reference.
        '''
        try:
            self.forward[origin].remove(sender)
            self.lg.debug(f"{self.ext_id}: process_prune: sender={sender} has been removed from forward[{origin}]={self.forward[origin]}")
        except ValueError:
            self.lg.error(f"{self.ext_id}: process_prune: failed to remove peer {sender} from forward={self.forward[origin]} for origin={origin} ")
        if len(self.forward[origin])==0:
            del self.forward[origin]
            if __debug__:
                if origin in self.forward:
                    self.lg.debug(f"{self.ext_id}: process_prune: origin {origin} is still in forward[{origin}]={self.forward[origin]}")
                else:
                    self.lg.debug(f"{self.ext_id}: process_prune: origin={origin} removed from forward={self.forward}")
        if __debug__:
            if origin == self.public_endpoint:
                try:
                    self.lg.debug(f"{self.ext_id}: process_prune: sender={sender} removed from the primary forwarding table (public_endpoint == origin={origin}) now with length {len(self.forward[self.public_endpoint])}")
                except KeyError:
                    pass
        '''

    position = chunk_number % self.buffer_size
    buffer_box = self.buffer[position]
    # Notice that chunk "chunk_number" should be stored in the
    # buffer because it has been sent to the neighbor that is
    # requesting the prune.
    # Only complete prunning if I have the origin of the pruned chunk.
    if buffer_box[ChunkStructure.CHUNK_NUMBER] == chunk_number:
        origin = buffer_box[ChunkStructure.ORIGIN_ADDR], buffer_box[
            ChunkStructure.ORIGIN_PORT]
        self.lg.debug(
            f"{self.ext_id}: process_prune: [prune {chunk_number}] received from {sender} for pruning origin={origin}"
        )
        if origin in self.forward:
            self.lg.debug(
                f"{self.ext_id}: process_prune: origin={origin} is in forward"
            )
            if sender in self.forward[origin]:
                self.lg.debug(
                    f"{self.ext_id}: process_prune: sender={sender} is in forward[{origin}]"
                )
                remove_sender(origin, sender)
            else:
                self.lg.debug(
                    f"{self.ext_id}: process_prune: sender={sender} is not in forward[{origin}]={self.forward[origin]}"
                )
        else:
            self.lg.debug(
                f"{self.ext_id}: process_prune: origin={origin} is not in forward={self.forward}"
            )
    else:
        self.lg.debug(
            f"{self.ext_id}: process_prune: chunk_number={chunk_number} is not in buffer ({self.buffer[position][ChunkStructure.CHUNK_NUMBER]}!={chunk_number})"
        )
def on_chunk_received_from_the_splitter(self, chunk):
    """Housekeeping performed when a chunk arrives from the splitter.

    Buffers the chunk, expels selfish neighbors, ages the activity
    table, garbage-collects empty forwarding tables and, in debug
    builds, prints per-round statistics.
    """
    chunk_number = chunk[ChunkStructure.CHUNK_NUMBER]
    origin = chunk[ChunkStructure.ORIGIN_ADDR], chunk[
        ChunkStructure.ORIGIN_PORT]
    self.lg.debug(
        f"{self.ext_id}: processing chunk {chunk_number} with origin {origin} received from the splitter"
    )
    self.buffer_chunk(chunk)

    # Remove selfish neighbors.
    for _origin in list(self.activity):
        if self.activity[_origin] < -5:
            del self.activity[_origin]
            for neighbors in self.forward.values():
                if _origin in neighbors:
                    neighbors.remove(_origin)

    # Increase inactivity.  Fix: the loop variable used to be named
    # "origin", clobbering the chunk origin computed above and making
    # the cleanup test below compare the wrong value.
    for active_origin in self.activity.keys():
        self.activity[active_origin] -= 1

    # Can produce network congestion!
    #for neighbor in self.pending:
    #    self.send_chunks(neighbor)

    # Remove empty forwarding tables (except our own).
    for _origin in list(self.forward):
        # Fix: compare the table's own key; the original compared the
        # (shadowed) chunk origin.
        if _origin != self.public_endpoint:
            if len(self.forward[_origin]) == 0:
                del self.forward[_origin]

    if __debug__:
        self.rounds_counter += 1
        for fwd_origin, neighbors in self.forward.items():
            fan_out = len(neighbors) * "#"
            self.lg.debug(
                f"{self.ext_id}: round={self.rounds_counter:03} origin={fwd_origin} K={len(neighbors):02} fan-out={fan_out:10}"
            )
        try:
            # Chunk Loss Ratio over the chunks of the last round.
            CLR = self.number_of_lost_chunks / (
                chunk_number - self.prev_chunk_number_round)
            self.lg.debug(
                f"{self.ext_id}: CLR={CLR:1.3} losses={self.number_of_lost_chunks} chunk_number={chunk_number} increment={chunk_number - self.prev_chunk_number_round}"
            )
        except ZeroDivisionError:
            pass
        self.prev_chunk_number_round = chunk_number
        self.number_of_lost_chunks = 0
        # Fix: renamed "max", which shadowed the builtin.
        max_hops = 0
        for box in self.buffer:
            if box[ChunkStructure.CHUNK_DATA] != b'L':
                if box[ChunkStructure.HOPS] > max_hops:
                    max_hops = box[ChunkStructure.HOPS]
        stderr.write(
            f" {colorama.Back.RED}{colorama.Fore.BLACK}{max_hops}{colorama.Style.RESET_ALL}"
        )
def play_chunk(self, chunk_number):
    """Consume chunk *chunk_number* from the buffer.

    If the chunk is present it is "played" (its data cleared, keeping
    the metadata); otherwise it is counted as lost, complained about
    (monitors only) and re-requested from a random team mate.
    """
    buffer_box = self.buffer[chunk_number % self.buffer_size]
    self.lg.debug(
        f"{self.ext_id}: chunk={chunk_number} hops={buffer_box[ChunkStructure.HOPS]}"
    )
    if buffer_box[ChunkStructure.CHUNK_DATA] != b'L':
        # Only the data will be empty in order to remember things ...
        self.buffer[chunk_number %
                    self.buffer_size] = self.clear_entry_in_buffer(buffer_box)
        self.played += 1
    else:
        # The cell in the buffer is empty.
        self.complain(chunk_number)  # Only monitors
        self.number_of_lost_chunks += 1
        self.lg.debug(
            f"{self.ext_id}: lost chunk! {self.chunk_to_play} (number_of_lost_chunks={self.number_of_lost_chunks})"
        )
        # The chunk "chunk_number" has not been received on time and
        # probably never will.  Request the lost chunk from a random
        # team mate; if duplicates start arriving afterwards, a
        # [prune <chunk_number>] will be sent to the duplicators.
        if len(self.team) > 1:
            peer = random.choice(self.team)
            self.request_chunk(chunk_number, peer)
            if peer == self.ext_id[1]:
                stderr.write(
                    f" ------------------------->hola!!!<---------------------"
                )
    self.number_of_chunks_consumed += 1
    if __debug__:
        # Render the buffer occupancy: the team index of each stored
        # chunk's origin, '-' for unknown origins, ' ' for empty cells.
        buf = ""
        for box in self.buffer:
            if box[ChunkStructure.CHUNK_DATA] != b'L':
                try:
                    _origin = list(self.team).index(
                        (box[ChunkStructure.ORIGIN_ADDR],
                         box[ChunkStructure.ORIGIN_PORT]))
                    # Fix: "buf += hash(_origin)" concatenated an int
                    # to a str and raised TypeError; show the team
                    # index instead.
                    buf += str(_origin)
                except ValueError:
                    buf += '-'  # Origin not in the team.
            else:
                buf += " "
        self.lg.debug(f"{self.ext_id}: buffer={buf}")