def test_02_export_channels(self):
    """Export two channel entries to a JSON file and check the file is non-empty."""
    # Build the data to export: wrap the sample "monitor" entry in a Category.
    Import_Export_Test.exported_data = get_data()
    monitor_channel = Channel(Import_Export_Test.exported_data["monitor"])
    test_category = Category("test")
    test_category.add("monitor",monitor_channel)
    # Register the same channel object under a second key as well.
    Import_Export_Test.exported_data["monitor1"] = get_data()["monitor"]
    test_category.add("monitor1",monitor_channel)
    exporter = JSON_Exporter()
    # Channel_Encoder serializes Channel objects for json.dumps / to_JSON.
    exporter.to_JSON(path,test_category.get_channels(),Channel_Encoder)
    print("\n")
    _print_("exporting channels to json file = " + path + "\n" +json.dumps(Import_Export_Test.exported_data ,indent = 4 , cls = Channel_Encoder))
    # The exported file must have been written (size > 0).
    size = file_util.file_size(path)
    self.assertNotEqual(0,size)
    print("----------------------------------------------------------------------")
def receive_the_list_of_peers(self):
    # {{{
    """Read the peer endpoints announced by the splitter and greet each one.

    Fills self.peer_list with (IP, port) tuples and initializes each
    peer's chunk debt to zero in self.debt.
    """
    self.debt = {}       # Per-peer chunk debt counters.
    self.peer_list = []  # The team structure (list of endpoints).
    #sys.stdout.write(Color.green)
    _p_("Requesting", self.number_of_peers, "peers to",
        self.splitter_socket.getpeername())
    #number_of_peers = socket.ntohs(struct.unpack("H",self.splitter_socket.recv(struct.calcsize("H")))[0])
    #_print_("The size of the team is", number_of_peers, "(apart from me)")
    endpoint_length = struct.calcsize("4sH")
    total = self.number_of_peers
    # Count down so the debug output shows how many endpoints remain.
    for remaining in range(total, 0, -1):
        packet = self.splitter_socket.recv(endpoint_length)
        raw_addr, raw_port = struct.unpack("4sH", packet)  # Ojo, !H ????
        peer = (socket.inet_ntoa(raw_addr), socket.ntohs(raw_port))
        _p_("[hello] sent to", peer)
        self.say_hello(peer)
        if __debug__:
            _p_("[%5d]" % remaining, peer)
        else:
            # Release mode: draw an in-place percentage progress indicator.
            _print_("{:.2%}\r".format((total - remaining) / total), end='')
        self.peer_list.append(peer)
        self.debt[peer] = 0
    _p_("List of peers received")
def receive_the_list_of_peers(self):
    # {{{
    """Receive the team's endpoints from the splitter and say hello to each."""
    self.debt = {}       # Chunks debts per peer.
    self.peer_list = []  # The list of peers structure.
    #sys.stdout.write(Color.green)
    _p_("Requesting", self.number_of_peers, "peers to",
        self.splitter_socket.getpeername())
    #number_of_peers = socket.ntohs(struct.unpack("H",self.splitter_socket.recv(struct.calcsize("H")))[0])
    #_print_("The size of the team is", number_of_peers, "(apart from me)")
    tmp = self.number_of_peers
    while tmp > 0:
        # Each endpoint arrives as a packed (IPv4, network-order port) pair.
        message = self.splitter_socket.recv(struct.calcsize("4sH"))
        IP_addr, port = struct.unpack("4sH", message) # Ojo, !H ????
        IP_addr = socket.inet_ntoa(IP_addr)
        port = socket.ntohs(port)
        peer = (IP_addr, port)
        _p_("[hello] sent to", peer)
        self.say_hello(peer)
        if __debug__:
            _p_("[%5d]" % tmp, peer)
        else:
            # Release mode: in-place percentage progress indicator.
            _print_("{:.2%}\r".format(
                (self.number_of_peers - tmp) / self.number_of_peers), end='')
        self.peer_list.append(peer)
        self.debt[peer] = 0
        tmp -= 1
    _p_("List of peers received")
def test_01_delete_channels_data(self):
    """Remove any pre-existing sample data file and verify it is gone."""
    print("\n")
    _print_("deleting existing sample_data...")
    file_util.file_del(path)
    # After deletion, the reported size must be zero.
    remaining_bytes = file_util.file_size(path)
    self.assertEqual(0, remaining_bytes)
    print("----------------------------------------------------------------------")
def receive_dsa_key(self):
    """Receive the splitter's DSA public key components and build the key.

    The splitter sends (y, g, p, q) as fixed-width byte fields; each is
    converted to a long before constructing the DSA key object.
    """
    key_format = "256s256s256s40s"
    raw = self.splitter_socket.recv(struct.calcsize(key_format))
    # Fields arrive in (y, g, p, q) order, exactly as DSA.construct expects.
    components = tuple(self.convert_to_long(field)
                       for field in struct.unpack(key_format, raw))
    self.dsa_key = DSA.construct(components)
    _print_("DSA key received")
def disconnect_from_the_splitter(self):
    # {{{
    """Disconnect from the splitter, aborting the peer if it fails.

    If the splitter already dropped this peer (e.g. due to a timeout),
    the disconnection attempt raises; in that case the player loop is
    stopped and the process exits with status 1.
    """
    try:
        self.try_to_disconnect_from_the_splitter()
    except Exception:
        # Fix: was a bare "except:", which would also intercept
        # SystemExit and KeyboardInterrupt; catch only real errors.
        traceback.print_exc()
        _print_(Common.NTS_COLOR +
                "NTS: Probably the splitter removed this peer due to timeout\n"
                + Color.none)
        self.player_alive = False
        sys.exit(1)
def disconnect_from_the_splitter(self):
    # {{{
    """Disconnect from the splitter; abort the peer if disconnection fails."""
    try:
        self.try_to_disconnect_from_the_splitter()
    except:
        # NOTE(review): bare "except:" also catches SystemExit and
        # KeyboardInterrupt — consider narrowing to "except Exception:".
        traceback.print_exc()
        _print_(
            Common.NTS_COLOR +
            "NTS: Probably the splitter removed this peer due to timeout\n"
            + Color.none)
        # Stop the player loop and terminate with a failure status.
        self.player_alive = False
        sys.exit(1)
def test_03_import_channels(self):
    """Import channels back from the JSON file and compare with the export."""
    importer = JSON_Importer()
    Import_Export_Test.imported_data = importer.from_JSON(path)
    print("\n")
    _print_("importing channels from json file = " + path + "\n" +json.dumps(Import_Export_Test.imported_data,indent = 4))
    # Round-trip check: what was exported must equal what was imported.
    self.assertEqual(Import_Export_Test.exported_data ,Import_Export_Test.imported_data)
    print("----------------------------------------------------------------------")
def connect_to_the_splitter(self):
    # {{{ Setup "splitter" and "splitter_socket"
    """Open the TCP connection to the splitter.

    Determines the local IP address to bind, optionally binds to a fixed
    local port (self.PORT), then connects "splitter_socket" to
    (SPLITTER_ADDR, SPLITTER_PORT). Exits the process on failure.
    """
    # Nota: Ahora no reconvertimos de TCP a UDP!
    self.splitter_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.splitter = (self.SPLITTER_ADDR, self.SPLITTER_PORT)
    _p_("use_localhost =", self.USE_LOCALHOST)
    if self.USE_LOCALHOST:
        my_ip = '0.0.0.0' # Or '127.0.0.1'
        #my_ip = '127.0.0.1'
    else:
        # Discover the outgoing interface address by "connecting" a UDP
        # socket towards the splitter (no packets are actually sent).
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(self.splitter)
        #my_ip = socket.gethostbyname(socket.gethostname())
        my_ip = s.getsockname()[0]
        s.close()
    _p_("Connecting to the splitter at", self.splitter, "from", my_ip)
    if self.PORT != 0:
        # A specific local port was requested: allow quick rebinding of
        # a recently used address, then bind to it.
        try:
            self.splitter_socket.setsockopt(socket.SOL_SOCKET,
                                            socket.SO_REUSEADDR, 1)
        except Exception as e:
            _print_(e)
            pass
        #sys.stdout.write(Color.purple)
        _p_("I'm using port the port", self.PORT)
        #sys.stdout.write(Color.none)
        self.splitter_socket.bind((my_ip, self.PORT))
    else:
        # Port 0: let the OS pick an ephemeral port.
        self.splitter_socket.bind((my_ip, 0))
    try:
        self.splitter_socket.connect(self.splitter)
    except Exception as e:
        if __debug__:
            _p_(e)
        else:
            _print_(e)
        #sys.stdout.write(Color.red)
        #sys.exit("Sorry. Can't connect to the splitter at " + str(self.splitter))
        #sys.stdout.write(Color.none)
        # Cannot reach the splitter: the peer cannot work, so terminate.
        sys.exit()
    _p_("Connected to the splitter at", self.splitter)
def process_bad_message(self, message, sender):
    """Blacklist a misbehaving sender and drop it as a forwarding target.

    The sender is recorded in self.bad_peers and removed from
    self.peer_list so it no longer receives chunks from this peer.
    """
    _print_("bad peer: " + str(sender))
    # Remember the offender first, then forget it as a team member.
    self.bad_peers.append(sender)
    self.peer_list.remove(sender)
def _p_(*args, **kwargs):
    """Colorize the output.

    Prints *args prefixed with "DIS (STRPEDS):" in the module's color,
    forwarding **kwargs (e.g. end=) to _print_.
    """
    sys.stdout.write(Common.DIS)
    # Fix: **kwargs was accepted but silently dropped; _print_ is called
    # elsewhere in this project with keyword arguments such as end=''.
    _print_("DIS (STRPEDS):", *args, **kwargs)
    sys.stdout.write(Color.none)
def print_the_module_name(self):
    # {{{
    """Announce this module ("Lossy Peer") in yellow on stdout."""
    out = sys.stdout
    out.write(Color.yellow)
    _print_("Lossy Peer")
    out.write(Color.none)
def _p_(*args, **kwargs):
    """Colorize the output.

    Prints *args prefixed with "DBS (malicious):" in the module's color,
    forwarding **kwargs (e.g. end=) to _print_.
    """
    sys.stdout.write(Common.DBS)
    # Fix: **kwargs was accepted but silently dropped; _print_ is called
    # elsewhere in this project with keyword arguments such as end=''.
    _print_("DBS (malicious):", *args, **kwargs)
    sys.stdout.write(Color.none)
def process_message(self, message, sender):
    # {{{ Handle NTS messages; pass other messages to base class
    """Dispatch one incoming datagram according to the NTS handshake.

    Message kinds are distinguished by sender and length:
    - splitter, ID + "4sHHH": [send hello to X] with port prediction data;
    - splitter, ID + "4sHHHH": same plus an extra splitter port to probe;
    - our own peer_id (several framings): acknowledge of a sent hello;
    - bare peer ID: [hello] from another peer;
    - b'H': legacy DBS hello, ignored;
    - anything else from an unknown sender: ignored.
    Returns -1 when no chunk was carried; once fully incorporated,
    defers to Peer_DBS.process_message() and returns its result.
    """
    if sender == self.splitter and \
        len(message) == Common.PEER_ID_LENGTH + struct.calcsize("4sHHH"):
        # say [hello to (X)] received from splitter
        peer_id = message[:Common.PEER_ID_LENGTH].decode()
        IP_addr, source_port_to_splitter, port_diff, peer_number = \
            struct.unpack("4sHHH", message[Common.PEER_ID_LENGTH:])
        IP_addr = socket.inet_ntoa(IP_addr)
        source_port_to_splitter = socket.ntohs(source_port_to_splitter)
        port_diff = socket.ntohs(port_diff)
        peer_number = socket.ntohs(peer_number)
        peer = (IP_addr, source_port_to_splitter) # Endpoint to splitter
        _p_("Received [send hello to %s %s]" % (peer_id, peer))
        _p_("port_diff = %s" % port_diff)
        _p_("peer_number = %s" % peer_number)
        # Here the port prediction happens:
        additional_ports = \
            self.get_probable_source_ports(source_port_to_splitter,
                                           port_diff, peer_number)
        self.say_hello(peer, additional_ports)
        # Directly start packet sending
        self.hello_messages_event.set()
    elif sender == self.splitter and \
        len(message) == Common.PEER_ID_LENGTH + struct.calcsize("4sHHHH"):
        # say [hello to (X)] received from splitter
        peer_id = message[:Common.PEER_ID_LENGTH].decode()
        IP_addr, source_port_to_splitter, port_diff, peer_number, \
            extra_splitter_port = struct.unpack( \
            "4sHHHH", message[Common.PEER_ID_LENGTH:]) # Ojo, !H ????
        IP_addr = socket.inet_ntoa(IP_addr)
        source_port_to_splitter = socket.ntohs(source_port_to_splitter)
        port_diff = socket.ntohs(port_diff)
        peer_number = socket.ntohs(peer_number)
        extra_splitter_port = socket.ntohs(extra_splitter_port)
        peer = (IP_addr, source_port_to_splitter) # Endpoint to splitter
        _p_("Received [send hello to %s %s]" % (peer_id, peer))
        # Here the port prediction happens:
        additional_ports = \
            self.get_probable_source_ports(source_port_to_splitter,
                                           port_diff, peer_number)
        self.say_hello(peer, additional_ports)
        # Send to extra splitter port to determine currently allocated
        # source port
        self.say_hello((self.splitter[0], extra_splitter_port))
        # Directly start packet sending
        self.hello_messages_event.set()
    elif message == self.peer_id.encode() or (sender == self.splitter and \
        len(message) == Common.PEER_ID_LENGTH + struct.calcsize("H")) or \
        (sender == self.splitter and \
        len(message) == Common.PEER_ID_LENGTH+1 + struct.calcsize("H")) or \
        len(message) == Common.PEER_ID_LENGTH+1: # All sent message sizes
        # Acknowledge received; stop sending the message
        with self.hello_messages_lock:
            for hello_data in self.hello_messages:
                if message == hello_data[0] \
                    and sender[0] == hello_data[1][0] \
                    and sender[1] in self.hello_messages_ports[hello_data]:
                    _p_("Received acknowledge from %s" % (sender,))
                    self.hello_messages.remove(hello_data)
                    del self.hello_messages_times[hello_data]
                    del self.hello_messages_ports[hello_data]
                    # No chunk number, as no chunk was received
                    return -1
        _print_(Common.NTS_COLOR +
                "NTS: Received acknowledge from unknown host %s"
                % (sender,) + Color.none)
    elif len(message) == Common.PEER_ID_LENGTH:
        # [hello] from another peer carrying its ID.
        peer_id = message.decode()
        _p_("Received [hello (ID %s)] from %s" % (message, sender))
        # Send acknowledge
        self.team_socket.sendto(message, sender)
        if sender not in self.peer_list:
            _p_("Appending peer %s %s to list" % (peer_id, sender))
            self.peer_list.append(sender)
            self.debt[sender] = 0
        # Send source port information to splitter
        # NOTE(review): structure reconstructed from whitespace-mangled
        # source; confirm whether these statements belong inside the
        # "if sender not in self.peer_list" block above.
        message += struct.pack("H", socket.htons(sender[1]))
        message_data = (message, self.splitter)
        self.send_message(message_data)
        if peer_id in self.initial_peer_list:
            self.initial_peer_list.remove(peer_id)
    elif message == b'H':
        _p_("Received [DBS hello] from %s" % str(sender))
        # Ignore hello messages that are sent by Peer_DBS instances in
        # receive_the_list_of_peers() before a Peer_NTS instance is created
        pass
    elif sender != self.splitter and sender not in self.peer_list:
        _p_("Ignoring message of length %d from unknown %s" \
            % (len(message), sender))
    elif len(self.initial_peer_list) == 0:
        # Start receiving chunks when fully incorporated
        return Peer_DBS.process_message(self, message, sender)
    # No chunk number, as no chunk was received
    return -1
def _p_(*args, **kwargs):
    """Colorize the output (debug builds only).

    In debug mode, prints *args prefixed with "DBS:" in the module's
    color, forwarding **kwargs (e.g. end=) to _print_. No-op otherwise.
    """
    if __debug__:
        sys.stdout.write(Common.DBS_COLOR)
        # Fix: **kwargs was accepted but silently dropped; _print_ is
        # called elsewhere in this project with keywords such as end=''.
        _print_("DBS:", *args, **kwargs)
        sys.stdout.write(Color.none)
def buffer_data(self):
    # {{{ Buffering
    """Fill half of the circular chunk buffer before playback starts."""
    # {{{ The peer dies if the player disconnects.
    # }}}
    self.player_alive = True
    # {{{ The last chunk sent to the player.
    # }}}
    self.played_chunk = 0
    # {{{ Counts the number of executions of the recvfrom()
    # function.
    # }}}
    self.recvfrom_counter = 0
    # {{{ Label the chunks in the buffer as "received" or "not
    # received".
    # }}}
    self.received_flag = []
    # {{{ The buffer of chunks is a structure that is used to delay
    # the playback of the chunks in order to accommodate the
    # network jittter. Two components are needed: (1) the "chunks"
    # buffer that stores the received chunks and (2) the
    # "received" buffer that stores if a chunk has been received
    # or not. Notice that each peer can use a different
    # buffer_size: the smaller the buffer size, the lower start-up
    # time, the higher chunk-loss ratio. However, for the sake of
    # simpliticy, all peers will use the same buffer size.
    self.chunks = [""] * self.buffer_size
    self.received_flag = [False] * self.buffer_size
    self.received_counter = 0
    # }}}

    # Wall time (execution time plus waiting time).
    start_time = time.time()

    # We will send a chunk to the player when a new chunk is
    # received. Besides, those slots in the buffer that have not been
    # filled by a new chunk will not be send to the player. Moreover,
    # chunks can be delayed an unknown time. This means that (due to the
    # jitter) after chunk X, the chunk X+Y can be received (instead of the
    # chunk X+1). Alike, the chunk X-Y could follow the chunk X. Because
    # we implement the buffer as a circular queue, in order to minimize
    # the probability of a delayed chunk overwrites a new chunk that is
    # waiting for traveling the player, we wil fill only the half of the
    # circular queue.
    _p_(self.team_socket.getsockname(), "\b: buffering = 000.00%")
    sys.stdout.flush()

    # First chunk to be sent to the player. The
    # process_next_message() procedure returns the chunk number if
    # a packet has been received or -2 if a time-out exception has
    # been arised.
    chunk_number = self.process_next_message()
    while chunk_number < 0:
        chunk_number = self.process_next_message()
    _p_(chunk_number)
    self.played_chunk = chunk_number
    _p_("First chunk to play", self.played_chunk)
    _p_(self.team_socket.getsockname(), "\b: buffering (\b",
        repr(100.0 / self.buffer_size).rjust(4))

    # Now, fill up to the half of the buffer.
    for x in range(int(self.buffer_size / 2)):
        _print_("{:.2%}\r".format((1.0 * x) / (self.buffer_size / 2)), end='')
        # Buffering progress in percent (presumably for a GUI adapter;
        # the adapter hook below is commented out).
        BUFFER_STATUS = (100 * x) / (self.buffer_size / 2) + 1
        #if Common.CONSOLE_MODE == False :
        #    GObject.idle_add(buffering_adapter.update_widget,BUFFER_STATUS)
        #else:
        #    pass
        #print("!", end='')
        sys.stdout.flush()
        # Block until one more chunk has actually been received.
        while self.process_next_message() < 0:
            pass
    print()
    latency = time.time() - start_time
    _p_('latency =', latency, 'seconds')
    _p_("buffering done.")
    sys.stdout.flush()
def dbs_process_message(self, message, sender):
    # {{{ Now, receive and send.
    """Store an incoming chunk and forward previously received chunks.

    Chunk messages (length == message_format) are buffered and trigger
    forwarding: burst mode when the sender is the splitter, congestion-
    avoiding mode when the sender is a peer. Control messages manage
    team membership. Returns the chunk number, or -1 for control.
    """
    if len(message) == struct.calcsize(self.message_format):
        # {{{ A video chunk has been received
        chunk_number, chunk = self.unpack_message(message)
        self.chunks[chunk_number % self.buffer_size] = chunk
        self.received_flag[chunk_number % self.buffer_size] = True
        self.received_counter += 1
        if sender == self.splitter:
            # {{{ Send the previous chunk in burst sending
            # mode if the chunk has not been sent to all
            # the peers of the list of peers.

            # {{{ debug
            if __debug__:
                _print_("DBS:", self.team_socket.getsockname(), \
                        Color.red, "<-", Color.none, chunk_number, "-", sender)
            # }}}
            while( (self.receive_and_feed_counter < len(self.peer_list)) and (self.receive_and_feed_counter > 0) ):
                peer = self.peer_list[self.receive_and_feed_counter]
                self.send_chunk(peer)
                # {{{ debug
                if __debug__:
                    print ("DBS:", self.team_socket.getsockname(), "-",\
                        socket.ntohs(struct.unpack(self.message_format, \
                        self.receive_and_feed_previous)[0]),\
                        Color.green, "->", Color.none, peer)
                # }}}
                # Charge one debt unit; evict peers that owe too much.
                self.debt[peer] += 1
                if self.debt[peer] > self.MAX_CHUNK_DEBT:
                    print (Color.red, "DBS:", peer, 'removed by unsupportive (' + str(self.debt[peer]) + ' lossess)', Color.none)
                    del self.debt[peer]
                    self.peer_list.remove(peer)
                self.receive_and_feed_counter += 1
            # Restart the forwarding round and remember this chunk as
            # the "previous" one for the next round.
            self.receive_and_feed_counter = 0
            self.receive_and_feed_previous = message
            # }}}
        else:
            # {{{ The sender is a peer

            # {{{ debug
            if __debug__:
                print ("DBS:", self.team_socket.getsockname(), \
                    Color.green, "<-", Color.none, chunk_number, "-", sender)
            # }}}
            if sender not in self.peer_list:
                # The peer is new
                self.peer_list.append(sender)
                self.debt[sender] = 0
                print (Color.green, "DBS:", sender, 'added by chunk', \
                    chunk_number, Color.none)
            else:
                # Receiving a chunk pays back one debt unit.
                self.debt[sender] -= 1
            # }}}

            # {{{ A new chunk has arrived and the
            # previous must be forwarded to next peer of the
            # list of peers.
            if ( self.receive_and_feed_counter < len(self.peer_list) and ( self.receive_and_feed_previous != '') ):
                # NOTE(review): in Python 3 "message" is bytes, so the
                # initial '' sentinel comparison never matches a chunk —
                # confirm intended types.
                # {{{ Send the previous chunk in congestion avoiding mode.
                peer = self.peer_list[self.receive_and_feed_counter]
                self.send_chunk(peer)
                self.debt[peer] += 1
                if self.debt[peer] > self.MAX_CHUNK_DEBT:
                    print (Color.red, "DBS:", peer, 'removed by unsupportive (' + str(self.debt[peer]) + ' lossess)', Color.none)
                    del self.debt[peer]
                    self.peer_list.remove(peer)
                # {{{ debug
                if __debug__:
                    print ("DBS:", self.team_socket.getsockname(), "-", \
                        socket.ntohs(struct.unpack(self.message_format, self.receive_and_feed_previous)[0]),\
                        Color.green, "->", Color.none, peer)
                # }}}
                self.receive_and_feed_counter += 1
            # }}}
        # }}}
        return chunk_number
        # }}}
    else:
        # {{{ A control chunk has been received
        print("DBS: Control received")
        if message == 'H':
            # NOTE(review): "message" is bytes on Python 3, so this
            # str comparison can never be true — confirm (b'H'?).
            if sender not in self.peer_list:
                # The peer is new
                self.peer_list.append(sender)
                self.debt[sender] = 0
                print (Color.green, "DBS:", sender, 'added by [hello]', Color.none)
        else:
            # Any other control message is treated as a goodbye.
            if sender in self.peer_list:
                sys.stdout.write(Color.red)
                print ("DBS:", self.team_socket.getsockname(), '\b: received "goodbye" from', sender)
                sys.stdout.write(Color.none)
                self.peer_list.remove(sender)
                del self.debt[sender]
        return -1
        # }}}
def _p_(*args, **kwargs):
    """Colorize the output.

    Prints *args prefixed with "ACS:" in the module's color, forwarding
    **kwargs (e.g. end=) to _print_.

    NOTE(review): the color constant is common.FLS_COLOR while the text
    prefix is "ACS:" — confirm which module this logger belongs to.
    """
    sys.stdout.write(common.FLS_COLOR)
    # Fix: **kwargs was accepted but silently dropped; _print_ is called
    # elsewhere in this project with keyword arguments such as end=''.
    _print_("ACS:", *args, **kwargs)
    sys.stdout.write(Color.none)
def __init__(self, peer):
    # {{{
    """Announce the FNS peer module in yellow on stdout."""
    out = sys.stdout
    out.write(Color.yellow)
    _print_("Peer FNS")
    out.write(Color.none)
def send_magic_flags(self, peer_serve_socket):
    """Pack the magic flags into a single byte and push them to the peer."""
    flags_byte = struct.pack("B", self.magic_flags)
    peer_serve_socket.sendall(flags_byte)
    _print_("Magic flags =", bin(self.magic_flags))
def __init__(self):
    """Entry point: parse the CLI, build the peer object chain, run the team loop.

    The peer is constructed by progressively wrapping a Peer_IMS in more
    specialized classes (DBS/monitor/ACS/LRS/NTS/STrPe-DS/...), depending
    on the magic flags received from the splitter and the CLI options.
    """
    try:
        colorama.init()
    except Exception:
        pass
    _print_("Running in", end=' ')
    if __debug__:
        print("debug mode")
    else:
        print("release mode")
    # {{{ Args handling and object instantiation
    parser = argparse.ArgumentParser(
        description='This is the peer node of a P2PSP team.')
    parser.add_argument('--enable_chunk_loss', help='Forces a lost of chunks')
    parser.add_argument(
        '--max_chunk_debt',
        help=
        'The maximun number of times that other peer can not send a chunk to this peer. Defaut = {}'
        .format(Peer_DBS.MAX_CHUNK_DEBT))
    parser.add_argument(
        '--player_port',
        help='Port to communicate with the player. Default = {}'.format(
            Peer_IMS.PLAYER_PORT))
    parser.add_argument(
        '--port_step',
        help=
        'Source port step forced when behind a sequentially port allocating NAT (conflicts with --chunk_loss_period). Default = {}'
        .format(Symsp_Peer.PORT_STEP))
    parser.add_argument(
        '--splitter_addr',
        help='IP address or hostname of the splitter. Default = {}.'.
        format(Peer_IMS.SPLITTER_ADDR))
    parser.add_argument(
        '--splitter_port',
        help='Listening port of the splitter. Default = {}.'.format(
            Peer_IMS.SPLITTER_PORT))
    parser.add_argument(
        '--port',
        help=
        'Port to communicate with the peers. Default {} (the OS will chose it).'
        .format(Peer_IMS.PORT))
    parser.add_argument(
        '--use_localhost',
        action="store_true",
        help=
        'Forces the peer to use localhost instead of the IP of the adapter to connect to the splitter. Notice that in this case, peers that run outside of the host will not be able to communicate with this peer.'
    )
    parser.add_argument('--malicious',
                        action="store_true",
                        help='Enables the malicious activity for peer.')
    parser.add_argument(
        '--persistent',
        action="store_true",
        help='Forces the peer to send poisoned chunks to other peers.')
    parser.add_argument(
        '--on_off_ratio',
        help=
        'Enables on-off attack and sets ratio for on off (from 1 to 100)')
    parser.add_argument(
        '--selective',
        nargs='+',
        type=str,
        help='Enables selective attack for given set of peers.')
    parser.add_argument(
        '--bad_mouth',
        nargs='+',
        type=str,
        help='Enables Bad Mouth attack for given set of peers.')
    parser.add_argument(
        '--trusted',
        action="store_true",
        help='Forces the peer to send hashes of chunks to splitter')
    parser.add_argument(
        '--checkall',
        action="store_true",
        help=
        'Forces the peer to send hashes of every chunks to splitter (works only with trusted option)'
    )
    parser.add_argument('--strpeds',
                        action="store_true",
                        help='Enables STrPe-DS')
    parser.add_argument(
        '--strpe_log',
        help='Logging STrPe & STrPe-DS specific data to file.')
    parser.add_argument('--show_buffer',
                        action="store_true",
                        help='Shows the status of the buffer of chunks.')
    try:
        argcomplete.autocomplete(parser)
    except Exception:
        pass
    #args = parser.parse_known_args()[0]
    args = parser.parse_args()
    # Apply CLI overrides to the class-level defaults.
    if args.splitter_addr:
        Peer_IMS.SPLITTER_ADDR = socket.gethostbyname(args.splitter_addr)
        _print_('Splitter address =', Peer_IMS.SPLITTER_ADDR)
    if args.splitter_port:
        Peer_IMS.SPLITTER_PORT = int(args.splitter_port)
        _print_('Splitter port =', Peer_IMS.SPLITTER_PORT)
    if args.port:
        Peer_IMS.PORT = int(args.port)
        _print_('(Peer) PORT =', Peer_IMS.PORT)
    if args.player_port:
        Peer_IMS.PLAYER_PORT = int(args.player_port)
        _print_('Listening port (player) =', Peer_IMS.PLAYER_PORT)
    if args.max_chunk_debt:
        Peer_DBS.MAX_CHUNK_DEBT = int(args.max_chunk_debt)
        _print_('Maximun chunk debt =', Peer_DBS.MAX_CHUNK_DEBT)
    if args.use_localhost:
        Peer_IMS.USE_LOCALHOST = True
        _print_('Using localhost address')
    # Base peer: waits for the player, then negotiates with the splitter.
    peer = Peer_IMS()
    peer.wait_for_the_player()
    peer.connect_to_the_splitter()
    peer.receive_the_mcast_endpoint()
    peer.receive_the_header_size()
    peer.receive_the_chunk_size()
    peer.receive_the_header()
    peer.receive_the_buffer_size()
    _print_("Using IP Multicast address =", peer.mcast_addr)
    if args.show_buffer:
        Peer_IMS.SHOW_BUFFER = True
    # A multicast address is always received, even for DBS peers.
    if peer.mcast_addr == "0.0.0.0":
        # {{{ IP unicast mode.
        peer = Peer_DBS(peer)
        peer.receive_my_endpoint()
        peer.receive_the_number_of_peers()
        _print_("Number of peers in the team (excluding me) =",
                peer.number_of_peers)
        _print_("Am I a monitor peer? =", peer.am_i_a_monitor())
        peer.listen_to_the_team()
        peer.receive_the_list_of_peers()
        _print_("List of peers received")
        peer.receive_magic_flags()
        _print_("Magic flags =", peer.magic_flags)
        # After receiving the list of peers, the peer can check
        # whether is a monitor peer or not (only the first
        # arriving peers are monitors)
        if peer.am_i_a_monitor():
            from core.monitor_dbs import Monitor_DBS
            peer = Monitor_DBS(peer)
            _print_("Monitor DBS")
            # The peer is a monitor. Now it's time to know the sets of rules that control this team.
            if (peer.magic_flags & common.LRS):
                from core.monitor_lrs import Monitor_LRS
                # NOTE(review): "Monitor_LSR" does not match the imported
                # name Monitor_LRS — this raises NameError if reached.
                peer = Monitor_LSR(peer)
                _print_("Monitor LRS")
            if (peer.magic_flags & common.NTS):
                from core.monitor_nts import Monitor_NTS
                peer = Monitor_NTS(peer)
                _print_("Monitor NTS")
        else:
            # NOTE(review): peer is already a Peer_DBS here; this re-wraps
            # it a second time — confirm this is intentional.
            peer = Peer_DBS(peer)
            _print_("Peer DBS")
            # The peer is a normal peer. Let's know the sets of rules that control this team.
            if (peer.magic_flags & common.ACS):
                # NOTE(review): "Peer_ACR" vs the ACS flag/label — confirm
                # the class name; likely a typo.
                peer = Peer_ACR(peer)
                _print_("Peer ACS")
            if (peer.magic_flags & common.LRS):
                # NOTE(review): "Peer_LSR" vs the printed "Peer LRS" —
                # confirm the class name; likely a typo.
                peer = Peer_LSR(peer)
                _print_("Peer LRS")
            if (peer.magic_flags & common.NTS):
                from peer_nts import Peer_NTS
                # NOTE(review): "Peeer_NTS" is a typo of the imported
                # Peer_NTS — this raises NameError if reached.
                peer = Peeer_NTS(peer)
                _print_("Peer NTS")
        if args.enable_chunk_loss:
            # NOTE(review): the parser defines no --chunk_loss_period
            # option, so args.chunk_loss_period raises AttributeError.
            if args.chunk_loss_period:
                Lossy_Peer.CHUNK_LOSS_PERIOD = int(
                    args.chunk_loss_period)
                print('CHUNK_LOSS_PERIOD =', Lossy_Peer.CHUNK_LOSS_PERIOD)
                if int(args.chunk_loss_period) != 0:
                    from lossy_peer import Lossy_Peer
                    peer = Lossy_Peer(peer)
        if args.port_step:
            Symsp_Peer.PORT_STEP = int(args.port_step)
            print('PORT_STEP =', Symsp_Peer.PORT_STEP)
            if int(args.port_step) != 0:
                peer = Symsp_Peer(peer)
        if args.strpeds:
            from core.peer_strpeds import Peer_StrpeDs
            peer = Peer_StrpeDs(peer)
            peer.receive_dsa_key()
        if args.malicious and not args.strpeds:
            # workaround for malicous strpeds peer
            from core.malicious_peer import MaliciousPeer
            peer = MaliciousPeer(peer)
            if args.persistent:
                peer.setPersistentAttack(True)
            if args.on_off_ratio:
                peer.setOnOffAttack(True, int(args.on_off_ratio))
            if args.selective:
                peer.setSelectiveAttack(True, args.selective)
        if args.malicious and args.strpeds:
            from core.peer_strpeds_malicious import Peer_StrpeDsMalicious
            peer = Peer_StrpeDsMalicious(peer)
            if args.persistent:
                peer.setPersistentAttack(True)
            if args.on_off_ratio:
                peer.setOnOffAttack(True, int(args.on_off_ratio))
            if args.selective:
                peer.setSelectiveAttack(True, args.selective)
            if args.bad_mouth:
                peer.setBadMouthAttack(True, args.bad_mouth)
        if args.trusted:
            from core.trusted_peer import TrustedPeer
            peer = TrustedPeer(peer)
            if args.checkall:
                peer.setCheckAll(True)
        if args.strpe_log != None:
            peer.LOGGING = True
            # NOTE(review): buffering=0 is only valid for binary mode in
            # Python 3; open(..., 'w', 0) raises ValueError — confirm.
            peer.LOG_FILE = open(args.strpe_log, 'w', 0)
        # }}}
    else:
        # {{{ IP multicast mode
        peer.listen_to_the_team()
        # }}}
    # }}}
    # {{{ Run!
    peer.disconnect_from_the_splitter()
    peer.buffer_data()
    peer.start()
    print("+-----------------------------------------------------+")
    print("| Received = Received kbps, including retransmissions |")
    print("|     Sent = Sent kbps                                |")
    print("|       (Expected values are between parenthesis)     |")
    print("------------------------------------------------------+")
    print()
    print(
        "    Time |     Received (Expected) |          Sent (Expected) | Team description"
    )
    print(
        "---------+-------------------------+--------------------------+-----------------..."
    )
    last_chunk_number = peer.played_chunk
    # Some peer variants lack sendto_counter / peer_list; default them.
    if hasattr(peer, 'sendto_counter'):
        last_sendto_counter = 0
    else:
        peer.sendto_counter = 0
        last_sendto_counter = 0
    if not hasattr(peer, 'peer_list'):
        peer.peer_list = []
    last_recvfrom_counter = peer.recvfrom_counter
    # Per-second statistics loop; runs until the player disconnects.
    while peer.player_alive:
        time.sleep(1)
        kbps_expected_recv = ((peer.played_chunk - last_chunk_number) *
                              peer.chunk_size * 8) / 1000
        last_chunk_number = peer.played_chunk
        kbps_recvfrom = ((peer.recvfrom_counter - last_recvfrom_counter) *
                         peer.chunk_size * 8) / 1000
        last_recvfrom_counter = peer.recvfrom_counter
        team_ratio = len(peer.peer_list) / (len(peer.peer_list) + 1.0)
        kbps_expected_sent = int(kbps_expected_recv * team_ratio)
        kbps_sendto = ((peer.sendto_counter - last_sendto_counter) *
                       peer.chunk_size * 8) / 1000
        last_sendto_counter = peer.sendto_counter
        try:
            # Best-effort GUI update; ignored in console mode or when
            # the GTK adapter is unavailable.
            if common.CONSOLE_MODE == False:
                from gi.repository import GObject
                try:
                    from adapter import speed_adapter
                except ImportError as msg:
                    pass
                GObject.idle_add(speed_adapter.update_widget,
                                 str(kbps_recvfrom) + ' kbps',
                                 str(kbps_sendto) + ' kbps',
                                 str(len(peer.peer_list) + 1))
        except Exception as msg:
            pass
        if kbps_recvfrom > 0 and kbps_expected_recv > 0:
            nice = 100.0 / float(
                (float(kbps_expected_recv) / kbps_recvfrom) *
                (len(peer.peer_list) + 1))
        else:
            nice = 0.0
        _print_('|', end=Color.none)
        # Red when under-performing, green when over-performing.
        if kbps_expected_recv < kbps_recvfrom:
            sys.stdout.write(Color.red)
        elif kbps_expected_recv > kbps_recvfrom:
            sys.stdout.write(Color.green)
        print(repr(kbps_expected_recv).rjust(12), end=Color.none)
        print(('(' + repr(kbps_recvfrom) + ')').rjust(12), end=' | ')
        #print(("{:.1f}".format(nice)).rjust(6), end=' | ')
        #sys.stdout.write(Color.none)
        if kbps_expected_sent > kbps_sendto:
            sys.stdout.write(Color.red)
        elif kbps_expected_sent < kbps_sendto:
            sys.stdout.write(Color.green)
        print(repr(kbps_sendto).rjust(12), end=Color.none)
        print(('(' + repr(kbps_expected_sent) + ')').rjust(12), end=' | ')
        #sys.stdout.write(Color.none)
        #print(repr(nice).ljust(1)[:6], end=' ')
        # Show at most five team members in the status line.
        print(len(peer.peer_list), end=' ')
        counter = 0
        for p in peer.peer_list:
            if (counter < 5):
                print(p, end=' ')
                counter += 1
            else:
                break
        print()
    # Player gone: zero out the GUI counters (best effort).
    try:
        if common.CONSOLE_MODE == False:
            GObject.idle_add(speed_adapter.update_widget,
                             str(0) + ' kbps',
                             str(0) + ' kbps', str(0))
    except Exception as msg:
        pass
def buffer_data(self):
    # {{{ Buffering
    """Fill half of the circular chunk buffer before playback starts."""
    # {{{ The peer dies if the player disconnects.
    # }}}
    self.player_alive = True
    # {{{ The last chunk sent to the player.
    # }}}
    self.played_chunk = 0
    # {{{ Counts the number of executions of the recvfrom()
    # function.
    # }}}
    self.recvfrom_counter = 0
    # {{{ Label the chunks in the buffer as "received" or "not
    # received".
    # }}}
    self.received_flag = []
    # {{{ The buffer of chunks is a structure that is used to delay
    # the playback of the chunks in order to accommodate the
    # network jittter. Two components are needed: (1) the "chunks"
    # buffer that stores the received chunks and (2) the
    # "received" buffer that stores if a chunk has been received
    # or not. Notice that each peer can use a different
    # buffer_size: the smaller the buffer size, the lower start-up
    # time, the higher chunk-loss ratio. However, for the sake of
    # simpliticy, all peers will use the same buffer size.
    self.chunks = [""]*self.buffer_size
    self.received_flag = [False]*self.buffer_size
    self.received_counter = 0
    # }}}

    # Wall time (execution time plus waiting time).
    start_time = time.time()

    # We will send a chunk to the player when a new chunk is
    # received. Besides, those slots in the buffer that have not been
    # filled by a new chunk will not be send to the player. Moreover,
    # chunks can be delayed an unknown time. This means that (due to the
    # jitter) after chunk X, the chunk X+Y can be received (instead of the
    # chunk X+1). Alike, the chunk X-Y could follow the chunk X. Because
    # we implement the buffer as a circular queue, in order to minimize
    # the probability of a delayed chunk overwrites a new chunk that is
    # waiting for traveling the player, we wil fill only the half of the
    # circular queue.
    _p_(self.team_socket.getsockname(), "\b: buffering = 000.00%")
    sys.stdout.flush()

    # First chunk to be sent to the player. The
    # process_next_message() procedure returns the chunk number if
    # a packet has been received or -2 if a time-out exception has
    # been arised.
    chunk_number = self.process_next_message()
    while chunk_number < 0:
        chunk_number = self.process_next_message()
    _p_(chunk_number)
    self.played_chunk = chunk_number
    _p_("First chunk to play", self.played_chunk)
    _p_(self.team_socket.getsockname(), "\b: buffering (\b",
        repr(100.0/self.buffer_size).rjust(4))

    # Now, fill up to the half of the buffer.
    for x in range(int(self.buffer_size/2)):
        _print_("{:.2%}\r".format((1.0*x)/(self.buffer_size/2)), end='')
        # Buffering progress in percent (presumably for a GUI adapter;
        # the adapter hook below is commented out).
        BUFFER_STATUS = (100*x)/(self.buffer_size/2) +1
        #if Common.CONSOLE_MODE == False :
        #    GObject.idle_add(buffering_adapter.update_widget,BUFFER_STATUS)
        #else:
        #    pass
        #print("!", end='')
        sys.stdout.flush()
        # Block until one more chunk has actually been received.
        while self.process_next_message() < 0:
            pass
    print()
    latency = time.time() - start_time
    _p_('latency =', latency, 'seconds')
    _p_("buffering done.")
    sys.stdout.flush()
def try_to_disconnect_from_the_splitter(self):
    # {{{
    """Perform the NTS incorporation handshake, then leave the splitter's TCP socket.

    Greets the monitors and the splitter over UDP to punch NAT entries,
    then waits until every peer known at incorporation start has been
    contacted. If that takes longer than Common.MAX_PEER_ARRIVING_TIME,
    the incorporation is restarted on a fresh team socket. Finally closes
    the TCP connection and reports success ('Y') to the splitter.
    """
    self.start_send_hello_thread()
    # Receive the generated ID for this peer from splitter
    self.receive_id()
    # Note: This peer is *not* the monitor peer.
    # Send UDP packets to splitter and monitor peers
    # to create working NAT entries and to determine the
    # source port allocation type of the NAT of this peer
    for peer in self.peer_list[:self.number_of_monitors]:
        self.say_hello(peer)
    self.say_hello(self.splitter)
    # Directly start packet sending
    self.hello_messages_event.set()
    # A list of peer_ids that contains the peers that were in the team when
    # starting incorporation and that are not connected yet
    self.initial_peer_list = []
    # Receive the list of peers, except the monitor peer, with their peer
    # IDs and send hello messages
    self.receive_the_list_of_peers_2()
    # Wait for getting connected to all currently known peers
    incorporation_time = time.time()
    # A timeout < MAX_PEER_ARRIVING_TIME has to be set for self.team_socket
    # The monitor is not in initial_peer_list
    while len(self.initial_peer_list) > 0:
        if time.time(
        ) - incorporation_time > Common.MAX_PEER_ARRIVING_TIME:
            # Retry incorporation into the team
            _p_("Retrying incorporation with %d peers left: %s" \
                % (len(self.initial_peer_list), self.initial_peer_list))
            incorporation_time = time.time()
            # Cleaning hello messages
            with self.hello_messages_lock:
                self.hello_messages_times.clear()
                self.hello_messages_ports.clear()
                del self.hello_messages[:]
            # Resetting peer lists
            del self.initial_peer_list[:]
            del self.peer_list[self.number_of_monitors:]  # Leave monitors
            # Recreate the socket
            # Similar to Peer_DBS.listen_to_the_team, binds to a random port
            self.team_socket.close()
            self.create_team_socket()
            try:
                self.team_socket.setsockopt(socket.SOL_SOCKET,
                                            socket.SO_REUSEADDR, 1)
            except Exception as e:
                _print_(Common.NTS_COLOR + "NTS:" + Color.none, e)
            self.team_socket.bind(('', 0))
            self.team_socket.settimeout(1)
            # Say hello to splitter again, to retry incorporation
            # 'N' for 'not incorporated'
            self.send_message(
                (self.peer_id.encode() + b'N', self.splitter))
            # Say hello to monitors again, to keep the NAT entry alive
            for peer in self.peer_list[:self.number_of_monitors]:
                self.send_message((self.peer_id.encode() + b'N', peer))
            # Receive all peer endpoints and send hello messages
            self.receive_the_list_of_peers_2()
        # Process messages to establish connections to peers
        try:
            message, sender = self.team_socket.recvfrom( \
                struct.calcsize(self.message_format))
            self.process_message(message, sender)
        except socket.timeout:
            pass
    # Close the TCP socket
    Peer_DBS.disconnect_from_the_splitter(self)
    # The peer is now successfully incorporated; inform the splitter
    self.send_message((self.peer_id.encode() + b'Y', self.splitter))
    _p_("Incorporation successful")
def process_message(self, message, sender):
    """Handle one NTS control message; delegate everything else to Peer_DBS.

    Branches (checked in order) on sender and message length:
      * [send hello to X] from splitter, with or without an extra splitter
        port — triggers source-port prediction and hello sending;
      * an acknowledge of one of our pending hello messages — stops its
        periodic resending;
      * a plain [hello (peer_id)] from another peer — acknowledges it,
        registers the peer, and reports its source port to the splitter;
      * b'H' (DBS hello) — ignored;
      * anything from an unknown sender — ignored;
      * otherwise, once fully incorporated, falls through to
        Peer_DBS.process_message.

    Returns -1 when no chunk was received (the DBS base call may return a
    chunk number).
    """
    # {{{ Handle NTS messages; pass other messages to base class
    if sender == self.splitter and \
        len(message) == Common.PEER_ID_LENGTH + struct.calcsize("4sHHH"):
        # say [hello to (X)] received from splitter
        peer_id = message[:Common.PEER_ID_LENGTH].decode()
        IP_addr, source_port_to_splitter, port_diff, peer_number = \
            struct.unpack("4sHHH", message[Common.PEER_ID_LENGTH:])  # Ojo, !H ????
        IP_addr = socket.inet_ntoa(IP_addr)
        source_port_to_splitter = socket.ntohs(source_port_to_splitter)
        port_diff = socket.ntohs(port_diff)
        peer_number = socket.ntohs(peer_number)

        peer = (IP_addr, source_port_to_splitter)  # Endpoint to splitter
        _p_("Received [send hello to %s %s]" % (peer_id, peer))
        _p_("port_diff = %s" % port_diff)
        _p_("peer_number = %s" % peer_number)
        # Here the port prediction happens:
        additional_ports = \
            self.get_probable_source_ports(source_port_to_splitter,
                                           port_diff, peer_number)
        self.say_hello(peer, additional_ports)
        # Directly start packet sending
        self.hello_messages_event.set()
    elif sender == self.splitter and \
        len(message) == Common.PEER_ID_LENGTH + struct.calcsize("4sHHHH"):
        # say [hello to (X)] received from splitter
        peer_id = message[:Common.PEER_ID_LENGTH].decode()
        IP_addr, source_port_to_splitter, port_diff, peer_number, \
            extra_splitter_port = struct.unpack( \
            "4sHHHH", message[Common.PEER_ID_LENGTH:])  # Ojo, !H ????
        IP_addr = socket.inet_ntoa(IP_addr)
        source_port_to_splitter = socket.ntohs(source_port_to_splitter)
        port_diff = socket.ntohs(port_diff)
        peer_number = socket.ntohs(peer_number)
        extra_splitter_port = socket.ntohs(extra_splitter_port)

        peer = (IP_addr, source_port_to_splitter)  # Endpoint to splitter
        _p_("Received [send hello to %s %s]" % (peer_id, peer))
        # Here the port prediction happens:
        additional_ports = \
            self.get_probable_source_ports(source_port_to_splitter,
                                           port_diff, peer_number)
        self.say_hello(peer, additional_ports)
        # Send to extra splitter port to determine currently allocated
        # source port
        self.say_hello((self.splitter[0], extra_splitter_port))
        # Directly start packet sending
        self.hello_messages_event.set()
    elif message == self.peer_id.encode() or (sender == self.splitter and \
        len(message) == Common.PEER_ID_LENGTH + struct.calcsize("H")) or \
        (sender == self.splitter and \
        len(message) == Common.PEER_ID_LENGTH+1 + struct.calcsize("H")) or \
        len(message) == Common.PEER_ID_LENGTH+1:  # All sent message sizes
        # Acknowledge received; stop sending the message
        with self.hello_messages_lock:
            for hello_data in self.hello_messages:
                # Match on payload, sender IP, and one of the ports the
                # hello was sent to (port prediction may try several).
                if message == hello_data[0] \
                    and sender[0] == hello_data[1][0] \
                    and sender[1] in self.hello_messages_ports[hello_data]:
                    _p_("Received acknowledge from %s" % (sender, ))
                    # Safe despite mutating while iterating: we return
                    # immediately after the removal.
                    self.hello_messages.remove(hello_data)
                    del self.hello_messages_times[hello_data]
                    del self.hello_messages_ports[hello_data]
                    # No chunk number, as no chunk was received
                    return -1
        _print_(Common.NTS_COLOR + "NTS: Received acknowledge from unknown host %s"
                % (sender, ) + Color.none)
    elif len(message) == Common.PEER_ID_LENGTH:
        peer_id = message.decode()
        _p_("Received [hello (ID %s)] from %s" % (message, sender))
        # Send acknowledge
        self.team_socket.sendto(message, sender)

        if sender not in self.peer_list:
            _p_("Appending peer %s %s to list" % (peer_id, sender))
            self.peer_list.append(sender)
            self.debt[sender] = 0
            # Send source port information to splitter
            message += struct.pack("H", socket.htons(sender[1]))
            message_data = (message, self.splitter)
            self.send_message(message_data)
        if peer_id in self.initial_peer_list:
            self.initial_peer_list.remove(peer_id)
    elif message == b'H':
        _p_("Received [DBS hello] from %s" % str(sender))
        # Ignore hello messages that are sent by Peer_DBS instances in
        # receive_the_list_of_peers() before a Peer_NTS instance is created
        pass
    elif sender != self.splitter and sender not in self.peer_list:
        _p_("Ignoring message of length %d from unknown %s" \
            % (len(message), sender))
    elif len(self.initial_peer_list) == 0:
        # Start receiving chunks when fully incorporated
        return Peer_DBS.process_message(self, message, sender)

    # No chunk number, as no chunk was received
    return -1
def try_to_disconnect_from_the_splitter(self):
    """Incorporate this NAT-traversing peer into the team, then close the
    splitter TCP socket.

    Opens NAT entries with UDP hellos to the splitter and monitors,
    receives the team's peer list, and keeps processing incoming messages
    — restarting incorporation on a fresh socket whenever it exceeds
    Common.MAX_PEER_ARRIVING_TIME — until every initially-known peer is
    connected.  Finally reports success to the splitter with b'Y'.

    Side effects: rebinds self.team_socket on retry, mutates
    self.peer_list / self.initial_peer_list / self.hello_messages*.
    """
    # {{{
    # Background thread that periodically resends pending hello messages.
    self.start_send_hello_thread()
    # Receive the generated ID for this peer from splitter
    self.receive_id()
    # Note: This peer is *not* the monitor peer.

    # Send UDP packets to splitter and monitor peers
    # to create working NAT entries and to determine the
    # source port allocation type of the NAT of this peer
    for peer in self.peer_list[:self.number_of_monitors]:
        self.say_hello(peer)
    self.say_hello(self.splitter)
    # Directly start packet sending
    self.hello_messages_event.set()

    # A list of peer_ids that contains the peers that were in the team when
    # starting incorporation and that are not connected yet
    self.initial_peer_list = []
    # Receive the list of peers, except the monitor peer, with their peer
    # IDs and send hello messages
    self.receive_the_list_of_peers_2()

    # Wait for getting connected to all currently known peers
    incorporation_time = time.time()
    # A timeout < MAX_PEER_ARRIVING_TIME has to be set for self.team_socket
    # The monitor is not in initial_peer_list
    while len(self.initial_peer_list) > 0:
        if time.time() - incorporation_time > Common.MAX_PEER_ARRIVING_TIME:
            # Retry incorporation into the team
            _p_("Retrying incorporation with %d peers left: %s" \
                % (len(self.initial_peer_list), self.initial_peer_list))
            incorporation_time = time.time()
            # Cleaning hello messages
            with self.hello_messages_lock:
                self.hello_messages_times.clear()
                self.hello_messages_ports.clear()
                del self.hello_messages[:]
            # Resetting peer lists
            del self.initial_peer_list[:]
            del self.peer_list[self.number_of_monitors:]  # Leave monitors
            # Recreate the socket
            # Similar to Peer_DBS.listen_to_the_team, binds to a random port
            self.team_socket.close()
            self.create_team_socket()
            try:
                self.team_socket.setsockopt(socket.SOL_SOCKET,
                                            socket.SO_REUSEADDR, 1)
            except Exception as e:
                _print_(Common.NTS_COLOR + "NTS:" + Color.none, e)
            self.team_socket.bind(('', 0))
            self.team_socket.settimeout(1)
            # Say hello to splitter again, to retry incorporation
            # 'N' for 'not incorporated'
            self.send_message((self.peer_id.encode() + b'N', self.splitter))
            # Say hello to monitors again, to keep the NAT entry alive
            for peer in self.peer_list[:self.number_of_monitors]:
                self.send_message((self.peer_id.encode() + b'N', peer))
            # Receive all peer endpoints and send hello messages
            self.receive_the_list_of_peers_2()
        # Process messages to establish connections to peers.
        # The 1s socket timeout above bounds each wait so the retry check
        # at the top of the loop runs regularly.
        try:
            message, sender = self.team_socket.recvfrom( \
                struct.calcsize(self.message_format))
            self.process_message(message, sender)
        except socket.timeout:
            pass

    # Close the TCP socket
    Peer_DBS.disconnect_from_the_splitter(self)
    # The peer is now successfully incorporated; inform the splitter
    self.send_message((self.peer_id.encode() + b'Y', self.splitter))
    _p_("Incorporation successful")
def _p_(*args, **kwargs):
    """Print *args* prefixed with "NTS:" in the NTS color.

    Fix: forward **kwargs (e.g. end=, sep=) to _print_ — the original
    accepted them but silently discarded them, so callers passing end=''
    got an unwanted newline.
    """
    sys.stdout.write(Common.NTS_COLOR)
    _print_("NTS:", *args, **kwargs)
    sys.stdout.write(Color.none)
def __init__(self):
    """Splitter console entry point: parse CLI options, build the splitter
    object (IMS or DBS plus optional NTS/ACS/LRS/DIS wrappers), start it,
    and print per-second statistics until Ctrl-C."""
    # {{{ colorama.init()
    try:
        colorama.init()
    except Exception:
        pass
    # }}}
    # {{{ Running in debug/release mode
    _print_("Running in", end=' ')
    if __debug__:
        print("debug mode")
    else:
        print("release mode")
    # }}}
    # {{{ Arguments handling
    parser = argparse.ArgumentParser(
        description=
        'This is the splitter node of a P2PSP team. The splitter is in charge of defining the Set or Rules (SoR) that will control the team. By default, DBS (unicast transmissions) will be used.'
    )
    #parser.add_argument('--splitter_addr', help='IP address to serve (TCP) the peers. (Default = "{}")'.format(Splitter_IMS.SPLITTER_ADDR)) <- no ahora
    parser.add_argument(
        '--buffer_size',
        help='size of the video buffer in blocks. Default = {}.'.format(
            Splitter_IMS.BUFFER_SIZE))
    parser.add_argument(
        '--channel',
        help=
        'Name of the channel served by the streaming source. Default = "{}".'
        .format(Splitter_IMS.CHANNEL))
    parser.add_argument('--chunk_size',
                        help='Chunk size in bytes. Default = {}.'.format(
                            Splitter_IMS.CHUNK_SIZE))
    parser.add_argument(
        '--header_size',
        help='Size of the header of the stream in chunks. Default = {}.'.
        format(Splitter_IMS.HEADER_SIZE))
    parser.add_argument(
        '--max_chunk_loss',
        help=
        'Maximum number of lost chunks for an unsupportive peer. Makes sense only in unicast mode. Default = {}.'
        .format(Splitter_DBS.MAX_CHUNK_LOSS))
    parser.add_argument(
        '--max_number_of_monitor_peers',
        help=
        'Maxium number of monitors in the team. The first connecting peers will automatically become monitors. Default = "{}".'
        .format(Splitter_DBS.MONITOR_NUMBER))
    parser.add_argument(
        '--mcast_addr',
        help=
        'IP multicast address used to serve the chunks. Makes sense only in multicast mode. Default = "{}".'
        .format(Splitter_IMS.MCAST_ADDR))
    parser.add_argument(
        '--port',
        help='Port to serve the peers. Default = "{}".'.format(
            Splitter_IMS.PORT))
    parser.add_argument(
        '--source_addr',
        help=
        'IP address or hostname of the streaming server. Default = "{}".'.
        format(Splitter_IMS.SOURCE_ADDR))
    parser.add_argument(
        '--source_port',
        help='Port where the streaming server is listening. Default = {}.'.
        format(Splitter_IMS.SOURCE_PORT))
    parser.add_argument(
        "--IMS",
        action="store_true",
        help=
        "Uses the IP multicast infrastructure, if available. IMS mode is incompatible with ACS, LRS, DIS and NTS modes."
    )
    parser.add_argument("--NTS", action="store_true", help="Enables NAT traversal.")
    parser.add_argument("--ACS", action="store_true", help="Enables Adaptive Chunk-rate.")
    parser.add_argument("--LRS", action="store_true", help="Enables Lost chunk Recovery.")
    parser.add_argument("--DIS", action="store_true", help="Enables Data Integrity check.")
    parser.add_argument('--strpe',
                        nargs='+',
                        type=str,
                        help='Selects STrPe model for DIS')
    parser.add_argument('--strpeds',
                        nargs='+',
                        type=str,
                        help='Selects STrPe-DS model for DIS')
    parser.add_argument(
        '--strpeds_majority_decision',
        help='Sets majority decision ratio for STrPe-DS model.')
    parser.add_argument(
        '--strpe_log',
        help='Logging STrPe & STrPe-DS specific data to file.')
    parser.add_argument(
        '--TTL',
        help='Time To Live of the multicast messages. Default = {}.'.
        format(Splitter_IMS.TTL))

    # argcomplete is optional (shell tab-completion only).
    try:
        argcomplete.autocomplete(parser)
    except Exception:
        pass
    args = parser.parse_args()
    #args = parser.parse_known_args()[0]

    _print_("My IP address is =", socket.gethostbyname(socket.gethostname()))

    # Each option, when present, overrides the corresponding class default.
    if args.buffer_size:
        Splitter_IMS.BUFFER_SIZE = int(args.buffer_size)
    _print_("Buffer size =", Splitter_IMS.BUFFER_SIZE)
    if args.channel:
        Splitter_IMS.CHANNEL = args.channel
    _print_("Channel = \"" + Splitter_IMS.CHANNEL + "\"")
    if args.chunk_size:
        Splitter_IMS.CHUNK_SIZE = int(args.chunk_size)
    _print_("Chunk size =", Splitter_IMS.CHUNK_SIZE)
    if args.header_size:
        Splitter_IMS.HEADER_SIZE = int(args.header_size)
    _print_("Header size =", Splitter_IMS.HEADER_SIZE)
    if args.port:
        Splitter_IMS.PORT = int(args.port)
    _print_("Listening port =", Splitter_IMS.PORT)
    if args.source_addr:
        Splitter_IMS.SOURCE_ADDR = socket.gethostbyname(args.source_addr)
    _print_("Source address = ", Splitter_IMS.SOURCE_ADDR)
    if args.source_port:
        Splitter_IMS.SOURCE_PORT = int(args.source_port)
    _print_("Source port =", Splitter_IMS.SOURCE_PORT)

    if args.IMS:
        _print_("IP multicast (IMS) mode selected")
        if args.mcast_addr:
            Splitter_IMS.MCAST_ADDR = args.mcast_addr
        _print_("Multicast address =", Splitter_IMS.MCAST_ADDR)
        if args.TTL:
            # NOTE(review): stored as str, unlike the other numeric
            # options which use int(...) — TODO confirm downstream use.
            Splitter_IMS.TTL = args.TTL
        _print_("Multicast TTL =", Splitter_IMS.TTL)
        splitter = Splitter_IMS()
        splitter.peer_list = []  # No peer_list is used in IMS.
    else:
        _print_("IP unicast mode selected")
        if args.max_chunk_loss:
            Splitter_DBS.MAX_CHUNK_LOSS = int(args.max_chunk_loss)
        _print_("Maximun chunk loss =", Splitter_DBS.MAX_CHUNK_LOSS)
        if args.max_number_of_monitor_peers:
            Splitter_DBS.MONITOR_NUMBER = int(
                args.max_number_of_monitor_peers)
        _print_("Maximun number of monitor peers =",
                Splitter_DBS.MONITOR_NUMBER)
        # Base splitter; optional behaviors are layered on by wrapping.
        splitter = Splitter_DBS()
        if args.NTS:
            splitter = Splitter_NTS(splitter)
            _print_("NTS enabled")
        if args.ACS:
            splitter = Splitter_ACS(splitter)
            _print_("ACS enabled")
        if args.LRS:
            from core.splitter_lrs import Splitter_LRS
            splitter = Splitter_LRS(splitter)
            _print_("LRS enabled")
        if args.DIS:
            from splitter_strpe import StrpeSplitter
            from splitter_strpeds import StrpeDsSplitter
            _print_("DIS enabled")
            if args.strpe:
                # NOTE(review): 'Splitter_strpe' is not the name imported
                # above (StrpeSplitter) — NameError when --strpe is used;
                # TODO confirm intended class.
                splitter = Splitter_strpe(splitter)
                print("strpe mode selected")
                for peer in args.strpe:
                    splitter.add_trusted_peer(peer)
            if args.strpeds:
                # NOTE(review): wraps with StrpeSplitter here, while the
                # DS import is StrpeDsSplitter — verify which was meant.
                splitter = StrpeSplitter(splitter)
                _print_("strpeds mode selected")
                for peer in args.strpeds:
                    splitter.add_trusted_peer(peer)
            if args.strpeds_majority_decision:
                _print_("strpeds_majority_decision mode selected")
                # NOTE(review): this name is not imported in this function —
                # verify it exists at module level.
                splitter = Splitter_strpeds_majority_decision(splitter)
                splitter.setMajorityRatio(
                    float(args.strpeds_majority_decision))
            if args.strpe_log:
                splitter.LOGGING = True
                # NOTE(review): 'strpe_log' is undefined (args.strpe_log?)
                # and buffering=0 is invalid for text mode in Python 3.
                splitter.LOG_FILE = open(strpe_log, 'w', 0)
        #splitter = Splitter_ACS()
        # if (args.strpe):
        #     splitter = self.init_strpe_splitter('strpe', args.strpe, args.strpe_log)
        # elif (args.strpeds):
        #     splitter = self.init_strpe_splitter('strpeds', args.strpeds, args.strpe_log)
        #     if args.strpeds_majority_decision:
        #         splitter.setMajorityRatio(float(args.strpeds_majority_decision))
        # else:
        #     splitter = Splitter_LRS()
    # }}}

    # {{{ Run!
    splitter.start()

    # {{{ Prints information until keyboard interruption
    print(" | Received | Sent | Number losses/ losses")
    print(
        " Time | (kbps) | (kbps) | peers (peer) sents threshold period kbps"
    )
    print(
        "---------+-----------+-----------+-----------------------------------..."
    )
    last_sendto_counter = splitter.sendto_counter
    last_recvfrom_counter = splitter.recvfrom_counter
    while splitter.alive:
        try:
            time.sleep(1)
            # Per-second throughput derived from counter deltas.
            chunks_sendto = splitter.sendto_counter - last_sendto_counter
            kbps_sendto = (chunks_sendto * splitter.CHUNK_SIZE * 8) / 1000
            chunks_recvfrom = splitter.recvfrom_counter - last_recvfrom_counter
            kbps_recvfrom = (chunks_recvfrom * splitter.CHUNK_SIZE * 8) / 1000
            last_sendto_counter = splitter.sendto_counter
            last_recvfrom_counter = splitter.recvfrom_counter
            sys.stdout.write(Color.none)
            _print_("|" + repr(int(kbps_recvfrom)).rjust(10) + " |" +
                    repr(int(kbps_sendto)).rjust(10),
                    end=" | ")
            #print('%5d' % splitter.chunk_number, end=' ')
            sys.stdout.write(Color.cyan)
            print(len(splitter.peer_list), end=' ')
            # In release mode, show at most ~10 peers per line.
            if not __debug__:
                counter = 0
            for p in splitter.peer_list:
                if not __debug__:
                    if counter > 10:
                        break
                    counter += 1
                sys.stdout.write(Color.blue)
                print(p, end=' ')
                sys.stdout.write(Color.red)
                print(str('%3d' % splitter.losses[p]) + '/' +
                      str('%3d' % chunks_sendto),
                      splitter.MAX_CHUNK_LOSS,
                      end=' ')
                # NOTE(review): identity check of an instance against the
                # class is always False; isinstance() was probably
                # intended — TODO confirm.
                if splitter is Splitter_ACS:
                    try:
                        sys.stdout.write(Color.yellow)
                        print('%3d' % splitter.period[p], end=' ')
                        sys.stdout.write(Color.purple)
                        print(repr(
                            (splitter.number_of_sent_chunks_per_peer[p] *
                             splitter.CHUNK_SIZE * 8) / 1000).rjust(10),
                              end=' ')
                        splitter.number_of_sent_chunks_per_peer[p] = 0
                    except KeyError as e:
                        # Peer may have left between list read and lookup.
                        print("!", e, "--")
                        print(splitter.period[p])
                        pass
                sys.stdout.write(Color.none)
                print('', end=' ')
            print()
        except KeyboardInterrupt:
            print('Keyboard interrupt detected ... Exiting!')

            # Say to daemon threads that the work has been finished,
            splitter.alive = False

            # Wake up the "moderate_the_team" daemon, which is
            # waiting in a recvfrom().
            if not args.IMS:
                #splitter.say_goodbye(("127.0.0.1", splitter.PORT), splitter.team_socket)
                splitter.team_socket.sendto(b'', ("127.0.0.1", splitter.PORT))

            # Wake up the "handle_arrivals" daemon, which is waiting
            # in an accept(), by performing a throw-away handshake.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(("127.0.0.1", splitter.PORT))
            sock.recv(struct.calcsize("4sH"))  # Multicast channel
            sock.recv(struct.calcsize("H"))  # Header size
            sock.recv(struct.calcsize("H"))  # Chunk size
            sock.recv(splitter.CHUNK_SIZE * splitter.HEADER_SIZE)  # Header
            sock.recv(struct.calcsize("H"))  # Buffer size
            sock.recv(struct.calcsize("4sH"))  # Endpoint
            sock.recv(struct.calcsize("B"))  # Magic flags
            if args.IMS:
                number_of_peers = 0
            else:
                number_of_peers = socket.ntohs(
                    struct.unpack("H", sock.recv(struct.calcsize("H")))[0])
            print("Number of peers =", number_of_peers)
            # Receive the list
            while number_of_peers > 0:
                sock.recv(struct.calcsize("4sH"))
                number_of_peers -= 1

            # Breaks this thread and returns to the parent process
            # (usually, the shell).
            break
def __init__(self):
    """Peer console entry point: parse CLI options, hand-shake with the
    splitter, wrap the base Peer_IMS with the behaviors the team's magic
    flags and CLI options request, then stream and print per-second
    statistics while the player is alive."""
    # colorama is optional; a failure only loses colored output.
    try:
        colorama.init()
    except Exception:
        pass
    _print_("Running in", end=' ')
    if __debug__:
        print("debug mode")
    else:
        print("release mode")
    # {{{ Args handling and object instantiation
    parser = argparse.ArgumentParser(description='This is the peer node of a P2PSP team.')
    parser.add_argument('--enable_chunk_loss', help='Forces a lost of chunks')
    parser.add_argument('--max_chunk_debt', help='The maximun number of times that other peer can not send a chunk to this peer. Defaut = {}'.format(Peer_DBS.MAX_CHUNK_DEBT))
    parser.add_argument('--player_port', help='Port to communicate with the player. Default = {}'.format(Peer_IMS.PLAYER_PORT))
    parser.add_argument('--port_step', help='Source port step forced when behind a sequentially port allocating NAT (conflicts with --chunk_loss_period). Default = {}'.format(Symsp_Peer.PORT_STEP))
    parser.add_argument('--splitter_addr', help='IP address or hostname of the splitter. Default = {}.'.format(Peer_IMS.SPLITTER_ADDR))
    parser.add_argument('--splitter_port', help='Listening port of the splitter. Default = {}.'.format(Peer_IMS.SPLITTER_PORT))
    parser.add_argument('--port', help='Port to communicate with the peers. Default {} (the OS will chose it).'.format(Peer_IMS.PORT))
    parser.add_argument('--use_localhost', action="store_true", help='Forces the peer to use localhost instead of the IP of the adapter to connect to the splitter. Notice that in this case, peers that run outside of the host will not be able to communicate with this peer.')
    parser.add_argument('--malicious', action="store_true", help='Enables the malicious activity for peer.')
    parser.add_argument('--persistent', action="store_true", help='Forces the peer to send poisoned chunks to other peers.')
    parser.add_argument('--on_off_ratio', help='Enables on-off attack and sets ratio for on off (from 1 to 100)')
    parser.add_argument('--selective', nargs='+', type=str, help='Enables selective attack for given set of peers.')
    parser.add_argument('--bad_mouth', nargs='+', type=str, help='Enables Bad Mouth attack for given set of peers.')
    parser.add_argument('--trusted', action="store_true", help='Forces the peer to send hashes of chunks to splitter')
    parser.add_argument('--checkall', action="store_true", help='Forces the peer to send hashes of every chunks to splitter (works only with trusted option)')
    parser.add_argument('--strpeds', action="store_true", help='Enables STrPe-DS')
    parser.add_argument('--strpe_log', help='Logging STrPe & STrPe-DS specific data to file.')
    parser.add_argument('--show_buffer', action="store_true", help='Shows the status of the buffer of chunks.')
    # argcomplete is optional (shell tab-completion only).
    try:
        argcomplete.autocomplete(parser)
    except Exception:
        pass
    #args = parser.parse_known_args()[0]
    args = parser.parse_args()

    # Each option, when present, overrides the corresponding class default.
    if args.splitter_addr:
        Peer_IMS.SPLITTER_ADDR = socket.gethostbyname(args.splitter_addr)
    _print_('Splitter address =', Peer_IMS.SPLITTER_ADDR)
    if args.splitter_port:
        Peer_IMS.SPLITTER_PORT = int(args.splitter_port)
    _print_('Splitter port =', Peer_IMS.SPLITTER_PORT)
    if args.port:
        Peer_IMS.PORT = int(args.port)
    _print_('(Peer) PORT =', Peer_IMS.PORT)
    if args.player_port:
        Peer_IMS.PLAYER_PORT = int(args.player_port)
    _print_('Listening port (player) =', Peer_IMS.PLAYER_PORT)
    if args.max_chunk_debt:
        Peer_DBS.MAX_CHUNK_DEBT = int(args.max_chunk_debt)
    _print_('Maximun chunk debt =', Peer_DBS.MAX_CHUNK_DEBT)
    if args.use_localhost:
        Peer_IMS.USE_LOCALHOST = True
        _print_('Using localhost address')

    # Base peer: splitter handshake happens over TCP here.
    peer = Peer_IMS()
    peer.wait_for_the_player()
    peer.connect_to_the_splitter()
    peer.receive_the_mcast_endpoint()
    peer.receive_the_header_size()
    peer.receive_the_chunk_size()
    peer.receive_the_header()
    peer.receive_the_buffer_size()
    _print_("Using IP Multicast address =", peer.mcast_addr)
    if args.show_buffer:
        Peer_IMS.SHOW_BUFFER = True

    # A multicast address is always received, even for DBS peers.
    if peer.mcast_addr == "0.0.0.0":
        # {{{ IP unicast mode.
        peer = Peer_DBS(peer)
        peer.receive_my_endpoint()
        peer.receive_magic_flags()
        _print_("Magic flags =", bin(peer.magic_flags))
        peer.receive_the_number_of_peers()
        _print_("Number of peers in the team (excluding me) =",
                peer.number_of_peers)
        _print_("Am I a monitor peer? =", peer.am_i_a_monitor())
        peer.listen_to_the_team()
        peer.receive_the_list_of_peers()
        _print_("List of peers received")
        # After receiving the list of peers, the peer can check
        # whether is a monitor peer or not (only the first
        # arriving peers are monitors)
        if peer.am_i_a_monitor():
            peer = Monitor_DBS(peer)
            _print_("Monitor DBS enabled")
            # The peer is a monitor. Now it's time to know the sets of rules that control this team.
            if (peer.magic_flags & Common.LRS):
                peer = Monitor_LRS(peer)
                _print_("Monitor LRS enabled")
            if (peer.magic_flags & Common.NTS):
                peer = Monitor_NTS(peer)
                _print_("Monitor NTS enabled")
        else:
            # NOTE(review): re-wraps an object that is already a Peer_DBS
            # (wrapped above) — presumably a copy-construction idiom; TODO
            # confirm it is intentional.
            peer = Peer_DBS(peer)
            _print_("Peer DBS enabled")
            # The peer is a normal peer. Let's know the sets of rules that control this team.
            if (peer.magic_flags & Common.NTS):
                peer = Peer_NTS(peer)
                _print_("Peer NTS enabled")
        if args.enable_chunk_loss:
            # NOTE(review): no --chunk_loss_period argument is defined by
            # this parser (only --enable_chunk_loss), so args has no
            # 'chunk_loss_period' attribute — AttributeError when
            # --enable_chunk_loss is given. TODO confirm/restore option.
            if args.chunk_loss_period:
                Lossy_Peer.CHUNK_LOSS_PERIOD = int(args.chunk_loss_period)
                print('CHUNK_LOSS_PERIOD =', Lossy_Peer.CHUNK_LOSS_PERIOD)
                if int(args.chunk_loss_period) != 0:
                    peer = Lossy_Peer(peer)
                    _print_("Lost of chunks enabled")
        if args.port_step:
            Symsp_Peer.PORT_STEP = int(args.port_step)
            print('PORT_STEP =', Symsp_Peer.PORT_STEP)
            if int(args.port_step) != 0:
                peer = Symsp_Peer(peer)
        if args.strpeds:
            peer = Peer_StrpeDs(peer)
            peer.receive_dsa_key()
        if args.malicious and not args.strpeds:
            # workaround for malicous strpeds peer
            peer = MaliciousPeer(peer)
            if args.persistent:
                peer.setPersistentAttack(True)
            if args.on_off_ratio:
                peer.setOnOffAttack(True, int(args.on_off_ratio))
            if args.selective:
                peer.setSelectiveAttack(True, args.selective)
        if args.malicious and args.strpeds:
            peer = Peer_StrpeDsMalicious(peer)
            if args.persistent:
                peer.setPersistentAttack(True)
            if args.on_off_ratio:
                peer.setOnOffAttack(True, int(args.on_off_ratio))
            if args.selective:
                peer.setSelectiveAttack(True, args.selective)
            if args.bad_mouth:
                peer.setBadMouthAttack(True, args.bad_mouth)
        if args.trusted:
            peer = TrustedPeer(peer)
            if args.checkall:
                peer.setCheckAll(True)
        if args.strpe_log != None:
            peer.LOGGING = True
            # NOTE(review): buffering=0 is invalid for text mode in
            # Python 3 (only allowed for binary mode) — this raises
            # ValueError; TODO confirm target Python version.
            peer.LOG_FILE = open(args.strpe_log, 'w', 0)
        # }}}
    else:
        # {{{ IP multicast mode
        peer.listen_to_the_team()
        # }}}
    # }}}

    # {{{ Run!
    peer.disconnect_from_the_splitter()
    peer.buffer_data()
    peer.start()

    print("+-----------------------------------------------------+")
    print("| Received = Received kbps, including retransmissions |")
    print("| Sent = Sent kbps |")
    print("| (Expected values are between parenthesis) |")
    print("------------------------------------------------------+")
    print()
    print(" | Received (kbps) | Sent (kbps) |")
    print(" Time | Real Expected | Real Expected | Team description")
    print("---------+---------------------+----------------------+-----------------------------------...")

    last_chunk_number = peer.played_chunk
    # IMS peers have no sendto_counter/peer_list; create stand-ins so the
    # statistics loop below works for both modes.
    if hasattr(peer, 'sendto_counter'):
        last_sendto_counter = 0
    else:
        peer.sendto_counter = 0
        last_sendto_counter = 0
    if not hasattr(peer, 'peer_list'):
        peer.peer_list = []
    last_recvfrom_counter = peer.recvfrom_counter
    while peer.player_alive:
        time.sleep(1)
        # Per-second throughput derived from counter deltas.
        kbps_expected_recv = ((peer.played_chunk - last_chunk_number) *
                              peer.chunk_size * 8) / 1000
        last_chunk_number = peer.played_chunk
        kbps_recvfrom = ((peer.recvfrom_counter - last_recvfrom_counter) *
                         peer.chunk_size * 8) / 1000
        last_recvfrom_counter = peer.recvfrom_counter
        # Fraction of traffic this peer is expected to forward to others.
        team_ratio = len(peer.peer_list) / (len(peer.peer_list) + 1.0)
        kbps_expected_sent = int(kbps_expected_recv * team_ratio)
        kbps_sendto = ((peer.sendto_counter - last_sendto_counter) *
                       peer.chunk_size * 8) / 1000
        last_sendto_counter = peer.sendto_counter
        # Best-effort GUI update; all GUI errors are deliberately ignored.
        try:
            if Common.CONSOLE_MODE == False:
                from gi.repository import GObject
                try:
                    from adapter import speed_adapter
                except ImportError as msg:
                    pass
                GObject.idle_add(speed_adapter.update_widget,
                                 str(kbps_recvfrom) + ' kbps',
                                 str(kbps_sendto) + ' kbps',
                                 str(len(peer.peer_list) + 1))
        except Exception as msg:
            pass
        if kbps_recvfrom > 0 and kbps_expected_recv > 0:
            nice = 100.0 / float((float(kbps_expected_recv) / kbps_recvfrom)
                                 * (len(peer.peer_list) + 1))
        else:
            nice = 0.0
        _print_('|', end=Color.none)
        # Red = worse than expected, green = better.
        if kbps_expected_recv < kbps_recvfrom:
            sys.stdout.write(Color.red)
        elif kbps_expected_recv > kbps_recvfrom:
            sys.stdout.write(Color.green)
        print(repr(int(kbps_expected_recv)).rjust(10), end=Color.none)
        print(repr(int(kbps_recvfrom)).rjust(10), end=' | ')
        #print(("{:.1f}".format(nice)).rjust(6), end=' | ')
        #sys.stdout.write(Color.none)
        if kbps_expected_sent > kbps_sendto:
            sys.stdout.write(Color.red)
        elif kbps_expected_sent < kbps_sendto:
            sys.stdout.write(Color.green)
        print(repr(int(kbps_sendto)).rjust(10), end=Color.none)
        print(repr(int(kbps_expected_sent)).rjust(10), end=' | ')
        #sys.stdout.write(Color.none)
        #print(repr(nice).ljust(1)[:6], end=' ')
        print(len(peer.peer_list), end=' ')
        # Show at most 5 team peers per line.
        counter = 0
        for p in peer.peer_list:
            if (counter < 5):
                print(p, end=' ')
                counter += 1
            else:
                break
        print()
    # Player died: zero the GUI widgets (best-effort).
    try:
        if Common.CONSOLE_MODE == False:
            GObject.idle_add(speed_adapter.update_widget,
                             str(0) + ' kbps',
                             str(0) + ' kbps', str(0))
    except Exception as msg:
        pass
def _p_(*args, **kwargs):
    """Print *args* prefixed with "DBS (trusted peer):", colorized.

    Fix: forward **kwargs (e.g. end=, sep=) to _print_ — the original
    accepted them but silently discarded them.

    NOTE(review): writes Common.DBS before printing; the sibling helper
    uses a *_COLOR constant (Common.NTS_COLOR) — confirm Common.DBS is a
    terminal color escape and not the DBS magic flag.
    """
    sys.stdout.write(Common.DBS)
    _print_("DBS (trusted peer):", *args, **kwargs)
    sys.stdout.write(Color.none)
def __init__(self):
    """Parse the command line, build the requested splitter flavour
    (IMS or DBS, optionally layered with NTS/ACS/LRS/DIS), run it and
    print transmission statistics until a keyboard interruption.
    """
    # {{{ Console colors (best effort; colorama may be missing or broken).
    # FIX: the original also called colorama.init() once *outside* the
    # try/except, which defeated the guard.
    try:
        colorama.init()
    except Exception:
        pass
    # }}}

    # {{{ Running in debug/release mode
    _print_("Running in", end=" ")
    if __debug__:
        print("debug mode")
    else:
        print("release mode")
    # }}}

    # {{{ Arguments handling
    parser = argparse.ArgumentParser(
        description="This is the splitter node of a P2PSP team. The splitter is in charge of defining the Set or Rules (SoR) that will control the team. By default, DBS (unicast transmissions) will be used."
    )
    parser.add_argument(
        "--buffer_size",
        help="size of the video buffer in blocks. Default = {}.".format(Splitter_IMS.BUFFER_SIZE))
    parser.add_argument(
        "--channel",
        help='Name of the channel served by the streaming source. Default = "{}".'.format(Splitter_IMS.CHANNEL))
    parser.add_argument(
        "--chunk_size",
        help="Chunk size in bytes. Default = {}.".format(Splitter_IMS.CHUNK_SIZE))
    parser.add_argument(
        "--header_size",
        help="Size of the header of the stream in chunks. Default = {}.".format(Splitter_IMS.HEADER_SIZE))
    parser.add_argument(
        "--max_chunk_loss",
        help="Maximum number of lost chunks for an unsupportive peer. Makes sense only in unicast mode. Default = {}.".format(Splitter_DBS.MAX_CHUNK_LOSS))
    parser.add_argument(
        "--max_number_of_monitor_peers",
        help='Maxium number of monitors in the team. The first connecting peers will automatically become monitors. Default = "{}".'.format(Splitter_DBS.MONITOR_NUMBER))
    parser.add_argument(
        "--mcast_addr",
        help='IP multicast address used to serve the chunks. Makes sense only in multicast mode. Default = "{}".'.format(Splitter_IMS.MCAST_ADDR))
    parser.add_argument(
        "--port",
        help='Port to serve the peers. Default = "{}".'.format(Splitter_IMS.PORT))
    parser.add_argument(
        "--source_addr",
        help='IP address or hostname of the streaming server. Default = "{}".'.format(Splitter_IMS.SOURCE_ADDR))
    parser.add_argument(
        "--source_port",
        help="Port where the streaming server is listening. Default = {}.".format(Splitter_IMS.SOURCE_PORT))
    parser.add_argument(
        "--IMS", action="store_true",
        help="Uses the IP multicast infrastructure, if available. IMS mode is incompatible with ACS, LRS, DIS and NTS modes.")
    parser.add_argument("--NTS", action="store_true", help="Enables NAT traversal.")
    parser.add_argument("--ACS", action="store_true", help="Enables Adaptive Chunk-rate.")
    parser.add_argument("--LRS", action="store_true", help="Enables Lost chunk Recovery.")
    parser.add_argument("--DIS", action="store_true", help="Enables Data Integrity check.")
    parser.add_argument("--strpe", nargs="+", type=str, help="Selects STrPe model for DIS")
    parser.add_argument("--strpeds", nargs="+", type=str, help="Selects STrPe-DS model for DIS")
    parser.add_argument("--strpeds_majority_decision",
                        help="Sets majority decision ratio for STrPe-DS model.")
    parser.add_argument("--strpe_log", help="Logging STrPe & STrPe-DS specific data to file.")
    parser.add_argument(
        "--TTL",
        help="Time To Live of the multicast messages. Default = {}.".format(Splitter_IMS.TTL))

    # Shell tab-completion is optional.
    try:
        argcomplete.autocomplete(parser)
    except Exception:
        pass

    args = parser.parse_args()

    _print_("My IP address is =", socket.gethostbyname(socket.gethostname()))

    if args.buffer_size:
        Splitter_IMS.BUFFER_SIZE = int(args.buffer_size)
        _print_("Buffer size =", Splitter_IMS.BUFFER_SIZE)
    if args.channel:
        Splitter_IMS.CHANNEL = args.channel
        _print_('Channel = "' + Splitter_IMS.CHANNEL + '"')
    if args.chunk_size:
        Splitter_IMS.CHUNK_SIZE = int(args.chunk_size)
        _print_("Chunk size =", Splitter_IMS.CHUNK_SIZE)
    if args.header_size:
        Splitter_IMS.HEADER_SIZE = int(args.header_size)
        _print_("Header size =", Splitter_IMS.HEADER_SIZE)
    if args.port:
        Splitter_IMS.PORT = int(args.port)
        _print_("Listening port =", Splitter_IMS.PORT)
    if args.source_addr:
        Splitter_IMS.SOURCE_ADDR = socket.gethostbyname(args.source_addr)
        _print_("Source address = ", Splitter_IMS.SOURCE_ADDR)
    if args.source_port:
        Splitter_IMS.SOURCE_PORT = int(args.source_port)
        _print_("Source port =", Splitter_IMS.SOURCE_PORT)

    if args.IMS:
        _print_("IP multicast (IMS) mode selected")
        if args.mcast_addr:
            Splitter_IMS.MCAST_ADDR = args.mcast_addr
            _print_("Multicast address =", Splitter_IMS.MCAST_ADDR)
        if args.TTL:
            # FIX: store an int, like every other numeric option (the
            # original kept the raw command-line string).
            Splitter_IMS.TTL = int(args.TTL)
            _print_("Multicast TTL =", Splitter_IMS.TTL)
        splitter = Splitter_IMS()
        splitter.peer_list = []  # No peer_list is used in IMS.
    else:
        _print_("IP unicast mode selected")
        if args.max_chunk_loss:
            Splitter_DBS.MAX_CHUNK_LOSS = int(args.max_chunk_loss)
            _print_("Maximun chunk loss =", Splitter_DBS.MAX_CHUNK_LOSS)
        if args.max_number_of_monitor_peers:
            Splitter_DBS.MONITOR_NUMBER = int(args.max_number_of_monitor_peers)
            _print_("Maximun number of monitor peers =", Splitter_DBS.MONITOR_NUMBER)
        splitter = Splitter_DBS()
        if args.NTS:
            splitter = Splitter_NTS(splitter)
            _print_("NTS enabled")
        if args.ACS:
            splitter = Splitter_ACS(splitter)
            _print_("ACS enabled")
        if args.LRS:
            from core.splitter_lrs import Splitter_LRS
            splitter = Splitter_LRS(splitter)
            _print_("LRS enabled")
        if args.DIS:
            from splitter_strpe import StrpeSplitter
            from splitter_strpeds import StrpeDsSplitter
            _print_("DIS enabled")
            if args.strpe:
                # FIX: the imported class is StrpeSplitter; the original
                # referenced the undefined name "Splitter_strpe".
                splitter = StrpeSplitter(splitter)
                _print_("strpe mode selected")
                for trusted in args.strpe:
                    splitter.add_trusted_peer(trusted)
            if args.strpeds:
                # FIX: STrPe-DS must wrap with StrpeDsSplitter; the original
                # wrapped with StrpeSplitter (plain STrPe) by mistake.
                splitter = StrpeDsSplitter(splitter)
                _print_("strpeds mode selected")
                for trusted in args.strpeds:
                    splitter.add_trusted_peer(trusted)
            if args.strpeds_majority_decision:
                _print_("strpeds_majority_decision mode selected")
                # FIX: configure the ratio on the existing STrPe-DS splitter;
                # the original re-wrapped with an undefined class name
                # ("Splitter_strpeds_majority_decision"), a guaranteed NameError.
                splitter.setMajorityRatio(float(args.strpeds_majority_decision))
            if args.strpe_log:
                splitter.LOGGING = True
                # FIX: the file name lives in args.strpe_log ("strpe_log" alone
                # was undefined) and buffering=0 is illegal for text mode in
                # Python 3.
                splitter.LOG_FILE = open(args.strpe_log, "w")
    # }}}

    # {{{ Run!
    splitter.start()

    # {{{ Prints information until keyboard interruption
    print(" | Received | Sent | Number losses/ losses")
    print(" Time | (kbps) | (kbps) | peers (peer) sents threshold period kbps")
    print("---------+-----------+-----------+-----------------------------------...")
    last_sendto_counter = splitter.sendto_counter
    last_recvfrom_counter = splitter.recvfrom_counter
    while splitter.alive:
        try:
            time.sleep(1)
            chunks_sendto = splitter.sendto_counter - last_sendto_counter
            kbps_sendto = (chunks_sendto * splitter.CHUNK_SIZE * 8) / 1000
            chunks_recvfrom = splitter.recvfrom_counter - last_recvfrom_counter
            kbps_recvfrom = (chunks_recvfrom * splitter.CHUNK_SIZE * 8) / 1000
            last_sendto_counter = splitter.sendto_counter
            last_recvfrom_counter = splitter.recvfrom_counter
            sys.stdout.write(Color.none)
            _print_("|" + repr(int(kbps_recvfrom)).rjust(10)
                    + " |" + repr(int(kbps_sendto)).rjust(10), end=" | ")
            sys.stdout.write(Color.cyan)
            print(len(splitter.peer_list), end=" ")
            if not __debug__:
                counter = 0
            for p in splitter.peer_list:
                if not __debug__:
                    # In release mode, list only the first peers of the team.
                    if counter > 10:
                        break
                    counter += 1
                sys.stdout.write(Color.blue)
                print(p, end=" ")
                sys.stdout.write(Color.red)
                print(str("%3d" % splitter.losses[p]) + "/" + str("%3d" % chunks_sendto),
                      splitter.MAX_CHUNK_LOSS, end=" ")
                # FIX: "splitter is Splitter_ACS" compared an instance with the
                # class object and was always False, so the ACS columns never
                # printed; isinstance() is the intended test.
                if isinstance(splitter, Splitter_ACS):
                    try:
                        sys.stdout.write(Color.yellow)
                        print("%3d" % splitter.period[p], end=" ")
                        sys.stdout.write(Color.purple)
                        print(repr((splitter.number_of_sent_chunks_per_peer[p]
                                    * splitter.CHUNK_SIZE * 8) / 1000).rjust(10),
                              end=" ")
                        splitter.number_of_sent_chunks_per_peer[p] = 0
                    except KeyError as e:
                        # The peer may have left the team between iterations.
                        print("!", e, "--")
                        print(splitter.period[p])
                sys.stdout.write(Color.none)
                print("", end=" ")
            print()
        except KeyboardInterrupt:
            print("Keyboard interrupt detected ... Exiting!")
            # Say to daemon threads that the work has been finished.
            splitter.alive = False
            # Wake up the "moderate_the_team" daemon, which is waiting
            # in a recvfrom().
            if not args.IMS:
                splitter.team_socket.sendto(b"", ("127.0.0.1", splitter.PORT))
            # Wake up the "handle_arrivals" daemon, which is waiting in an
            # accept(), by performing the client side of the handshake.
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect(("127.0.0.1", splitter.PORT))
            sock.recv(struct.calcsize("4sH"))  # Multicast channel
            sock.recv(struct.calcsize("H"))    # Header size
            sock.recv(struct.calcsize("H"))    # Chunk size
            sock.recv(splitter.CHUNK_SIZE * splitter.HEADER_SIZE)  # Header
            sock.recv(struct.calcsize("H"))    # Buffer size
            sock.recv(struct.calcsize("4sH"))  # Endpoint
            sock.recv(struct.calcsize("B"))    # Magic flags
            if args.IMS:
                number_of_peers = 0
            else:
                number_of_peers = socket.ntohs(
                    struct.unpack("H", sock.recv(struct.calcsize("H")))[0])
            print("Number of peers =", number_of_peers)
            # Receive (and discard) the list of peers.
            while number_of_peers > 0:
                sock.recv(struct.calcsize("4sH"))
                number_of_peers -= 1
            # Breaks this thread and returns to the parent process
            # (usually, the shell).
            break
    # }}}
    # }}}