def _start_peer(self, connection, address, remote_pubkey=None):
    log.debug('new connect', connection=connection, incoming=bool(not remote_pubkey))
    # create peer
    peer = Peer(self, connection, remote_pubkey=remote_pubkey)
    log.debug('created new peer', peer=peer)
    self.peers.append(peer)
    # loop
    peer.start()
    log.debug('peer started', peer=peer)
def _start_peer(self, connection, address, remote_pubkey=None):
    # create peer
    peer = Peer(self, connection, remote_pubkey=remote_pubkey)
    log.debug('created new peer', peer=peer, fno=connection.fileno())
    self.peers.append(peer)
    # loop
    peer.start()
    log.debug('peer started', peer=peer, fno=connection.fileno())
    assert not connection.closed
    return peer
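# A minimal usage sketch (an assumption, not taken from the snippets above) of how a
# _start_peer helper like the two variants above is typically driven: a listening
# socket accepts connections and hands each one off. Incoming connections arrive
# without a known remote public key, hence remote_pubkey=None. The `manager` object
# and the listen socket setup are hypothetical.
import socket

def accept_loop(manager, host='0.0.0.0', port=30303):
    listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen_sock.bind((host, port))
    listen_sock.listen(16)
    while True:
        connection, address = listen_sock.accept()
        # incoming connection: the remote public key is not known yet
        manager._start_peer(connection, address, remote_pubkey=None)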
def add_peers(self, message_body):
    for peer_data in message_body.values():
        try:
            if peer_data['id'] != self.my_id and peer_data['id'] not in self.peers:
                tcp_connection = TcpConnection()
                tcp_connection.connect(peer_data['ip'], peer_data['port'])
                tcp_connection.send(
                    (19).to_bytes(CODE_SIZE, byteorder='big')
                    + (self.info_hash + self.my_id).encode('utf-8'))
                if int.from_bytes(tcp_connection.receive(), byteorder='big') == 11:
                    peer = Peer.start(peer_data['id'], tcp_connection, self.actor_ref,
                                      self.file_object, self.pieces_map)
                    self.peers[peer_data['id']] = {
                        'actor_ref': peer,
                        'downloaded_bytes': 0,
                        'state': 'not_interested'
                    }
                    print(colored(f"added new peer with id: {peer_data['id']}", 'yellow'))
                    if len(self.peers) == 1:
                        self.timer = Timer(TIMER_INTERVAL, self.actor_ref)
                        self.timer.daemon = True
                        self.timer.start()
        except Exception:
            # unreachable or misbehaving peers are silently skipped
            pass
def _start_peer(self, connection, address, is_inititator=False):
    log.debug('new connect', connection=connection)
    # create peer
    peer = Peer(self, connection)
    log.debug('created new peer', peer=peer)
    # register p2p protocol
    p2pprotocol = P2PProtocol(peer, cmd_offset=0, is_inititator=is_inititator)
    peer.register_protocol(p2pprotocol)
    self.peers.append(peer)
    # loop
    peer.start()
    log.debug('peer started', peer=peer)
def add_peer(self, connection, host, port):
    # FIXME: should check existence first
    connection.settimeout(.1)
    peer = None  # returned as None if the session cannot be started
    try:
        peer = Peer(connection, host, port)
        peer.start()
        with self.lock:
            self.connected_peers.add(peer)
        logger.debug(
            "new TCP connection {0} {1}:{2}"
            .format(connection, host, port))
    except BaseException as e:
        logger.error(
            "cannot start TCP session \"{0}\" {1}:{2} "
            .format(str(e), host, port))
        traceback.print_exc(file=sys.stdout)
        connection.close()
        time.sleep(0.1)
    return peer
def add_peer(self, message_body):
    peer_id = message_body[0]
    tcp_connection = message_body[1]
    if peer_id != self.my_id and peer_id not in self.peers:
        peer = Peer.start(peer_id, tcp_connection, self.actor_ref,
                          self.file_object, self.pieces_map)
        self.peers[peer_id] = {
            'actor_ref': peer,
            'downloaded_bytes': 0,
            'state': 'not_interested'
        }
        print(colored(f'added new peer with id: {peer_id}', 'yellow'))
        if len(self.peers) == 1:
            self.timer = Timer(TIMER_INTERVAL, self.actor_ref)
            self.timer.daemon = True
            self.timer.start()
def _start_peer(self, host, port):
    peer = Peer(host, port)
    peer.start()
def _start_peer(self, connection, ip, port):
    peer = Peer(connection, ip, port)
    peer.start()
    return peer
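# A complementary sketch (an assumption, not from the snippets above): the outbound
# path that pairs with a _start_peer helper returning the new Peer, as in the variant
# above. Only socket.create_connection and a hypothetical `manager` object are assumed.
import socket

def connect_to(manager, ip, port, timeout=5.0):
    connection = socket.create_connection((ip, port), timeout=timeout)
    # hand the freshly dialed socket to the manager and keep the Peer handle
    return manager._start_peer(connection, ip, port)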
import json

from peer import Peer

"""
Script that drives the execution flow of a leecher.
"""

if __name__ == "__main__":
    peer = Peer(5, 5)
    path = input('Path to torrent file: ')
    peer.start(path)
class Client:
    def __init__(self, host, port, spec=None, vhost=None):
        self.host = host
        self.port = port
        if spec:
            self.spec = spec
        else:
            from qpid_config import amqp_spec_0_9
            self.spec = load(amqp_spec_0_9)
        self.structs = StructFactory(self.spec)
        self.sessions = {}

        self.mechanism = None
        self.response = None
        self.locale = None

        self.vhost = vhost
        if self.vhost == None:
            self.vhost = "/"

        self.queues = {}
        self.lock = threading.Lock()

        self.closed = False
        self.reason = None
        self.started = threading.Event()

    def wait(self):
        self.started.wait()
        if self.closed:
            raise Closed(self.reason)

    def queue(self, key):
        self.lock.acquire()
        try:
            try:
                q = self.queues[key]
            except KeyError:
                q = Queue(0)
                self.queues[key] = q
        finally:
            self.lock.release()
        return q

    def start(self, response, mechanism="AMQPLAIN", locale="en_US", tune_params=None):
        self.mechanism = mechanism
        self.response = response
        self.locale = locale
        self.tune_params = tune_params

        self.socket = connect(self.host, self.port)
        self.conn = Connection(self.socket, self.spec)
        self.peer = Peer(self.conn, ClientDelegate(self), Session)

        self.conn.init()
        self.peer.start()
        self.wait()
        self.channel(0).connection_open(self.vhost)

    def channel(self, id):
        self.lock.acquire()
        try:
            ssn = self.peer.channel(id)
            ssn.client = self
            self.sessions[id] = ssn
        finally:
            self.lock.release()
        return ssn

    def session(self):
        self.lock.acquire()
        try:
            id = None
            for i in xrange(1, 64*1024):
                if not self.sessions.has_key(i):
                    id = i
                    break
        finally:
            self.lock.release()
        if id == None:
            raise RuntimeError("out of channels")
        else:
            return self.channel(id)

    def close(self):
        self.socket.close()
class Client:
    def __init__(self, host, port, spec=None, vhost=None):
        self.host = host
        self.port = port
        if spec:
            self.spec = spec
        else:
            from specs_config import amqp_spec_0_9
            self.spec = load(amqp_spec_0_9)
        self.structs = StructFactory(self.spec)
        self.sessions = {}

        self.mechanism = None
        self.response = None
        self.locale = None
        self.sasl = None

        self.vhost = vhost
        if self.vhost == None:
            self.vhost = "/"

        self.queues = {}
        self.lock = threading.Lock()

        self.closed = False
        self.reason = None
        self.started = threading.Event()

        self.peer = None

    def wait(self):
        self.started.wait()
        if self.closed:
            raise Closed(self.reason)

    def queue(self, key):
        self.lock.acquire()
        try:
            try:
                q = self.queues[key]
            except KeyError:
                q = Queue(0)
                self.queues[key] = q
        finally:
            self.lock.release()
        return q

    def start(self, response=None, mechanism=None, locale="en_US", tune_params=None,
              username=None, password=None, client_properties=None,
              connection_options=None, sasl_options=None, channel_options=None):
        if response is not None and (username is not None or password is not None):
            raise RuntimeError("client must not specify both response and (username, password).")
        if response is not None:
            self.response = response
            authzid, self.username, self.password = response.split("\0")
        else:
            self.username = username
            self.password = password
        self.mechanism = mechanism
        self.locale = locale
        self.tune_params = tune_params
        self.client_properties = get_client_properties_with_defaults(
            provided_client_properties=client_properties,
            version_property_key="version")
        self.sasl_options = sasl_options

        self.socket = connect(self.host, self.port, connection_options)
        self.conn = Connection(self.socket, self.spec)
        self.peer = Peer(self.conn, ClientDelegate(self), Session, channel_options)

        self.conn.init()
        self.peer.start()
        self.wait()
        self.channel(0).connection_open(self.vhost)

    def channel(self, id):
        self.lock.acquire()
        try:
            ssn = self.peer.channel(id)
            ssn.client = self
            self.sessions[id] = ssn
        finally:
            self.lock.release()
        return ssn

    def session(self):
        self.lock.acquire()
        try:
            id = None
            for i in xrange(1, 64*1024):
                if not self.sessions.has_key(i):
                    id = i
                    break
        finally:
            self.lock.release()
        if id == None:
            raise RuntimeError("out of channels")
        else:
            return self.channel(id)

    def close(self):
        if self.peer:
            try:
                if not self.closed:
                    channel = self.channel(0)
                    if channel and not channel._closed:
                        try:
                            channel.connection_close(reply_code=200)
                        except:
                            pass
                    self.closed = True
            finally:
                self.peer.stop()
class MainInterface(QMainWindow):
    torrentHeader = ["Name", "File Name", "Size"]
    taskHeader = ["Torrent Name", "File Name", "Size", "Progress"]

    def __init__(self):
        super(MainInterface, self).__init__()
        self.peer = Peer()
        self.peer.set_parent(self)
        self.peer.start()
        self.initUI()
        self.initSignals()
        self.initData()

    def initMenu(self):
        openFile = QAction('Make torrent', self)
        openFile.setStatusTip('Upload file')
        openFile.triggered.connect(self.makeTorrent)
        self.statusBar()
        menuBar = self.menuBar()
        fileMenu = menuBar.addMenu("&File")
        fileMenu.addAction(openFile)

    def makeTorrent(self):
        fPath = QFileDialog.getOpenFileName(self, 'Open File', "~/Desktop")
        self.peer.upload_file(unicode(fPath.toUtf8(), encoding="UTF-8"))

    def initSignals(self):
        QObject.connect(self, SIGNAL("GET_TORRENT_ON_SERVER"),
                        self.retrieveTorrentOnServer)
        QObject.connect(self, SIGNAL("FINISH_DOWNLOAD_TORRENT(PyQt_PyObject)"),
                        self.showDownloadFinishMsg)
        QObject.connect(self.torrentTableView, SIGNAL("doubleClicked(QModelIndex)"),
                        self.downloadTorrentAtIndex)
        QObject.connect(self.taskTableView, SIGNAL("doubleClicked(QModelIndex)"),
                        self.downloadFileAtIndex)

    def initData(self):
        self.emit(SIGNAL("GET_TORRENT_ON_SERVER"))

    def retrieveTorrentOnServer(self):
        self.torrentOnServer = self.peer.request_torrent_list()
        # configure table view of torrents
        self.torrentTableModel = MyTableModel(self, self.torrentOnServer, self.torrentHeader)
        self.torrentTableView.setModel(self.torrentTableModel)

    def retrieveLocalTorrent(self):
        self.localTorrentData = []
        torr_list = os.listdir(local_torrents_folder_path)
        for name in torr_list:
            f = open(local_torrents_folder_path + '/' + name)
            f_name = f.readline().replace('\n', '')
            f_size = int(f.readline())
            self.localTorrentData.append((name, f_name, f_size, 0))

    def downloadTorrentAtIndex(self, index):
        item = self.torrentOnServer[index.row()]
        Thread(target=self.peer.download_torrent, args=(item[0],)).start()

    def downloadFileAtIndex(self, index):
        item = self.localTorrentData[index.row()]
        Thread(target=self.peer.download_file, args=(item[0],)).start()

    def finishDownloadTorrentHandler(self, tName):
        self.emit(SIGNAL("FINISH_DOWNLOAD_TORRENT(PyQt_PyObject)"), tName)

    def showDownloadFinishMsg(self, tName):
        f = open(local_torrents_folder_path + '/' + tName)
        f_name = f.readline().replace('\n', '')
        f_size = int(f.readline())
        self.taskTableModel.layoutAboutToBeChanged.emit()
        self.localTorrentData.append((tName, f_name, f_size, 0))
        self.taskTableModel.layoutChanged.emit()
        msgBox = QMessageBox()
        msgBox.setText(tName + " download finish")
        msgBox.exec_()

    def initUI(self):
        self.initMenu()
        self.retrieveLocalTorrent()
        self.setGeometry(300, 300, 1000, 500)
        self.setWindowTitle("P2P Search")

        self.torrentTableView = QTableView()
        # self.torrentTableView.resizeColumnsToContents()
        # self.taskTableView.resizeColumnsToContents()

        # configure table view of tasks
        print "1"
        self.taskTableView = QTableView()
        print self.localTorrentData[1:4]
        self.taskTableModel = MyTableModel(self, self.localTorrentData, self.taskHeader)
        self.taskTableView.setModel(self.taskTableModel)

        self.taskInfo = QTextEdit()
        self.taskInfo.setReadOnly(True)
        self.taskInfo.setLineWrapMode(QTextEdit.NoWrap)

        self.torrentLabel = QLabel()
        self.torrentLabel.setText("TORRENTS")

        rightLayout = QVBoxLayout()
        rightLayout.addWidget(self.taskTableView)
        rightLayout.addWidget(self.taskInfo, 2)

        leftLayout = QVBoxLayout()
        leftLayout.addWidget(self.torrentLabel)
        leftLayout.addWidget(self.torrentTableView)

        mainLayout = QHBoxLayout()
        mainLayout.addLayout(leftLayout)
        mainLayout.addLayout(rightLayout, 3)

        widget = QWidget()
        widget.setLayout(mainLayout)
        self.setCentralWidget(widget)

        self.show()
class Client:
    def __init__(self, host, port, spec=None, vhost=None):
        self.host = host
        self.port = port
        if spec:
            self.spec = spec
        else:
            from specs_config import amqp_spec_0_9
            self.spec = load(amqp_spec_0_9)
        self.structs = StructFactory(self.spec)
        self.sessions = {}

        self.mechanism = None
        self.response = None
        self.locale = None
        self.sasl = None

        self.vhost = vhost
        if self.vhost == None:
            self.vhost = "/"

        self.queues = {}
        self.lock = threading.Lock()

        self.closed = False
        self.reason = None
        self.started = threading.Event()

    def wait(self):
        self.started.wait()
        if self.closed:
            raise Closed(self.reason)

    def queue(self, key):
        self.lock.acquire()
        try:
            try:
                q = self.queues[key]
            except KeyError:
                q = Queue(0)
                self.queues[key] = q
        finally:
            self.lock.release()
        return q

    def start(self, response=None, mechanism=None, locale="en_US", tune_params=None,
              username=None, password=None, client_properties=None,
              connection_options=None, sasl_options=None):
        self.mechanism = mechanism
        self.response = response
        self.username = username
        self.password = password
        self.locale = locale
        self.tune_params = tune_params
        self.client_properties = get_client_properties_with_defaults(
            provided_client_properties=client_properties,
            version_property_key="version")
        self.sasl_options = sasl_options

        self.socket = connect(self.host, self.port, connection_options)
        self.conn = Connection(self.socket, self.spec)
        self.peer = Peer(self.conn, ClientDelegate(self), Session)

        self.conn.init()
        self.peer.start()
        self.wait()
        self.channel(0).connection_open(self.vhost)

    def channel(self, id):
        self.lock.acquire()
        try:
            ssn = self.peer.channel(id)
            ssn.client = self
            self.sessions[id] = ssn
        finally:
            self.lock.release()
        return ssn

    def session(self):
        self.lock.acquire()
        try:
            id = None
            for i in xrange(1, 64*1024):
                if not self.sessions.has_key(i):
                    id = i
                    break
        finally:
            self.lock.release()
        if id == None:
            raise RuntimeError("out of channels")
        else:
            return self.channel(id)

    def close(self):
        self.socket.close()
class SDFS_Node:
    def __init__(self):
        host_name = socket.gethostname()
        if os.path.isdir("sdfs/"):
            shutil.rmtree("sdfs/")
        os.system("mkdir sdfs")
        self.membership_manager = Peer(host_name)
        self.membership_manager.start()
        self.padding = "jyuan18?yixinz6"
        self.file_receive_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.file_receive_socket.bind(("0.0.0.0", 2333))
        self.file_dict = {"filename": [1, 2, 3, 4, 5]}
        self.file_dict_lock = threading.Lock()
        self.file_to_node = {"filename": ["ip1", "ip2", "ip3"]}
        self.ip = socket.gethostname()
        self.file_receive_socket.listen(100)
        logging.basicConfig(filename='vm1.log', level=logging.INFO)
        threading.Thread(target=self.receive_file).start()
        if self.ip == "fa18-cs425-g26-01.cs.illinois.edu":
            Master = master(self.membership_manager)
            threading.Thread(target=Master.listen, args=(Master.op_listen_socket, False)).start()
            threading.Thread(target=Master.listen, args=(Master.ack_listen_socket, True)).start()

    # replication to 3 members in the membership (if someone fails, ignore; the master takes care of it)
    def receive_file(self):
        max_data = 8192
        while True:
            file = deque([])
            connection, addr = self.file_receive_socket.accept()
            while True:
                data = connection.recv(max_data)
                data = data.decode()
                file.append(data)
                if len(data) == 0:
                    break
                elif len(data) != max_data and data.endswith("jyuan18?yixinz6"):  # marks end of file
                    break
                elif len(data) < len(self.padding) and data.endswith(self.padding[-1 * (len(data)):]):
                    break
            self.message_handler(file, addr)

    # dispatch on all the message types
    def message_handler(self, message, addr):
        # first chunk carries the instruction
        first_chunk = message[0]
        instruction = first_chunk.split("jyuan18?yixinz6")[0]
        if len(json.dumps(instruction)) < 5:
            print("received a malformed or empty instruction, ignoring message")
            print(message)
            return
        try:
            instruction = json.loads(instruction)  # convert the instruction back to a dictionary
        except:
            return
        first_chunk = first_chunk.split("jyuan18?yixinz6")[1]
        message[0] = first_chunk  # strip the instruction from the first part of the message
        operation = instruction["op"]
        # print("operation is:" + operation)
        if operation == "put":
            self.put(instruction, message)
        elif operation == "get":
            self.get(instruction, addr)
        elif operation == "del":
            self.delete(instruction)
        elif operation == "get-versions":
            self.get_version(instruction, addr)
        elif operation == "recover":
            self.recover(instruction)
        elif operation == "recover_copy":
            self.receive_copy(instruction, message)
        elif operation == "change":
            self.change(instruction)
        elif operation == "append":
            # logging.info("appending to a file")
            self.append(instruction, message)

    def append(self, instruction, file):
        instruction["ttl"] -= 1
        sdfs_filename = instruction["s"]
        self.file_to_node[sdfs_filename] = instruction["ips"]
        # threading.Thread(target=self.spread, args=(instruction, file)).start()
        if sdfs_filename in self.file_dict:
            if len(self.file_dict[sdfs_filename]) < 5:
                self.file_update(sdfs_filename, instruction, file)
            else:
                # deletion of old file
                self.file_update(sdfs_filename, instruction, file)
                # start removing the oldest version
                obsolete_version = self.file_dict[sdfs_filename].pop(0)
                file_to_delete = sdfs_filename + "-" + str(obsolete_version)
                if os.path.isfile(file_to_delete):
                    os.remove(file_to_delete)
                else:
                    print("There is no file: " + file_to_delete)
        else:
            self.file_update(sdfs_filename, instruction, file)
        # self.reply_ack(instruction)

    # make a socket connected to the master
    def master_socket(self):
        master_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        master_sock.connect((self.membership_manager.member_ship_list[0].split("#")[0], 9999))
        return master_sock

    # recover: send the replica of the specified file to the specified machine
    def recover(self, instruction):
        ips = [instruction["new"]]
        sdfs_filenames = [instruction["s"]]
        master_sock = self.master_socket()
        seq = instruction["seq"]
        # change the index on this machine about the replica location
        # send all the versions of a file to the specified node
        for ip in ips:
            # send to recipient
            for sdfs_filename in sdfs_filenames:
                versions = self.file_dict[sdfs_filename]
                for version in reversed(versions):
                    try:
                        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        sock.connect((ip, 2333))
                    except:
                        print("connect recover copy to node " + ip + " failed")
                        continue
                    version_filename = sdfs_filename + "-" + str(version)
                    with open(version_filename, 'r') as file_reader:
                        content = file_reader.read()
                    instruction = {"s": version_filename, "seq": seq, "op": "recover_copy"}
                    if versions.index(version) == 0:
                        instruction["final"] = "true"
                    instruction = json.dumps(instruction) + self.padding
                    message_list = [instruction, content, self.padding]
                    try:
                        sock.send(("".join(message_list)).encode())
                    except:
                        print("send recover copy to node " + ip + " failed")
                        continue
                    ack = {"op": "ack", "seq": seq}
                    master_sock.send(json.dumps(ack).encode())
                    sock.close()
        master_sock.close()

    # receive replicas in case of node failure
    def receive_copy(self, instruction, file):
        versioned_filename = str(instruction['s'])
        filename = versioned_filename[:versioned_filename.rfind('-')]
        version = versioned_filename[versioned_filename.rfind('-') + 1:]
        self.file_dict_lock.acquire()
        if filename not in self.file_dict:
            self.file_dict[filename] = [int(version)]
        else:
            self.file_dict[filename].append(int(version))
        self.file_dict_lock.release()
        file_writer = open(versioned_filename, "w")
        for chunk in itertools.islice(file, 0, max(len(file) - 2, 0)):
            file_writer.write(chunk)
        second_last = ""
        if len(file) > 1:
            second_last = file[-2]
        file_writer.write((second_last + file[-1]).split("jyuan18?yixinz6")[0])
        file_writer.close()  # flush the replica to disk
        if "final" in instruction:
            master_socket = self.master_socket()
            message = json.dumps({"seq": instruction["seq"], "op": "ack"})
            master_socket.send(message.encode())
            master_socket.close()

    # change the info about which node has a specific file after failure recovery
    def change(self, instruction):
        self.file_dict_lock.acquire()
        self.file_to_node[instruction['s']] = instruction["ips"]
        self.file_dict_lock.release()
        logging.info("update membership list after recovery, the file " + instruction['s']
                     + " is currently at " + str(instruction["ips"]))

    def delete(self, instruction):
        versions = self.file_dict.pop(instruction['s'])
        self.file_to_node.pop(instruction['s'])
        for version in versions:
            os.system("rm " + instruction['s'] + "-" + str(version))

    def get(self, instruction, addr):
        sdfs_filename = instruction['s']
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect((addr[0], 6667))
        if sdfs_filename not in self.file_dict:
            message = "No such file."
            try:
                conn.send(message.encode())
            except:
                print("return to client failed")
            return
        else:
            with open(sdfs_filename + "-" + str(self.file_dict[sdfs_filename][-1]), "r") as file_reader:
                content = file_reader.read()
            content += self.padding
            conn.send(content.encode())

    def get_version(self, instruction, addr):
        sdfs_filename = instruction['s']
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        num_versions = instruction["num_versions"]
        conn.connect((addr[0], 6667))
        if sdfs_filename not in self.file_dict:
            message = "No such file."
            try:
                conn.send(message.encode())
            except:
                print("return to client failed")
            return
        else:
            combined_file = []
            versions = self.file_dict[sdfs_filename]
            if num_versions > len(versions):
                message = "too many versions."
                try:
                    conn.send(message.encode())
                except:
                    print("return to client failed")
                return
            # combine each requested version
            versions = versions[(-1) * num_versions:]
            for version in versions:
                with open(sdfs_filename + "-" + str(version), "r") as file_reader:
                    content = file_reader.read()
                combined_file.append("version " + str(version) + "\n\n\n")
                combined_file.append(content)
                combined_file.append("\n\n\n")
            combined_file.append(self.padding)
            content = "".join(combined_file)
            conn.send(content.encode())
            logging.info("the last " + str(num_versions) + " versions of the file "
                         + sdfs_filename + " were read")
            logging.info("this file " + sdfs_filename + " is also stored in "
                         + str(self.file_to_node[sdfs_filename]))

    # send ack to master when required
    def reply_ack(self, instruction):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.membership_manager.member_ship_list[0].split("#")[0], 9999))
        ins = json.dumps({"seq": instruction["seq"], "op": "ack"})
        # print(ins)
        sock.send(ins.encode())
        sock.close()

    def put(self, instruction, file):
        instruction["ttl"] -= 1
        sdfs_filename = instruction["s"]
        self.file_to_node[sdfs_filename] = instruction["ips"]
        threading.Thread(target=self.spread, args=(instruction, file)).start()
        if sdfs_filename in self.file_dict:
            if len(self.file_dict[sdfs_filename]) < 5:
                self.file_save(sdfs_filename, instruction, file)
            else:  # deletion of old file
                self.file_save(sdfs_filename, instruction, file)
                # start removing the oldest version
                obsolete_version = self.file_dict[sdfs_filename].pop(0)
                file_to_delete = sdfs_filename + "-" + str(obsolete_version)
                if os.path.isfile(file_to_delete):
                    os.remove(file_to_delete)
                else:
                    print("There is no file: " + file_to_delete)
        else:
            self.file_save(sdfs_filename, instruction, file)
        self.reply_ack(instruction)

    def file_save(self, sdfs_filename, instruction, file):  # simply save this as the newest version of the file
        if sdfs_filename not in self.file_dict:  # does not exist yet: initialize
            self.file_dict[sdfs_filename] = [0]
        new_version_num = self.file_dict[sdfs_filename][-1] + 1
        if self.file_dict[sdfs_filename][0] == 0:  # initialization happened this time
            self.file_dict[sdfs_filename].pop(0)
        self.file_dict[sdfs_filename].append(new_version_num)
        sdfs_filename = sdfs_filename + "-" + str(new_version_num)
        sum = 0
        for c in file:
            sum += len(c)
        logging.info("saving file")
        file_writer = open(sdfs_filename, "w")
        for chunk in itertools.islice(file, 0, max(len(file) - 2, 0)):
            file_writer.write(chunk)
        second_last = ""
        if len(file) > 1:
            second_last = file[-2]
        file_writer.write((second_last + file[-1]).split("jyuan18?yixinz6")[0])
        file_writer.close()

    def file_update(self, sdfs_filename, instruction, file):  # append to the newest version of the file
        if sdfs_filename not in self.file_dict:  # does not exist yet: initialize
            self.file_dict[sdfs_filename] = [0]
        new_version_num = self.file_dict[sdfs_filename][-1]
        if self.file_dict[sdfs_filename][0] == 0:  # initialization happened this time
            self.file_dict[sdfs_filename].pop(0)
        # self.file_dict[sdfs_filename].append(new_version_num)
        sdfs_filename = sdfs_filename + "-" + str(new_version_num)
        sum = 0
        for c in file:
            sum += len(c)
        logging.info("updating file")
        file_writer = open(sdfs_filename, "a+")
        for chunk in itertools.islice(file, 0, max(len(file) - 2, 0)):
            file_writer.write(chunk)
        second_last = ""
        if len(file) > 1:
            second_last = file[-2]
        file_writer.write((second_last + file[-1]).split("jyuan18?yixinz6")[0])
        file_writer.close()

    # forward the file to another replica on put, until the specified number of forwards is done
    def spread(self, instruction, file):
        if instruction["ttl"] < 1:
            return
        next_ip = instruction["ips"][(instruction["ips"].index(self.ip) + 1) % len(instruction["ips"])]
        temp_list = []
        temp_list.append(json.dumps(instruction) + self.padding)
        for chunk in file:
            temp_list.append(chunk)
        temp_list.append(self.padding)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((next_ip, 2333))
        sock.send("".join(temp_list).encode())
def run(self):
    self.startTime = time.time()
    for i in range(configs.PEERS_NUM):
        peerThread = Peer(configs.allNodes[i])
        self.threadConnection.append(peerThread)
        peerThread.start()
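# A follow-up sketch (an assumption, not part of the snippet above): if the Peer
# objects spawned in run() are threading.Thread subclasses, the coordinator can wait
# for all of them and report the elapsed time. The attribute names mirror the snippet;
# the join/report step itself is hypothetical.
def wait_for_peers(self):
    for peerThread in self.threadConnection:
        peerThread.join()
    elapsed = time.time() - self.startTime
    print("all peers finished in %.2f seconds" % elapsed)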