def make_stop_training_req(in_task_id):
    """Build and pack a signed stop-training request for task *in_task_id*."""
    buf = TMemoryBuffer()
    proto = TBinaryProtocol(buf)
    nonce = get_random_id()
    session_id = get_random_id()
    head = msg_header(get_magic(), STOP_TRAINING_REQ, nonce, session_id)
    print("stop req.nonce:%s, task_id:%s" % (nonce, in_task_id))
    body = stop_training_req_body(in_task_id)
    # Sign task_id+nonce so the receiver can verify who issued the stop.
    signed_payload = in_task_id + nonce
    head.exten_info = {
        "origin_id": get_node_id(),
        "sign_algo": "ecdsa",
        "sign": dbc_sign(signed_payload),
    }
    head.write(proto)
    body.write(proto)
    proto.writeMessageEnd()
    buf.flush()
    return pack_head(buf)
def make_ver_req(peer_ip, peer_port, node_id):
    """Build a compact-protocol ver_req message (legacy/test variant).

    NOTE(review): a later ``make_ver_req`` defined further down in this
    file shadows this one, so only the later definition is reachable by
    callers — confirm whether this variant is still needed.

    Fixes vs. original: removed the unused local ``v = m.getvalue()``
    and the commented-out debug prints.
    """
    buf = TMemoryBuffer()
    # Unlike the other builders in this file, this one uses TCompactProtocol.
    proto = TCompactProtocol(buf)
    msg_name = "ver_req"
    nonce = "0"  # fixed nonce here; other builders use get_random_id()
    head = ttypes_header.msg_header(0, msg_name, nonce)
    head.write(proto)
    addr_me = network_address("127.0.0.1", 1)
    addr_you = network_address(peer_ip, peer_port)
    time_stamp = int(time.time())
    req = ver_req_body(node_id, 0, 0, time_stamp, addr_me, addr_you, 1)
    req.write(proto)
    buf.flush()
    return pack_head(buf)
def make_get_peer_nodes_resp(dport, ip="10.10.254.198"):
    """Build a get_peer_nodes response advertising a single peer node.

    Generalized: the peer IP was hard-coded; it is now the ``ip``
    parameter, whose default preserves the original behavior for
    existing callers.

    Parameters:
        dport: port of the advertised peer.
        ip: IP address of the advertised peer.
    """
    m = TMemoryBuffer()
    p = TBinaryProtocol(m)
    msg_name = P2P_GET_PEER_NODES_RESP
    nonce = get_random_id()
    head = msg_header(get_magic(), msg_name, nonce)
    head.write(p)
    print("nonce:%s" % (nonce))
    node_info = peer_node_info()
    node_info.addr = network_address(ip, dport)
    node_info.peer_node_id = gen_node_id()
    node_info.core_version = core_version
    node_info.protocol_version = pro_version
    node_info.live_time_stamp = int(time.time())
    req = get_peer_nodes_resp_body([node_info])
    req.write(p)
    p.writeMessageEnd()
    m.flush()
    return pack_head(m)
def make_broad_cast_node_info(node_id, name):
    """Build a service-broadcast request advertising this node's services."""
    buf = TMemoryBuffer()
    proto = TBinaryProtocol(buf)
    nonce = get_random_id()
    head = msg_header(get_magic(), SERVICE_BROADCAST_REQ, nonce)
    head.write(proto)
    info = node_service_info()
    info.name = name
    info.time_stamp = int(time.time())
    info.service_list = ["ai_training"]
    # Advertised node attributes (GPU model and current state).
    info.kvs = {"gpu": "1 * GeForce940MX", "state": "idle"}
    body = service_broadcast_req_body({node_id: info})
    body.write(proto)
    proto.writeMessageEnd()
    buf.flush()
    return pack_head(buf)
def make_ver_resp():
    """Build the version-handshake response carrying this node's versions."""
    buf = TMemoryBuffer()
    proto = TBinaryProtocol(buf)
    nonce = get_random_id()
    head = msg_header(get_magic(), VER_RESP, nonce)
    head.write(proto)
    body = ver_resp_body(gen_node_id(), core_version, pro_version)
    body.write(proto)
    proto.writeMessageEnd()
    buf.flush()
    return pack_head(buf)
def make_start_training_req(code_hash, entry_file, engine, peer_nodes, hyper_params, data_hash=""):
    """Build a signed start-training request.

    Returns (task_id, packed_message) on success, None if building the
    message raises EOFError/IOError.
    """
    try:
        buf = TMemoryBuffer()
        proto = TBinaryProtocol(buf)
        nonce = get_random_id()
        head = msg_header(get_magic(), AI_TRAINING_NOTIFICATION_REQ, nonce)
        task_id = get_random_id()
        print("task_id: %s, nonce:%s" % (task_id, nonce))
        body = start_training_req_body(
            task_id,
            0x00,                    # select_mode
            "",                      # master
            peer_nodes.split(","),   # peer_nodes_list
            "",                      # server_specification
            0,                       # server_count
            engine,                  # training_engine
            code_hash,               # code_dir
            entry_file,
            data_hash,               # data_dir
            "",                      # checkpoint_dir
            hyper_params,            # hyper_parameters
        )
        # Sign task_id+code_dir+nonce so the receiver can verify the origin.
        message = task_id + code_hash + nonce
        print("message:", message)
        origin = get_node_id()
        exten_info = {
            "origin_id": origin,
            "sign_algo": "ecdsa",
            "sign": dbc_sign(message),
        }
        print("sign:", exten_info["sign"])
        head.exten_info = exten_info
        head.write(proto)
        body.write(proto)
        proto.writeMessageEnd()
        buf.flush()
        return task_id, pack_head(buf)
    except EOFError:
        print("Error: msg body decode failure")
        return
    except IOError:
        print("Error: IO Error")
        return
def make_list_training_req(in_task_id):
    """Build a list-training request for task *in_task_id*.

    Bug fix: the original set ``msg_name = STOP_TRAINING_REQ`` — a
    copy-paste error from make_stop_training_req — so the message was
    labeled as a stop request while carrying a list_training_req_body.
    """
    m = TMemoryBuffer()
    p = TBinaryProtocol(m)
    # assumes LIST_TRAINING_REQ is defined alongside STOP_TRAINING_REQ
    # in the constants module — TODO confirm the exact constant name.
    msg_name = LIST_TRAINING_REQ
    nonce = get_random_id()
    session_id = get_random_id()
    head = msg_header(get_magic(), msg_name, nonce, session_id)
    print("list req.nonce:%s, task_id:%s" % (nonce, in_task_id))
    req = list_training_req_body(in_task_id)
    head.write(p)
    req.write(p)
    p.writeMessageEnd()
    m.flush()
    return pack_head(m)
def make_shake_hand(self):
    """Send a shake-hand keepalive, then re-arm a 5-second timer to repeat."""
    buf = TMemoryBuffer()
    proto = TBinaryProtocol(buf)
    head = msg_header(get_magic(), SHAKE_HAND_REQ)
    head.write(proto)
    shake_hand_req().write(proto)
    proto.writeMessageEnd()
    buf.flush()
    self.send(pack_head(buf))
    # Schedule the next keepalive; each invocation re-arms itself.
    threading.Timer(5, self.make_shake_hand).start()
def make_ver_req(peer_ip, peer_port, node_id):
    """Build the version-handshake request sent to peer (peer_ip, peer_port)."""
    buf = TMemoryBuffer()
    proto = TBinaryProtocol(buf)
    nonce = get_random_id()
    head = msg_header(get_magic(), VER_REQ, nonce)
    head.write(proto)
    print("nonce:%s, node_id:%s" % (nonce, node_id))
    local_addr = network_address("127.0.0.1", 21107)
    remote_addr = network_address(peer_ip, peer_port)
    body = ver_req_body(node_id, core_version, pro_version, int(time.time()),
                        local_addr, remote_addr, start_height)
    body.write(proto)
    proto.writeMessageEnd()
    buf.flush()
    return pack_head(buf)
def make_logs_req(node_id, task_id="2opWVieBRgGTuc23JGfCxCVA7MrzeJZfMuuXG3dFbUXwih4kNJ"):
    """Build a logs request asking *node_id* for the logs of *task_id*.

    Generalized: the task id was a hard-coded constant; it is now a
    parameter whose default preserves the original behavior.

    Parameters:
        node_id: peer node to query.
        task_id: training task whose logs are requested.
    """
    m = TMemoryBuffer()
    p = TBinaryProtocol(m)
    msg_name = LOGS_REQ
    nonce = get_random_id()
    session_id = get_random_id()
    head = msg_header(get_magic(), msg_name, nonce, session_id)
    head.write(p)
    print("logs req.nonce:%s, node_id:%s" % (nonce, node_id))
    peer_nodes_list = [node_id]
    # head_or_tail=1, number_of_lines=100 (same constants as before).
    req = logs_req_body(task_id, peer_nodes_list, 1, 100)
    req.write(p)
    p.writeMessageEnd()
    m.flush()
    return pack_head(m)
def make_start_training_req(task_path):
    """Build a signed start-training request from a key=value task file.

    NOTE(review): this definition shadows the earlier
    ``make_start_training_req`` above (which takes explicit arguments and
    returns ``(task_id, packed_msg)``); this variant returns only the
    packed message — confirm which one callers expect.

    Fixes vs. original:
      * ``print "..."`` Python-2 statements in the except blocks were a
        SyntaxError under Python 3, which the rest of this file targets.
      * The task file is now opened with ``with`` so the handle is closed
        even if parsing raises (the original leaked it).
      * Lines without '=' (including bare newlines) are skipped; the
        original raised IndexError on them.
    """
    try:
        result = {}
        with open(task_path, 'r') as ifd:
            for strline in ifd.readlines():
                # Skip blank lines and lines that are not key=value pairs.
                if '=' not in strline:
                    continue
                temp_store = strline.split('=')
                result[temp_store[0]] = temp_store[1].strip("\n").strip("\r")
        m = TMemoryBuffer()
        p = TBinaryProtocol(m)
        msg_name = AI_TRAINING_NOTIFICATION_REQ
        nonce = get_random_id()
        head = msg_header(get_magic(), msg_name, nonce)
        task_id = get_random_id()
        print("task_id: %s, nonce:%s" % (task_id, nonce))
        select_mode = 0x00
        master = ""
        peer_nodes_list = result["peer_nodes_list"].split(",")
        server_specification = ""
        server_count = 0
        training_engine = result["training_engine"]
        code_dir = result["code_dir"]
        entry_file = result["entry_file"]
        data_dir = ""
        checkpoint_dir = ""
        hyper_parameters = ""
        req = start_training_req_body(task_id, select_mode, master, peer_nodes_list,
                                      server_specification, server_count,
                                      training_engine, code_dir, entry_file,
                                      data_dir, checkpoint_dir, hyper_parameters)
        # Sign task_id+code_dir+nonce so the receiver can verify the origin.
        message = task_id + code_dir + nonce
        print("message:", message)
        exten_info = {}
        exten_info["origin_id"] = get_node_id()
        exten_info["sign_algo"] = "ecdsa"
        exten_info["sign"] = dbc_sign(message)
        print("sign:", exten_info["sign"])
        head.exten_info = exten_info
        head.write(p)
        req.write(p)
        p.writeMessageEnd()
        m.flush()
        return pack_head(m)
    except EOFError:
        print("Error: msg body decode failure")
        return
    except IOError:
        print("Error: IO Error")
        return