def gen_requests(max_requests, batch_size, f):
    client_key_pub = ecdsa_sig.get_asymm_key(CLIENT_ID - 1, ktype='verify')
    client_key_pem = ecdsa_sig.get_asymm_key(CLIENT_ID - 1, ktype='sign')
    # one sequence counter per node id
    keys_to_seq_tracker = dict.fromkeys(range(N), 0)
    for i in range(max_requests):
        r = random.randint(1, max_requests)
        # randint needs ints: floor division, not true division
        r2 = random.randint(max_requests // 2, max_requests)
        amount = 50  # fixed amount; was random.randint(0, 100)
        msg = "TRAN%s%s" % (str(amount).zfill(4), str(r2).zfill(4))
        msg = bytes(msg, encoding="utf-8") + client_key_pub.to_string()
        if i == 0:
            print(len(client_key_pub.to_string()), "ECDSA")
        _id = r % N
        current_key = ecdsa_sig.get_asymm_key(_id, ktype='sign')
        seq = keys_to_seq_tracker[_id]
        view = 0  # TODO: do we even need view in request from client?
        req = message.add_sig(current_key, _id, seq, view, "REQU", msg, i)
        # increase seq count since the same node might send another request
        keys_to_seq_tracker[_id] += 1
        req.sig = client_key_pem.to_string()
        msg = req.SerializeToString()
        if i == 0:
            print("one request:", len(msg))
        # replicate the serialized request so msg holds batch_size copies
        msg += msg * (batch_size - 1)
        # `offset` is expected to be defined at module level
        temp = message.add_sig(current_key, _id, 0, 0, "REQU", msg, i + offset)
        size = temp.ByteSize()
        if i == 0:
            print("inner message:", len(msg))
            print("message byte size:", size)
        # frame each record with a 4-byte big-endian length prefix
        b = struct.pack("!I", size)
        f.write(b + temp.SerializeToString())
        s = "Generation Progress: {:.1%}".format(i / max_requests)
        print(s, end='')
        backspace(len(s))
    print("keys_to_seq_tracker: ", keys_to_seq_tracker)

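# For reference, a minimal sketch of reading back the file gen_requests
# writes: each record is a 4-byte big-endian length ("!I") followed by that
# many bytes of serialized Request. Assumes the generated request_pb2 module
# is importable; the file name in the usage note is hypothetical.
def read_requests(path):
    with open(path, "rb") as f:
        while True:
            header = f.read(4)
            if len(header) < 4:
                break  # clean EOF (or a truncated trailing record)
            (size,) = struct.unpack("!I", header)
            req = request_pb2.Request()
            req.ParseFromString(f.read(size))
            yield req

# usage (hypothetical file name):
#   for req in read_requests("requests.bin"):
#       print(req.inner.id, req.inner.seq)
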
def vprocess_prepare(self, vprep_dict, vpre_dict, last_checkpoint):
    max_seq = 0  # renamed from `max` to avoid shadowing the builtin
    counter = {}  # seq -> number of valid matching prepares
    for k1, v1 in vprep_dict.items():  # [seq][id] -> req
        if k1 not in vpre_dict:
            return False, 0
        dig = vpre_dict[k1].inner.msg
        key = get_asymm_key(vpre_dict[k1].inner.id, ktype="sign")
        if not bool(key):
            _logger.error(
                "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
                % (self._id))
            return False, 0
        r = message.check(key, vpre_dict[k1])
        if r is None:
            return False, 0
        for k2, v2 in v1.items():
            # check sigs
            key = get_asymm_key(v2.inner.id, ktype="sign")
            if not bool(key):
                _logger.error(
                    "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
                    % (self._id))
                return False, 0
            r = message.check(key, v2)
            if r is None:
                return False, 0
            # prepares need to be for the same digest as the pre-prepare
            if r.inner.msg != dig:
                return False, 0
            # the pre-prepare sender (primary) must not also send a prepare
            if vpre_dict[k1].inner.id == r.inner.id:
                return False, 0
            if r.inner.seq < last_checkpoint:
                return False, 0
            if r.inner.seq > max_seq:
                max_seq = r.inner.seq
            counter[r.inner.seq] = counter.get(r.inner.seq, 0) + 1
    for k in counter:
        # a prepared certificate needs at least 2f matching prepares
        if counter[k] < 2 * self.f:
            return False, 0
        self.add_node_history(vpre_dict[k])
    return True, max_seq

def process_new_view(self, req, fd):
    _logger.info(
        "Node: [%s], Phase: [PROCESS NEW VIEW], Event FD: [%s], RequestInnerID: [%s]"
        % (self._id, fd, req.inner.id))
    # parse the piggybacked requests by type
    m = req.inner.msg
    vchange_list = []
    prpr_list = []
    while len(m) > 0:
        # each embedded request is framed by a 4-byte length prefix
        size = struct.unpack("!I", m[:4])[0]
        try:
            r2 = request_pb2.Request()
            r2.ParseFromString(m[4:size + 4])
            record_pbft(self.debuglog, r2)
            key = get_asymm_key(r2.inner.id, ktype="sign")
            if not bool(key):
                _logger.error(
                    "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
                    % (self._id))
                return
            r2 = message.check(key, r2)
            if r2 is None:
                _logger.warning("FAILED SIG CHECK IN NEW VIEW")
                return
        except Exception:
            _logger.warning("FAILED PROTOBUF EXTRACT IN NEW VIEW")
            return
        if r2.inner.type == "VCHA":
            vchange_list.append(r2)
        elif r2.inner.type == "PRPR":
            prpr_list.append(r2)
        m = m[size + 4:]
    if not self.nvprocess_view(vchange_list):
        _logger.warning("FAILED VCHANGE VALIDATION IN NEW VIEW")
        return
    if req.inner.view >= self.view:
        # adopt the new view and reset per-view state
        self.view = req.inner.view
        self.view_active = True
        self.primary = self.view % self.N
        self.active = {}
        self.reset_message_log()
        # TODO: move this check to execute
        self.client_message_log = {}
        self.prepared = {}
        rc2 = self.nvprocess_prpr(prpr_list)
        _logger.info("Node: [%s], Msg: [New View Accepted], View: [%s]"
                     % (self._id, self.view))

def parse_request(self, request_bytes, fd):
    # attempt to reconstruct the request object;
    # close the connection and return on failure
    try:
        req = request_pb2.Request()
        req.ParseFromString(request_bytes)
        _logger.info(
            "Node: [%s], Phase: [PARSE REQUEST], RequestInnerID: [%s]"
            % (self._id, req.inner.id))
        record_pbft(self.debuglog, req)
        key = get_asymm_key(req.inner.id, ktype="sign")
        if not bool(key):
            _logger.error(
                "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
                % (self._id))
            return
        req = message.check(key, req)
        if req is None:
            _logger.error(
                "Node: [%s], ErrorMsg => {Failed message sig check. 'req' is empty..}"
                % (self._id))
            return
    except Exception as E:
        _logger.error("Node: [%s], ErrorMsg => {ERROR IN PROTOBUF TYPES: %s}"
                      % (self._id, E))
        self.clean(fd)
        return
    # TODO: check for view number and view change, h/H
    if req.inner.view != self.view or not self.view_active:
        # only view-change traffic and client requests may cross views
        if req.inner.type not in ("VCHA", "NEVW", "CHKP", "REQU"):
            debug_msg = "TYPE: %s - ID %s - SEQ %s - VIEW - %s" % (
                req.inner.type, req.inner.id, req.inner.seq, req.inner.view)
            _logger.warning("Bad view number - %s" % debug_msg)
            return
    if self.in_node_history(req):
        # duplicates are logged but still dispatched
        _logger.warning("Duplicate node message")
    if req.inner.type in self.request_types and not self.in_client_history(req):
        # dispatch to the handler registered for this message type
        self.request_types[req.inner.type](req, fd)
    else:
        self.clean(fd)
        _logger.warning("BAD MESSAGE TYPE - %s - %s"
                        % (req.inner.type, req.inner.id))

def execute(self, req):
    """
    clientbuff is used to maintain a buffer of failed requests for retries
    """
    seq = req.inner.seq
    dig = req.inner.msg
    client_req, t, fd = self.active[dig]
    t.cancel()
    key = get_asymm_key(self._id, ktype="sign")
    if not bool(key):
        _logger.error(
            "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
            % (self._id))
        return
    m = self.bank.process_request(key, self._id, seq, client_req)
    # run a second, discarded transfer against a fixed message
    client_req.inner.msg = "TRAN00000000"
    discard = self.bank.process_request(key, self._id, seq, client_req)
    time.sleep(0.05)  # TODO: hack
    # flush any buffered replies along with this one; on failure, buffer it
    with self.clientlock:
        try:
            self.send_to_client(self.clientbuff + serialize(m))
            self.clientbuff = bytes()
        except Exception:
            _logger.warning("failed to send, adding to client outbuff")
            self.clientbuff += serialize(m)
    record(self.debuglog, "EXECUTED " + str(seq))
    if self.max_requests and seq >= self.max_requests:
        maxout_msg = "max requests reached, shutting down.."
        _logger.info(maxout_msg)
        print(maxout_msg)
        t = Timer(5, self.suicide)
        t.start()

def nvprocess_view(self, vchange_list):
    # verify the signature on every view-change message
    for r in vchange_list:
        key = get_asymm_key(r.inner.id, ktype="sign")
        if not bool(key):
            _logger.error(
                "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
                % (self._id))
            return False
        m = message.check(key, r)
        if m is None:
            return False
    return True

def create_request(self, req_type, seq, msg, outer_req=None):
    key = get_asymm_key(self._id, ktype="sign")
    if not bool(key):
        _logger.error(
            "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
            % (self._id))
        return
    m = message.add_sig(key, self._id, seq, self.view, req_type, msg)
    if outer_req:
        # piggyback the full original request on this message
        m.outer = outer_req.SerializeToString()
    return m

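# Illustrative only: outer_req is how a primary piggybacks the full client
# request on a PRE-PREPARE. inner.msg carries the digest, while .outer carries
# the serialized original, which process_preprepare re-parses and verifies
# against that digest. A hedged sketch of a call site -- send_preprepare is a
# hypothetical helper and `node` stands for an instance of this class:
#
#     def send_preprepare(node, client_req):
#         prpr = node.create_request(
#             "PRPR",                # type dispatched by parse_request
#             node.seq,              # sequence number assigned by the primary
#             client_req.dig,        # digest of the client request
#             outer_req=client_req,  # full request rides along in .outer
#         )
#         node.broadcast_to_nodes(prpr)
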
def nvprocess_prpr(self, prpr_list):
    # re-issue a PREP for every valid pre-prepare carried by the new view
    for r in prpr_list:
        key = get_asymm_key(r.inner.id, ktype="sign")
        if not bool(key):
            _logger.error(
                "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
                % (self._id))
            return False
        m = message.check(key, r)
        if m is None:
            return False
        out = self.create_request("PREP", r.inner.seq, r.inner.msg)
        self.broadcast_to_nodes(out)
        # TODO: add view to node_message_log, don't broadcast here
    return True

def sign_log(self, from_node_id):
    key = sig.get_asymm_key(from_node_id, ktype="sign")
    # TODO: handle conversion of the log to bytes (use struct?)
    signed_log = sig.sign(key, self.log)
    return signed_log

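# One possible answer to the TODO above, assuming the log is a list of
# JSON-serializable entries (its actual type isn't shown here): encode it
# deterministically before signing so signatures verify consistently.
#
#     import json
#
#     def log_to_bytes(log):
#         return json.dumps(log, sort_keys=True).encode("utf-8")
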
def sign_transaction(self, txn_tuple, from_node_id):
    """Sign a transaction tuple with the named node's signing key."""
    key = sig.get_asymm_key(from_node_id, ktype="sign")
    signed_txn = sig.sign(key, txn_tuple)
    return signed_txn

def process_view_change(self, req, fd):
    _logger.warning(
        "Node: [%s], Phase: [PROCESS VIEW CHANGE], Event FD: [%s], RequestInnerID: [%s]"
        % (self._id, fd, req.inner.id))
    _logger.warning("Node: [%s], RequestInnerView: [%s]"
                    % (self._id, req.inner.view))
    self.add_node_history(req)
    new_v = req.inner.view
    # only the would-be primary of new_v acts on view changes, and only for
    # views ahead of the current one
    if self._id != req.inner.view or new_v < self.view:
        return
    # (NEVW, v+1, V, O) where V is the set of valid VCHA, O the set of PRPR
    # TODO (NO PIGGYBACK)
    # determine the latest stable checkpoint
    checkpoint = 0
    vcheck_list = []
    vpre_dict = {}   # seq -> PRPR
    vprep_dict = {}  # [seq][id] -> PREP
    m = req.inner.msg
    # walk each length-prefixed CHKP, PRPR, PREP message it contains
    while len(m) > 0:
        size = struct.unpack("!I", m[:4])[0]
        try:
            r2 = request_pb2.Request()
            r2.ParseFromString(m[4:size + 4])
            record_pbft(self.debuglog, r2)
            key = get_asymm_key(r2.inner.id, ktype="sign")
            if not bool(key):
                _logger.error(
                    "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
                    % (self._id))
                return
            r2 = message.check(key, r2)
            if r2 is None:
                _logger.warning("FAILED SIG CHECK IN VIEW CHANGE")
                return
        except Exception as E:
            _logger.warning("FAILED PROTOBUF EXTRACT IN VIEW CHANGE: %s" % E)
            raise
        if r2.inner.type == "CHKP":
            vcheck_list.append(r2)
        elif r2.inner.type == "PREP":
            vprep_dict.setdefault(r2.inner.seq, {})[r2.inner.id] = r2
        elif r2.inner.type == "PRPR":
            vpre_dict[r2.inner.seq] = r2
        m = m[size + 4:]
    rc1 = self.vprocess_checkpoints(vcheck_list, req.inner.seq)
    rc2, max_seq = self.vprocess_prepare(vprep_dict, vpre_dict, req.inner.seq)
    # on success record the view change; on error do nothing and return.
    # view_dict maps view -> (request list, min seq, max seq)
    if rc1 and rc2:
        if new_v not in self.view_dict:
            self.view_dict[new_v] = ([req], 0, 0)
        elif not self.in_view_dict(req):
            self.view_dict[new_v][0].append(req)
        # update min and max (tuples are immutable, so rebuild them)
        if self.view_dict[new_v][1] < req.inner.seq:
            temp = self.view_dict[new_v]
            self.view_dict[new_v] = (temp[0], req.inner.seq, temp[2])
        if self.view_dict[new_v][2] < max_seq:
            temp = self.view_dict[new_v]
            self.view_dict[new_v] = (temp[0], temp[1], max_seq)
        # with more than 2f view changes collected, build and send the
        # new view request
        if ((not self.view_active) or new_v > self.view) and \
                len(self.view_dict[new_v][0]) > 2 * self.f:
            msg = b""  # serialized messages are bytes, so start with b""
            for i in self.view_dict[new_v][0]:
                msg += serialize(i)
            for i in range(self.view_dict[new_v][1], self.view_dict[new_v][2]):
                if i == 0:
                    continue
                r = self.node_message_log["PRPR"][i][self.primary]
                temp = self.create_request("PRPR", i, r.inner.msg)
                msg += serialize(temp)
            out = self.create_request("NEVW", 0, msg)
            self.view_active = True
            self.primary = self.view % self.N
            self.active = {}
            self.reset_message_log()
            # TODO: move this check to execute
            self.client_message_log = {}
            self.prepared = {}
            self.seq = self.view_dict[new_v][2]
            self.broadcast_to_nodes(out)
            print("Entering New view", self.view)

def process_preprepare(self, req, fd):
    _logger.info("Node: [%s], Phase: [PRE-PREPARE], Event FD: [%s]"
                 % (self._id, fd))
    if req.inner.seq in self.node_message_log["PRPR"]:
        return None
    # the msg field of a pre-prepare holds the digest of the original client
    # request; req.outer, when present, carries that request serialized so it
    # can be verified against the digest
    if req.outer != b'':
        try:
            client_req = request_pb2.Request()
            client_req.ParseFromString(req.outer)  # was ParseFromString(req)
            record_pbft(self.debuglog, client_req)
            client_key = get_asymm_key(client_req.inner.id, ktype="sign")
            if not bool(client_key):
                _logger.error(
                    "Node: [%s], ErrorMsg => {get_asymm_key(): -> returned empty key}"
                    % (self._id))
                return
            client_req = message.check(client_key, client_req)
            if client_req is None or req.inner.msg != client_req.dig:
                _logger.warning("FAILED PRPR OUTER SIGCHECK")
                return
        except Exception:
            _logger.error("ERROR IN PRPR OUTER PROTOBUF")
            # TODO: remove the replica from the replica map; by this point it
            # has probably gone down or is corrupt
            raise
    else:
        client_req = None
    if req.inner.msg not in self.active:
        # start a timeout timer for this request
        request_timer = Timer(self.timeout, self.handle_timeout,
                              [req.inner.msg, req.inner.view])
        request_timer.daemon = True
        request_timer.start()
        self.active[req.inner.msg] = (client_req, request_timer, fd)
    self.add_node_history(req)
    m = self.create_request("PREP", req.inner.seq, req.inner.msg)
    self.add_node_history(m)
    record_pbft(self.debuglog, m)
    self.inc_prep_dict(req.inner.msg)
    self.broadcast_to_nodes(m)
    if self.check_prepared_margin(req.inner.msg, req.inner.seq):
        record(self.debuglog, "PREPARED sequence number " + str(req.inner.seq))
        m = self.create_request("COMM", req.inner.seq, req.inner.msg)
        self.broadcast_to_nodes(m)
        self.add_node_history(m)
        self.inc_comm_dict(m.inner.msg)
        record_pbft(self.debuglog, m)
        self.prepared[req.inner.seq] = req.inner.msg
        if self.check_committed_margin(m.inner.msg, m):
            record(self.debuglog, "COMMITTED sequence number " + str(m.inner.seq))
            record_pbft(self.commitlog, m)
            self.execute_in_order(m)
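
# check_prepared_margin and check_committed_margin are defined elsewhere in
# this class. Going by the 2f threshold used in vprocess_prepare and the
# inc_prep_dict/inc_comm_dict counters above, they plausibly reduce to the
# PBFT quorum rules sketched below -- a sketch, not the actual implementation;
# prep_dict and comm_dict are assumed to map a digest to the number of
# matching PREP/COMM messages seen.
#
#     def check_prepared_margin(self, digest, seq):
#         # prepared: a matching pre-prepare plus 2f prepares
#         return self.prep_dict.get(digest, 0) >= 2 * self.f
#
#     def check_committed_margin(self, digest, req):
#         # committed-local: 2f + 1 matching commits survive f faulty replicas
#         return self.comm_dict.get(digest, 0) >= 2 * self.f + 1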