def select_highest_voted_to_chain(self):
    # ---- Add all ticks with same amount of votes to the dictionary ----
    # WARNING: This MUST happen less than 50% of the time and result in
    # usually only 1 winner, so that chain only branches occasionally
    # and thus doesn't become an exponentially growing tree.
    # This is the main condition to achieve network-wide consensus
    top_tick_refs = self.top_tick_refs()
    highest_ticks = self.get_ticks_by_ref(top_tick_refs)

    # TODO: Should always be > 0 but sometimes not, when unsynced...
    if len(highest_ticks) > 0:
        logger.debug("Top tick refs with "
                     + str(len(highest_ticks[0]['list']))
                     + " pings each: " + str(top_tick_refs))

        tick_dict = {}
        for tick in highest_ticks:
            to_add = self.json_tick_to_chain_tick(tick)
            tick_dict = {**tick_dict, **to_add}

        # TODO: Is this atomic?
        if self.chain.full():
            # This removes earliest item from queue
            self.chain.get()

        self.chain.put(tick_dict)
    else:
        logger.info("Warning!! No ticks added to chain!!")

    self.restart_cycle()
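# Illustrative sketch (not part of the original source): when two ticks tie
# for the highest vote count, the merged tick_dict carries both of them at the
# same height, which is exactly the occasional "branch" the warning above
# describes. The refs and tick bodies below are made-up placeholders.
tick_a = {'ref_aaa': {'height': 7, 'list': ['ping1', 'ping2']}}
tick_b = {'ref_bbb': {'height': 7, 'list': ['ping3', 'ping4']}}
branch = {**tick_a, **tick_b}
assert len(branch) == 2  # two possible previous ticks until a later vote resolves it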
def __init__(self):
    self.chain = Queue(maxsize=config['chain_max_length'])
    self.ping_pool = {}
    self.vote_pool = {}
    # Priority queue because we want to sort by cumulative continuity
    self.tick_pool = PriorityQueue()

    logger.debug("This node is " + credentials.addr)

    # TODO: Create valid genesis tick
    tick = {
        'pubkey': 'pubkey',
        'nonce': 68696043434,
        'list': [{'timestamp': 0, 'pubkey': 'pubkey'}],
        'prev_tick': 'prev_tick',
        'height': 0,
        'this_tick': '55f5b323471532d860b11d4fc079ba38'
                     '819567aa0915d83d4636d12e498a8f3e'
    }

    genesis_dict = self.json_tick_to_chain_tick(tick)
    self.chain.put(genesis_dict)
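# Minimal sketch (not from the original source) of the bounded-FIFO behaviour
# that select_highest_voted_to_chain() relies on: a queue.Queue with maxsize
# acts as a sliding window, so the oldest chain entry is dropped once
# chain_max_length is reached. The maxsize value below is a made-up example.
from queue import Queue

chain = Queue(maxsize=3)            # pretend config['chain_max_length'] == 3
for height in range(4):
    if chain.full():
        chain.get()                 # evict the earliest tick dict
    chain.put({'height': height})
assert chain.qsize() == 3           # heights 1, 2 and 3 remain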
def validate_ping(ping, ping_pool=None, vote=False):
    stage = 'vote' if vote else 'ping'

    if not validate_schema(ping, 'ping_schema.json'):
        logger.debug(stage + " failed schema validation")
        return False

    if ping_pool is not None:
        if vote:
            if pubkey_to_addr(ping['pubkey']) not in ping_pool:
                logger.debug("Voter's pubkey not found in ping_pool")
                return False
            # Voting twice just overwrites your past vote!
        else:
            if pubkey_to_addr(ping['pubkey']) in ping_pool:
                logger.debug(stage + " was already in pool")
                return False

    # Check hash and sig, keeping in mind signature might be popped off
    if not validate_sig_hash(ping):
        logger.debug(stage + " failed sighash validation")
        return False

    # TODO: Do sanity check on a ping's timestamp in relation to current time
    if not validate_ping_timestamp(ping):  # <-- empty stub function atm..
        logger.debug(stage + " failed sanity check on timestamp")
        return False

    return True
def send_mutual_add_requests(self, peerslist):
    successful_adds = 0

    # Mutual add peers
    for peer in peerslist:
        if peer not in self.peers and len(self.peers) <= config['max_peers']:
            content = {"port": self.port, 'pubkey': credentials.pubkey}
            signature = sign(standard_encode(content), credentials.privkey)
            content['signature'] = signature

            status_code = None
            response = None
            result, success = attempt(requests.post, False,
                                      url=peer + '/mutual_add',
                                      json=content,
                                      timeout=config['timeout'])
            if success:
                status_code = result.status_code
                response = result.text
            else:
                logger.debug("Couldn't connect to " + peer)

            if status_code in [201, 503]:
                if status_code == 201:
                    logger.info("Adding peer " + str(peer))
                    peer_addr = response
                    self.register_peer(peer, peer_addr)
                    successful_adds += 1
                if status_code == 503:
                    logger.info("Peer was at peer-maximum")

    return successful_adds
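# Hedged sketch (an assumption, not taken from the original source): what the
# /mutual_add receiver presumably has to do with the payload built above.
# Because the signature is appended *after* signing, the receiver pops it off,
# re-encodes the remaining fields and verifies against the sender's pubkey,
# reusing the project's standard_encode()/verify() helpers. 'incoming' is a
# made-up example payload.
def verify_mutual_add_payload(incoming):
    payload = dict(incoming)                      # don't mutate the original
    signature = payload.pop('signature', None)
    if signature is None:
        return False
    return verify(standard_encode(payload), signature, payload['pubkey'])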
def train(self, train_loader, model, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acc = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for batch_idx, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)

        input_var = input.to(self.device)
        target_var = target.to(self.device)

        output = model(input_var)
        loss = self.criterion(output, target_var)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        with torch.no_grad():
            losses.update(loss.item())
            metric_val = self.metric(output, target_var)  # todo - add output dimension assertion
            acc.update(metric_val)
            self.watcher.display_every_iter(batch_idx, input_var, target,
                                            output)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        logger.debug('\rEpoch: {0} [{1}/{2}]\t'
                     'ETA: {time:.0f}/{eta:.0f} s\t'
                     'data loading: {data_time.val:.3f} s\t'
                     'loss {loss.avg:.4f}\t'
                     'metric {acc.avg:.4f}\t'.format(
                         epoch, batch_idx, len(train_loader),
                         eta=batch_time.avg * len(train_loader),
                         time=batch_time.sum,
                         data_time=data_time,
                         loss=losses,
                         acc=acc))

        self.watcher.log_value(TRAIN_ACC_OUT, metric_val)
        self.watcher.log_value(TRAIN_LOSS_OUT, loss.item())

    return losses.avg, acc.avg
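# Minimal AverageMeter sketch (an assumption, not the project's own class):
# train() above only needs .update(val), .val, .sum and .avg, so a running
# mean of this common shape would satisfy the calls made there.
class AverageMeter:
    """Tracks the latest value and the running average of a metric."""

    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count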
def register_peer(self, url, peer_addr):
    """
    Add a new peer to the list of peers

    :param url: <str> Address of peer. Eg. 'http://192.168.0.5:5000'
    :param peer_addr: <str> Mining addr of peer
    :return: <bool> Whether the peer was registered or not
    """
    netloc = self.get_full_location(url)

    # Avoid adding self
    if peer_addr == credentials.addr:
        logger.debug("Cannot add self")
        return False

    self.peers[netloc] = peer_addr
    return True
def generate_and_process_tick(self):
    height = self.clockchain.current_height() + 1

    tick = {
        'list': list(self.clockchain.ping_pool.values()),
        'pubkey': credentials.pubkey,
        'prev_tick': self.clockchain.prev_tick_ref(),
        'height': height
    }

    this_tick, nonce = mine(tick)
    tick['nonce'] = nonce

    signature = sign(standard_encode(tick), credentials.privkey)
    tick['signature'] = signature

    # This is to keep track of the "name" of the tick as debug info
    # this_tick is not actually necessary according to tick schema
    tick['this_tick'] = this_tick

    prev_tick = self.clockchain.latest_selected_tick()
    possible_previous = self.clockchain.possible_previous_ticks()

    # Validate own tick
    retries = 0
    while retries < config['tick_retries']:
        if not validate_tick(tick, prev_tick, possible_previous,
                             verbose=False):
            retries = retries + 1
            time.sleep(config['tick_retries_sleep'])
        else:
            self.clockchain.add_to_tick_pool(tick)
            # Forward to peers (this must be after all validation)
            self.networker.forward(data_dict=tick, route='tick',
                                   origin=credentials.addr,
                                   redistribute=0)
            logger.debug("Forwarded own tick: " + str(tick))
            return True

    logger.debug("Failed own tick validation too many times. not forwarded")
    return False
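# Hedged sketch (an assumption about the mine() helper's contract, not its
# actual body): the construction order "init -> mine+nonce -> sign" only works
# if mine() searches for a nonce whose hash satisfies the difficulty *before*
# the signature exists. Something of this shape would satisfy the callers,
# reusing the project's hasher() and validate_difficulty() helpers.
def mine_sketch(item):
    candidate = dict(item)          # never mutate the caller's dict
    nonce = 0
    while True:
        candidate['nonce'] = nonce
        digest = hasher(candidate)
        if validate_difficulty(digest):
            return digest, nonce    # same (hash, nonce) tuple shape as mine()
        nonce += 1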
def ping_worker(self):
    while True:
        if self.networker.ready and not self.added_ping:
            self.networker.stage = "ping"
            logger.debug("Ping stage--------------------------------------")

            successful = self.generate_and_process_ping(
                self.clockchain.prev_tick_ref())

            if not successful:
                continue

            self.added_ping = True
        else:
            time.sleep(1)
def build_app(g_port):
    if config['api_backend'] == "flask":
        logger.debug("Running in production mode")

        # Clockchain datastructure and an instance for network messaging
        g_clockchain = Clockchain()
        g_networker = Networker()

        # Timeminer handles all network validation, and API exposes messaging
        Timeminer(g_clockchain, g_networker)
        g_api = API(g_clockchain, g_networker)

        g_app = g_api.create_app()
        g_networker.activate(g_port)

        return g_app
    else:
        # We don't intend to run Sanic with gunicorn
        return False
def get_sample_of_peers_from(peers, sample_size=config['max_peers']):
    peers_of_peers = set()

    # Get peers of peers and add to set (set has no duplicates)
    for peer in list(peers):
        result, success = attempt(requests.get, False,
                                  url=peer + '/info/peers',
                                  timeout=config['timeout'])
        if success:
            next_peers = json.loads(result.text)
            for next_peer in next_peers['peers']:
                peers_of_peers.add(next_peer)
        else:
            logger.debug("Couldn't connect to " + peer)

    if sample_size > len(list(peers_of_peers)):
        sample_size = len(list(peers_of_peers))

    return random.sample(list(peers_of_peers), sample_size)
def generate_and_process_ping(self, reference, vote=False):
    # TODO: Code duplication between here and api.. where to put??
    # TODO: Can't be in helpers, and can't be in clockchain/networker..

    # Always construct ping in the following order:
    # 1) Init 2) Mine+nonce 3) Add signature
    # This is because the order of nonce and sig creation matters
    ping = {
        'pubkey': credentials.pubkey,
        'timestamp': utcnow(),
        'reference': reference
    }

    stage = 'vote' if vote else 'ping'

    _, nonce = mine(ping)
    ping['nonce'] = nonce

    signature = sign(standard_encode(ping), credentials.privkey)
    ping['signature'] = signature

    # Validate own ping
    if not validate_ping(ping, self.clockchain.ping_pool, vote):
        logger.debug("Failed own " + stage + " validation")
        return False

    if vote:
        self.clockchain.add_to_vote_pool(ping)
    else:
        self.clockchain.add_to_ping_pool(ping)

    route = 'vote' if vote else 'ping'

    # Forward to peers (this must be after all validation)
    self.networker.forward(data_dict=ping, route=route,
                           origin=credentials.addr,
                           redistribute=0)

    logger.debug("Forwarded own " + route + ": " + str(ping))
    return True
def validate_sig_hash(item):
    # This is a combined sig+hash check (instead of two split methods) because
    # the check must be atomic: sig and hash mutate the tick in a specific
    # order, so they have to be verified together.

    # Deepcopy used so we don't modify the instance we received
    item_copy = copy.deepcopy(item)
    signature = item_copy.pop('signature', None)

    if signature is None:
        logger.debug("Could not find signature in validate sighash..")
        return False

    # Check hash
    if not validate_difficulty(hasher(item_copy)):
        logger.debug("Invalid hash for item: "
                     + str(item_copy) + " " + hasher(item_copy))
        return False

    # Validate signature
    try:
        encoded_message = standard_encode(item_copy)
        if not verify(encoded_message, signature, item_copy['pubkey']):
            return False
    except ecdsa.BadSignatureError:
        # TODO: When new joiner joins, make sure peers relay latest hash
        logger.debug("Bad signature! " + str(item_copy) + " " + str(signature))
        return False

    return True
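# Illustrative sketch (not from the original source) of why the signature is
# popped before re-hashing: the producer mines the nonce over the unsigned
# item, so hashing a dict that still contains 'signature' cannot reproduce the
# mined digest (assuming hasher() digests the full encoded dict). Field values
# below are made-up placeholders.
item = {'pubkey': 'pk', 'timestamp': 0, 'reference': 'ref', 'nonce': 42}
mined_digest = hasher(item)                       # what mine() worked against

signed_item = {**item, 'signature': 'sig-bytes'}  # signature added afterwards
assert hasher(signed_item) != mined_digest        # validators must pop it first

unsigned_again = dict(signed_item)
unsigned_again.pop('signature')
assert hasher(unsigned_again) == mined_digest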
def top_tick_refs(self):
    highest_voted_ticks = []

    # Sort by value (amount of votes)
    sorted_votes = sorted(self.get_vote_counts().items(),
                          key=lambda x: x[1], reverse=True)

    top_ref, top_score = sorted_votes.pop(0)
    highest_voted_ticks.append(top_ref)

    logger.debug("Highest amount of votes achieved was: " + str(top_score))

    # If any other refs share the same score, we return those too
    for vote in sorted_votes:
        next_ref, next_score = vote
        if next_score == top_score:
            highest_voted_ticks.append(next_ref)
        else:
            break

    return highest_voted_ticks
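# Worked example (made-up vote counts, not from the original source) of the
# tie handling above: every ref sharing the top score is returned, and the
# loop stops as soon as the first lower score appears.
vote_counts = {'ref_aaa': 3, 'ref_bbb': 3, 'ref_ccc': 1}
ranked = sorted(vote_counts.items(), key=lambda x: x[1], reverse=True)
top_score = ranked[0][1]
top_refs = [ref for ref, score in ranked if score == top_score]
assert sorted(top_refs) == ['ref_aaa', 'ref_bbb']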
def join_network_worker(self):
    # Sleeping a random amount to not have a seed-clash (cannot do circular
    # adding of peers at the exact same time as seeds)
    logger.debug("Running on port " + str(self.port))
    sleeptime = 2 + random.randrange(3000) / 1000.0
    logger.debug("Sleeping for " + str(int(sleeptime))
                 + "s before network join")
    time.sleep(sleeptime)

    # First try to add seeds
    if self.port < 5003:
        self.send_mutual_add_requests(config['seeds'])

    # Then get a random sample of peers from them
    peer_samples = self.get_sample_of_peers_from(config['seeds'])

    # Then add those peers
    self.send_mutual_add_requests(peer_samples)

    # TODO: Sync latest datastructures with peers (choosing the majority?)

    # Continuously try to add new peers until my peerlist is above minimum size
    while True:
        time.sleep(4)  # TODO: Put in config
        if len(self.peers) < config['min_peers']:
            logger.debug("peerlist below minimum, trying to add more peers")
            peer_samples = self.get_sample_of_peers_from(self.peers)
            self.send_mutual_add_requests(peer_samples)
            self.ready = False
        else:
            self.ready = True

        if len(self.peers) < 1:
            logger.debug("no peers! adding seeds again")
            peer_samples = self.get_sample_of_peers_from(config['seeds'])
            self.send_mutual_add_requests(peer_samples)
def validate_tick(tick, previous_tick=None, possible_previous_ticks=None,
                  verbose=True):
    # Doing validation on a copy so that the original keeps its "this_tick" ref
    # Otherwise the tick dict will be modified by any operations done here
    tick_copy = copy.deepcopy(tick)
    prev_tick_copy = copy.deepcopy(previous_tick)

    # This is used to keep track of the hash of the tick as debug information
    # Popping it off as it is not supposed to be an actual part of a tick
    tick_copy.pop('this_tick', None)

    if not validate_schema(tick_copy, 'tick_schema.json'):
        logger.debug("Tick failed schema validation")
        return False

    # Check hash and sig, keeping in mind signature might be popped off
    if not validate_sig_hash(tick_copy):
        logger.debug("Tick failed signature and hash checking")
        return False

    if previous_tick is not None:
        if tick_copy['height'] != prev_tick_copy['height'] + 1:
            logger.debug("Tick failed height check")
            return False

    if possible_previous_ticks is not None:
        if not tick_copy['prev_tick'] in possible_previous_ticks:
            logger.debug("Tick failed referencing any 1 of prev possible ticks")
            return False

    # TODO: This forces lower bound, but should also include upper bound?
    if not validate_tick_timediff(prev_tick_copy):
        if verbose:  # Only log when verbose: this check fails often
            logger.debug("Tick failed minimum timediff check")
        return False

    # Check all pings in list
    for ping in tick_copy['list']:
        # TODO: Check if tick's pings are in my own pool?
        # TODO: So they don't just send any random pings
        valid_ping = validate_ping(ping)
        if not valid_ping:
            logger.debug("tick invalid due to containing invalid ping")
            return False

    return True
async def logging_for_sanic(request, response):
    logger.debug(request.ip + " " + request.method + " " + request.path
                 + ": [" + str(response.status) + "] "
                 + response.body.decode('utf-8').replace('\\', ''))
def tick_worker(self):
    while True:
        # added_ping acts as a switch between "pingmode" and "tickmode"
        if self.networker.ready and self.added_ping:
            # Always construct tick in the following order:
            # 1) Init 2) Mine+nonce 3) Add signature
            # This is because the order of nonce and sig creation matters
            cycle_time = config['cycle_time']
            cycle_multiplier = config['cycle_time_multiplier']

            # Dynamic adjusting of sleeping time to match network lockstep
            prev_tick_ts = median_ts(self.clockchain.latest_selected_tick())
            desired_ts = prev_tick_ts + cycle_multiplier * cycle_time
            wait_time = desired_ts - utcnow()

            logger.debug("Median ts: " + str(prev_tick_ts) + " min ts: "
                         + str(desired_ts) + " curr ts: " + str(utcnow()))

            overshoot = 0
            if wait_time < 0:
                if self.clockchain.current_height() != 0:  # If init, ignore
                    overshoot = -wait_time
                    logger.debug("Overshoot of " + str(int(overshoot)) + "s")
                wait_time = 0

            logger.debug("Adjusted sleeping time: " + str(int(wait_time)))

            time.sleep(wait_time)  # Adjusting to follow network timing

            logger.debug("Tick stage--------------------------------------")

            start = time.time()  # Start and end time used to adjust sleep

            self.networker.stage = "tick"
            self.generate_and_process_tick()

            # All in all, there should be a total sleep of
            # 'cycle_time_multiplier' * 'cycle_time' in this thread.
            # Gets adjusted dynamically by wait_time mechanism above
            end = time.time()

            # Overshoot is used if we slept too long in ping stage,
            # then we compensate in this tick stage by speeding up sleep
            second_sleep = cycle_time - (end - start) - overshoot
            second_sleep = 0 if second_sleep < 0 else second_sleep
            time.sleep(second_sleep)  # 2nd sleep

            logger.debug("Vote stage--------------------------------------")

            start = time.time()

            self.networker.stage = "vote"

            # Use a ping to vote for highest continuity tick in tick_pool
            # TODO: What happens if I just selfishly vote for my own tick?
            active_tick_ref = self.clockchain.current_tick_ref()
            self.generate_and_process_ping(active_tick_ref, vote=True)

            logger.debug("Voted for: " + str(active_tick_ref))

            end = time.time()

            inbetween_sleep = cycle_time / 2
            time.sleep(inbetween_sleep)  # 2.5th sleep

            # Clearing ping_pool here already to possibly receive new pings
            self.clockchain.ping_pool = {}

            third_sleep = cycle_time - inbetween_sleep - (end - start)
            third_sleep = 0 if third_sleep < 0 else third_sleep
            time.sleep(third_sleep)  # 3rd sleep

            logger.debug("Select ticks stage------------------------------")

            self.networker.stage = "select"

            self.clockchain.select_highest_voted_to_chain()

            # TODO: If nothing was added to chain.. sth obv. wrong! Resync?
            self.added_ping = False
        else:
            time.sleep(1)
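# Worked example (made-up numbers, not from the original source) of how the
# 2nd and 3rd sleeps above compensate for work time and overshoot. With a
# hypothetical cycle_time of 10 s, each stage still consumes roughly 10 s of
# wall clock regardless of how long the actual tick/vote work took.
cycle_time = 10.0                      # pretend config['cycle_time']
tick_work, overshoot = 1.5, 2.0        # pretend durations in seconds

second_sleep = max(0.0, cycle_time - tick_work - overshoot)
assert second_sleep == 6.5             # tick stage: 1.5 s work + 6.5 s sleep,
                                       # paying back 2 s of earlier overshoot

vote_work = 0.5
inbetween_sleep = cycle_time / 2
third_sleep = max(0.0, cycle_time - inbetween_sleep - vote_work)
assert vote_work + inbetween_sleep + third_sleep == cycle_time  # vote stage: 10 s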
def handle_exception(exception):
    logger.exception("Exception of type " + str(type(exception))
                     + " occurred, see log for more details",
                     exc_info=False)
    logger.debug(traceback.format_exc())
def after(response):
    logger.debug(request.remote_addr + " " + request.method + " "
                 + request.path + ": [" + str(response.status_code) + "] "
                 + response.get_data().decode("utf-8").rstrip())
    return response
def run(self):
    # Download the flow from NodeRed container and parse.
    s_time = time.time()
    while True:
        if self._parse_nodered_flow():
            break
        else:
            if time.time() - s_time > NODERED_FLOW_TIMEOUT * 60:
                logger.critical('Failed to download flow, exiting...')
                return
            else:
                time.sleep(1)

    s_time = time.time()
    model_name = self.config.get('model_name')
    detect_mode = str(self.config.get('detect_mode', '')).lower()
    model_url = self.config.get('custom_model_url')
    while True:
        model_dir = download_model(
            model_name=model_name,
            device=detect_mode,
            model_url=model_url
            if self.config.get('public_model', True) is False else None)
        if model_dir:
            break
        else:
            if time.time() - s_time > DOWNLOAD_MODEL_TIMEOUT * 60:
                logger.critical('Failed to download model, exiting...')
                return
            else:
                time.sleep(.1)

    if detect_mode == 'gpu':
        if model_name == 'yolov3':
            from utils.object_detect_gpu_yolov3 import VisoGPUODYoloV3
            detector = VisoGPUODYoloV3(model_dir=model_dir)
        else:
            from utils.object_detect_gpu import VisoGPUOD
            detector = VisoGPUOD(model_dir=model_dir)
    else:
        if model_name == 'yolov3':
            from utils.openvino_detect_yolov3 import OpenVINODetectYOLOV3
            detector = OpenVINODetectYOLOV3(
                model_dir=model_dir,
                device='MYRIAD' if detect_mode == 'ncs' else 'CPU')
        else:
            if model_url:
                from utils.object_detect_gpu import VisoGPUOD
                detector = VisoGPUOD(model_dir=model_dir)
            else:
                from utils.openvino_detect import OpenVinoObjectDetect
                detector = OpenVinoObjectDetect(
                    model_dir=model_dir,
                    device='MYRIAD' if detect_mode == 'ncs' else 'CPU')

    tracking_mode = str(self.config.get('tracking_algorithm')).upper()
    tracking_quality = float(self.config.get('tracking_quality', 5))
    tracking_cycle = int(self.config.get('tracking_cycle', 2))

    trackers = []
    cnts = []
    for vid_src_id in range(len(self.sources)):
        trackers.append(ObjectTrack(trk_type=tracking_mode,
                                    good_track_quality=tracking_quality))
        cnts.append(0)

    logger.info("Starting detection loop...")
    r = redis.StrictRedis()
    while True:
        for vid_src_id, src in enumerate(self.sources):
            str_frame = r.get(f"{REDIS_PREFIX}_{src.get('id')}")
            if str_frame:
                str_frame = base64.b64decode(str_frame)
                # frombuffer instead of the deprecated np.fromstring
                frame = cv2.imdecode(
                    np.frombuffer(str_frame, dtype=np.uint8), -1)
                h, w = frame.shape[:2]

                if cnts[vid_src_id] % tracking_cycle == 0:
                    result = detector.detect_frame(frame)
                    filtered_objects = [
                        r for r in result
                        if r['label'] in self.config.get('labels', [])
                    ]
                    # FIXME: Remove this!
                    if filtered_objects:
                        logger.debug(filtered_objects)
                    cnts[vid_src_id] = 0
                    trackers[vid_src_id].upgrade_trackers(
                        dets=filtered_objects, trk_img=frame)
                else:
                    trackers[vid_src_id].keep_trackers(trk_img=frame)
                cnts[vid_src_id] += 1

                result = trackers[vid_src_id].to_list()
                roi_result = [
                    x for x in result
                    if not src.get('roi_list', []) or  # ROI is not defined?
                    any([
                        cv2.pointPolygonTest(
                            np.array([cnt], dtype=np.int32),
                            ((x['rect'][0] + x['rect'][2] // 2) * w,
                             (x['rect'][1] + x['rect'][3] // 2) * h),  # Center point
                            False) >= 0
                        for cnt in src.get('roi_list', [])
                    ])
                ]

                if roi_result:
                    logger.info(
                        f'Detected Object from {src.get("id")}'
                        f'({src.get("name")}) - {roi_result}')
                    client.publish(
                        topic=f"{MQTT_PREFIX}_{self.config['id']}",
                        payload=json.dumps({
                            "camera_id": src.get("id"),
                            "result": roi_result
                        }))

                    if SAVE_IMAGE and time.time() - \
                            self._last_saved_time[vid_src_id] > 60:
                        f_name = os.path.join(
                            SAVE_PATH,
                            f"{src.get('id')}_"
                            f"{datetime.datetime.now().isoformat()}.jpg")
                        logger.debug(f"Saving to a file - {f_name}")
                        # 'det' rather than 'r' so the Redis client above
                        # isn't shadowed by the loop variable
                        for det in roi_result:
                            _x, _y, _w, _h = (
                                np.array(det['rect'])
                                * np.array([w, h, w, h])).astype(int).tolist()
                            cv2.rectangle(frame, (_x, _y),
                                          (_x + _w, _y + _h), (0, 255, 0), 2)
                            cv2.putText(
                                frame,
                                f'{det["label"]} '
                                f'{round(det["confidence"] * 100, 1)} %',
                                (_x, _y - 7), cv2.FONT_HERSHEY_COMPLEX, 0.6,
                                (0, 255, 0), 1)
                        cv2.imwrite(f_name, frame)
                        self._last_saved_time[vid_src_id] = time.time()
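# Illustrative sketch (made-up coordinates, not from the original source) of
# the ROI test used above: cv2.pointPolygonTest() returns a positive value for
# a point inside the contour, 0 on the edge and a negative value outside, so
# ">= 0" keeps detections whose centre lies inside (or on) any ROI polygon.
import cv2
import numpy as np

roi = np.array([[(100, 100), (400, 100), (400, 300), (100, 300)]],
               dtype=np.int32)               # one rectangular ROI contour
inside_point = (250.0, 200.0)
outside_point = (50.0, 50.0)

assert cv2.pointPolygonTest(roi, inside_point, False) >= 0
assert cv2.pointPolygonTest(roi, outside_point, False) < 0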