def main(_argv):
    # get parameters and contract details
    if Parameters.is_contractor:
        vk_Bytes = OutsourceContract.public_key_outsourcer
        merkle_tree_interval = OutsourceContract.merkle_tree_interval
        contractHash = Helperfunctions.hashContract().encode('latin1')
        model_to_use = OutsourceContract.model
        tiny = OutsourceContract.tiny
    else:
        vk_Bytes = VerifierContract.public_key_outsourcer
        contractHash = Helperfunctions.hashVerifierContract().encode('latin1')
        model_to_use = VerifierContract.model
        tiny = VerifierContract.tiny
        merkle_tree_interval = 0

    port = Parameters.port_outsourcer
    sendingPort = Parameters.sendingPort
    hostname = Parameters.ip_outsourcer  # used to receive from the other computer
    minimum_receive_rate_from_contractor = Parameters.minimum_receive_rate_from_contractor

    dont_show = Parameters.dont_show
    framework = Parameters.framework
    weights = Parameters.weights
    count = Parameters.count
    info = Parameters.info
    crop = Parameters.crop
    iou = Parameters.iou
    score = Parameters.score
    input_size = Parameters.input_size

    edgeTPU_model_path = Parameters.edgeTPU_model_path
    edgeTPU_label_path = Parameters.edgeTPU_label_Path
    edgeTPU_confidence_level = Parameters.EdgeTPU_confidence_level

    # configure thread handler to handle T2 (receiving), T3 (decompressing,
    # verifying) and T4 (postprocessing, signing, sending, displaying)
    receiver = vss3.ThreadHandler(hostname, port, merkle_tree_interval, contractHash,
                                  minimum_receive_rate_from_contractor, vk_Bytes,
                                  input_size, sendingPort)

    # configure model
    model = Model()
    model.load_model(edgeTPU_model_path)
    model.load_labels(edgeTPU_label_path)
    model.set_confidence_level(edgeTPU_confidence_level)

    print('Receiver Initialized')

    # configure and initialize statistics variables
    moving_average_points = 50
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_thread3_waiting_time = MovingAverage(moving_average_points)
    moving_average_thread4_waiting_time = MovingAverage(moving_average_points)
    moving_average_model_inference_time = MovingAverage(moving_average_points)
    moving_average_img_preprocessing_time = MovingAverage(moving_average_points)

    image_count = 0
    a = 0
    b = 0

    while True:
        start_time = time.perf_counter()

        # receive decompressed image from Thread 3
        preprocessOutput = receiver.receive2()
        thread3_waiting_time = time.perf_counter()

        decompressedImage = preprocessOutput[0]
        name = preprocessOutput[1]

        # image preprocessing
        model.load_image_cv2_backend(decompressedImage)
        image_preprocessing_time = time.perf_counter()

        # inference
        class_ids, scores, boxes = model.inference()
        model_inferenced_time = time.perf_counter()

        # transfer inference results to Thread 4; blocks if Thread 4 is not
        # finished with the last image yet
        receiver.putData((decompressedImage, name, image_count,
                          model.labels, class_ids, boxes))
        thread4_waiting_time = time.perf_counter()

        # statistics
        moving_average_fps.add(1 / (thread4_waiting_time - start_time))
        moving_average_thread3_waiting_time.add(thread3_waiting_time - start_time)
        moving_average_img_preprocessing_time.add(
            image_preprocessing_time - thread3_waiting_time)
        moving_average_model_inference_time.add(
            model_inferenced_time - image_preprocessing_time)
        moving_average_thread4_waiting_time.add(
            thread4_waiting_time - model_inferenced_time)

        total_time = moving_average_thread3_waiting_time.get_moving_average() \
            + moving_average_model_inference_time.get_moving_average() \
            + moving_average_img_preprocessing_time.get_moving_average() \
            + moving_average_thread4_waiting_time.get_moving_average()

        # count the seconds it takes to process 400 images after an
        # 800-frame warm-up time
        if image_count == 800:
            a = time.perf_counter()
        if image_count == 1200:
            a = time.perf_counter() - a
            print(a)

        # terminal prints
        if image_count % 20 == 0:
            print(" total: %4.1fms (%4.1ffps) "
                  " Waiting for Thread 3 (receiving, decoding, verifying) %4.1f (%4.1f%%) "
                  " preprocessing %4.1f (%4.1f%%) "
                  " model inference %4.1f (%4.1f%%) "
                  " Waiting for Thread 4 (postprocessing, signing, replying, displaying) %4.1f (%4.1f%%) "
                  % (
                      1000 / moving_average_fps.get_moving_average(),
                      moving_average_fps.get_moving_average(),
                      moving_average_thread3_waiting_time.get_moving_average() * 1000,
                      moving_average_thread3_waiting_time.get_moving_average() / total_time * 100,
                      moving_average_img_preprocessing_time.get_moving_average() * 1000,
                      moving_average_img_preprocessing_time.get_moving_average() / total_time * 100,
                      moving_average_model_inference_time.get_moving_average() * 1000,
                      moving_average_model_inference_time.get_moving_average() / total_time * 100,
                      moving_average_thread4_waiting_time.get_moving_average() * 1000,
                      moving_average_thread4_waiting_time.get_moving_average() / total_time * 100), end='\r')

        # counter
        image_count += 1
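
# ---------------------------------------------------------------------------
# Illustration (not from the original sources): the statistics code above
# relies on a MovingAverage helper that is defined elsewhere in this repo.
# A minimal fixed-window sketch of what such a helper could look like; the
# actual implementation may differ.
# ---------------------------------------------------------------------------
from collections import deque


class MovingAverage:
    """Average over the last `points` samples, for smoothing timing stats."""

    def __init__(self, points):
        self.window = deque(maxlen=points)  # old samples drop out automatically

    def add(self, value):
        self.window.append(value)

    def get_moving_average(self):
        # avoid division by zero before the first sample arrives
        return sum(self.window) / len(self.window) if self.window else 0.0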
def main():
    pk = SigningKey(Parameters.private_key_outsourcer)
    vk = VerifyKey(OutsourceContract.public_key_contractor)
    vk_verifier = VerifyKey(VerifierContract.public_key_verifier)

    # video info
    width = Parameters.input_size
    height = Parameters.input_size
    quality = Parameters.quality

    receiver_ip = Parameters.receiver_ip
    receiver_port = Parameters.receiver_port
    receiver_port_verifier = Parameters.receiver_port_verfier
    sending_port_verifier = Parameters.sending_port_verifier
    verifier_ip = Parameters.target_ip_verifier
    sending_port = Parameters.sending_port

    # statistics info
    moving_average_points = Parameters.moving_average_points

    merkle_tree_interval = OutsourceContract.merkle_tree_interval
    maxmium_number_of_frames_ahead = Parameters.maxmium_number_of_frames_ahead
    minimum_response_rate = Parameters.minimum_response_rate
    warm_up_time = Parameters.warm_up_time
    sampling_interval = Parameters.sampling_interval
    maxmium_number_of_frames_ahead_verifier = Parameters.maxmium_number_of_frames_ahead_verifier
    maxmium_number_of_verifier_sample_missed_consecutively = Parameters.maxmium_number_of_verifier_sample_missed_consecutively
    minimum_response_rate_verifier = Parameters.minimum_response_rate_verifier
    framesync = Parameters.framesync

    image_counter = ImageCounter(maxmium_number_of_frames_ahead)
    image_counter_verifier = ImageCounter(maxmium_number_of_frames_ahead)

    r = receiverlogic.Receiver(image_counter, receiver_ip, receiver_port)
    r_verifier = receiverlogic.Receiver(
        image_counter_verifier, receiver_ip, receiver_port_verifier)

    print('Waiting for contractor and verifier to connect ...')

    start_listening_time = time.perf_counter()
    while not r.getConnectionEstablished() or not r_verifier.getConnectionEstablished():
        if time.perf_counter() - start_listening_time > 35:
            # determine which party failed to connect before closing the sockets
            contractor_missing = not r.getConnectionEstablished()
            verifier_missing = not r_verifier.getConnectionEstablished()
            if not contractor_missing and not verifier_missing:
                break  # both connected in the meantime
            r.close()
            r_verifier.close()
            time.sleep(1)
            if contractor_missing and verifier_missing:
                sys.exit(
                    'Contract aborted: Contractor and Verifier did not connect in time. Possible Consequences for Contractor and Verifier: Blacklist, Bad Review')
            if contractor_missing:
                sys.exit(
                    'Contract aborted: Contractor did not connect in time. Possible Consequences for Contractor: Blacklist, Bad Review')
            sys.exit(
                'Contract aborted: Verifier did not connect in time. Possible Consequences for Verifier: Blacklist, Bad Review')
        time.sleep(0.5)

    print('Connection with contractor and verifier established')

    a = 0
    b = 0

    # saves (input_counter, response, signature) of the current sample
    outsourcerSample = (-1, '-1', '')
    # saves (input_counter, response, signature) of the current sample
    verifierSample = (-1, '-1', '')

    outsourcer_sample_dict = {}
    verifier_sample_dict = {}

    output_counter = 0
    output_counter_verifier = 0

    lastSample = -1  # last sample index that was compared

    # stores the compressed last sample image; if signed, mismatching responses
    # are received, any third party can use this saved image to verify which
    # response is incorrect
    saved_compressed_sample_image = b''

    contractHash = Helperfunctions.hashContract().encode('latin1')
    verifier_contract_hash = Helperfunctions.hashVerifierContract().encode('latin1')

    sampling_index = -1

    verifier_sample_processed = 0
    verifier_sample_missed = 0  # how many samples were missed in total
    verifier_sample_missed_consecutively = 0  # how many samples were missed consecutively

    if merkle_tree_interval > 0:
        mt = MerkleTools()
        interval_count = 0
        time_to_challenge = False
        next_merkle_chall = 0
        curr_merkle_chall = 0
        current_root_hash = ''
        next_merkle_response = ''
        curr_merkle_response = ''
        sample_received_in_interval = -2
        abort_at_next_merkle_root = False
    else:
        random_number = random.randint(0, sampling_interval - 1)

    # initialize sender
    image_sender = Sender(sending_port, pk, quality)
    image_sender.set_quality(quality)
    print('Sender Initialized')

    image_sender_verifier = Sender(sending_port_verifier, pk, quality)
    image_sender_verifier.set_quality(quality)
    print('Verifier Sender Initialized')

    # initialize RPi camera
    rpi_cam = RPiCamera(width, height)
    rpi_cam.start()
    print('Camera Started')
    time.sleep(1.0)

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_camera_time = MovingAverage(moving_average_points)
    moving_average_compress_time = MovingAverage(moving_average_points)
    moving_average_sign_time = MovingAverage(moving_average_points)
    moving_average_send_time = MovingAverage(moving_average_points)
    moving_average_response_time = MovingAverage(moving_average_points)
    moving_average_receive_time = MovingAverage(moving_average_points)
    moving_average_verify_time = MovingAverage(moving_average_points)
    moving_average_last_Sample = MovingAverage(moving_average_points)

    # streaming
    print('Start Streaming')

    while True:
        start_time = time.perf_counter()

        # capture image
        image = rpi_cam.get_image()

        if framesync:
            if image_counter.getInputCounter() > warm_up_time:
                milisecs_outsourcer = moving_average_fps.get_moving_average()
                frames_ahead_average = moving_average_response_time.get_moving_average()
                # note: 1/fps is seconds per frame; subtract a small safety buffer
                adjusted_milisecs = (frames_ahead_average - 1) * (1 / milisecs_outsourcer) - 0.005
                # adjusted_milisecs -= 0.01
                if adjusted_milisecs > 0:
                    time.sleep(adjusted_milisecs)

        camera_time = time.perf_counter()

        if merkle_tree_interval == 0:
            # if the index is at the random sample, send the sample to the verifier too
            if sampling_index == image_counter.getInputCounter():
                # send image to both contractor and verifier
                compress_time, sign_time, send_time, compressed = image_sender.send_image_compressed_with_return(
                    image_counter.getInputCounter(), image, contractHash,
                    image_counter.getNumberofOutputsReceived())
                compress_time2, sign_time2, send_time2 = image_sender_verifier.send_image_compressed_with_input(
                    image_counter.getInputCounter(), image, verifier_contract_hash,
                    image_counter_verifier.getNumberofOutputsReceived(), compressed)
                saved_compressed_sample_image = compressed
                compress_time += compress_time2
                sign_time += sign_time2
                send_time += send_time2
            else:
                compress_time, sign_time, send_time = image_sender.send_image_compressed(
                    image_counter.getInputCounter(), image, contractHash,
                    image_counter.getNumberofOutputsReceived())
        else:
            # if sampling_index == image_counter.getInputCounter() or sampling_index + sampling_interval < image_counter.getInputCounter():  # only for high frequency to reduce receive time
            if sampling_index == image_counter.getInputCounter():
                # send image to both contractor and verifier
                compress_time, sign_time, send_time, compressed = image_sender.send_image_compressed_Merkle_with_return(
                    image_counter.getInputCounter(), image, contractHash,
                    image_counter.getNumberofOutputsReceived(), curr_merkle_chall,
                    interval_count, time_to_challenge)
                compress_time2, sign_time2, send_time2 = image_sender_verifier.send_image_compressed_with_input(
                    image_counter.getInputCounter(), image, verifier_contract_hash,
                    image_counter_verifier.getNumberofOutputsReceived(), compressed)
                saved_compressed_sample_image = compressed
                compress_time += compress_time2
                sign_time += sign_time2
                send_time += send_time2
            else:
                compress_time, sign_time, send_time = image_sender.send_image_compressed_Merkle(
                    image_counter.getInputCounter(), image, contractHash,
                    image_counter.getNumberofOutputsReceived(), curr_merkle_chall,
                    interval_count, time_to_challenge)

        # verifying
        receive_time = time.perf_counter()

        responses = []
        signatures_outsourcer = []
        output = r.getAll()

        if merkle_tree_interval == 0:
            for o in output:
                # print('Outsourcer Response:', o)  # outputPrint
                if o[:5] == 'abort':
                    image_sender_verifier.send_abort(image)
                    r.close()
                    r_verifier.close()
                    time.sleep(1)
                    sys.exit('Contract aborted by contractor according to custom.')
                try:
                    sig = o.split(';--')[1].encode('latin1')
                    msg = o.split(';--')[0].encode('latin1')
                except Exception:
                    r.close()
                    r_verifier.close()
                    time.sleep(1)
                    sys.exit(
                        'Contract aborted: Contractor response is ill-formatted. Possible Consequences for Contractor: Blacklist, Bad Review')
                try:
                    vk.verify(msg + contractHash, sig)
                except Exception:
                    r.close()
                    r_verifier.close()
                    time.sleep(1)
                    sys.exit(
                        'Contract aborted: Contractor signature does not match response. Possible Consequences for Contractor: Blacklist, Bad Review')
                responses.append(msg)
                signatures_outsourcer.append(sig)
        else:
            # Merkle tree verification is active
            if image_counter.getNumberofOutputsReceived() > merkle_tree_interval * (interval_count + 2):
                r.close()
                r_verifier.close()
                time.sleep(1)
                sys.exit(
                    'Contract aborted: No root hash received for current interval in time. Possible Consequences for Contractor: Blacklist, Bad Review, Refusal of Payment for images from current interval')
            for o in output:
                # print('Outsourcer Response:', o)  # outputPrint
                if o[:5] == 'abort':
                    image_sender_verifier.send_abort(image)
                    r.close()
                    r_verifier.close()
                    time.sleep(1)
                    sys.exit('Contract aborted by contractor according to custom')

                root_hash_received = False
                msg = o.split(';--')[0].encode('latin1')  # get output

                # section to check for proofs
                if time_to_challenge:  # if true, it is time to receive a challenge response
                    proof_received = False
                    # check if the message structure indicates that it contains a proof
                    if len(o.split(';--')) > 3:
                        challenge_response = []
                        try:
                            signature = o.split(';--')[1].encode('latin1')
                            signatures_outsourcer.append(signature)
                            challenge_response = o.split(';--')[2:]
                            leaf_node = challenge_response[-2]
                            root_node = challenge_response[-1]
                            proof_string = challenge_response[0:-2]
                            proofList = []
                            for strings in proof_string:
                                strings = strings.replace("'", "\"")
                                proofList.append(json.loads(strings))
                            proof_received = True
                        except Exception:
                            pass

                    if proof_received:  # if the message contains a proof
                        # check if the root node sent earlier matches the current one
                        mt = MerkleTools()
                        if sample_received_in_interval == interval_count - 1:
                            # skip this part of the challenge if no sample was
                            # compared in the last interval; otherwise bring the
                            # last response into the same format as leaf_node
                            mt.add_leaf(curr_merkle_response.decode('latin1'), True)
                            # if leaf_node != mt.get_leaf(0):
                            #     print('Merkle tree leaf node does not match earlier sent response')
                            # else:
                            #     print('Success')
                        if current_root_hash == root_node.encode('latin1'):
                            # the signed root hash received earlier equals the sent root hash
                            try:
                                # print(str(challenge_response).encode('latin1') + bytes(interval_count - 1) + contractHash)
                                # print(challenge_response)
                                vk.verify(str(challenge_response).encode('latin1')
                                          + bytes(interval_count - 1) + contractHash, signature)
                            except Exception:
                                r.close()
                                r_verifier.close()
                                time.sleep(1)
                                sys.exit(
                                    'Contract aborted: Contractor signature of challenge response is incorrect. Possible Consequences for Contractor: Blacklist, Bad Review, Refusal of Payment for images from current interval')
                            try:
                                # verify proof of membership
                                merkle_proof_of_membership = mt.validate_proof(
                                    proofList, leaf_node, root_node)
                                # print('Proof of membership for random sample in interval ' + str(interval_count - 1) + ' was successful')
                            except Exception:
                                merkle_proof_of_membership = False
                            if merkle_proof_of_membership:
                                time_to_challenge = False  # all challenges passed
                            else:
                                r.close()
                                r_verifier.close()
                                time.sleep(1)
                                sys.exit(
                                    'Contract aborted: Leaf is not contained in Merkle Tree. Possible Consequences for Contractor: Blacklist, Bad Review, Refusal of Payment for images from current interval, Fine')
                        else:
                            r.close()
                            r_verifier.close()
                            time.sleep(1)
                            sys.exit(
                                'Contract aborted: Contractor root hash received at challenge response does not match previously signed root hash. Possible Consequences for Contractor: Blacklist, Bad Review, Refusal of Payment for images from current interval, Fine')

                # section to check for Merkle roots
                # if true, it is time to receive a new Merkle root
                if image_counter.getNumberofOutputsReceived() >= merkle_tree_interval * (interval_count + 1):
                    if time_to_challenge:
                        r.close()
                        r_verifier.close()
                        time.sleep(1)
                        sys.exit(
                            'Contract aborted: Merkle Tree proof of membership challenge response was not received in time. Possible Consequences for Contractor: Blacklist, Bad Review, Refusal of Payment for images from current interval')
                    try:
                        # check if a Merkle root was received
                        root_hash = o.split(';--')[1].encode('latin1')
                        sig = o.split(';--')[2].encode('latin1')
                        if len(o.split(';--')) == 3:
                            root_hash_received = True
                    except Exception:
                        pass

                    if root_hash_received:
                        # a root hash was received; verify its signature
                        root_hash_received = False
                        time_to_challenge = True
                        random_number = random.randint(0, merkle_tree_interval - 1)
                        try:
                            match = vk.verify(root_hash + bytes(interval_count) + contractHash, sig)
                            interval_count += 1
                            curr_merkle_chall = next_merkle_chall  # send the last checked sample as challenge
                            current_root_hash = root_hash
                            curr_merkle_response = next_merkle_response  # remember the last checked response
                            # print(interval_count, image_counter.getNumberofOutputsReceived())
                        except Exception:
                            r.close()
                            r_verifier.close()
                            time.sleep(1)
                            sys.exit(
                                'Contract aborted: Contractor signature of root hash is ill-formatted. Possible Consequences for Contractor: Blacklist, Bad Review, Refusal of Payment for images from current interval')
                        if abort_at_next_merkle_root:
                            r.close()
                            r_verifier.close()
                            time.sleep(1)
                            sys.exit(
                                'Contract aborted: Merkle Tree is built on responses unequal to responses of the verifier. Possible Consequences for Contractor: Fine, Blacklist, Bad Review')
                        signatures_outsourcer.append(sig)

                responses.append(msg)

            if len(signatures_outsourcer) == 0:
                signatures_outsourcer.append('Next merkle root serves as a proof')

        responses_verifier = []
        signatures_verifier = []
        output_verifier = r_verifier.getAll()

        for o in output_verifier:
            # print('Verifier Response:', o)  # outputPrint
            if o[:5] == 'abort':
                image_sender.send_abort(image)
                r.close()
                r_verifier.close()
                time.sleep(1)
                sys.exit('Contract aborted by verifier according to custom')
            try:
                sig = o.split(';--')[1].encode('latin1')
                msg = o.split(';--')[0].encode('latin1')
            except Exception:
                r.close()
                r_verifier.close()
                time.sleep(1)
                sys.exit(
                    'Contract aborted: Verifier response is ill-formatted. Possible Consequences for Verifier: Blacklist, Bad Review')
            try:
                vk_verifier.verify(msg + verifier_contract_hash, sig)
            except Exception:
                r.close()
                r_verifier.close()
                time.sleep(1)
                sys.exit(
                    'Contract aborted: Verifier signature does not match response. Possible Consequences for Verifier: Blacklist, Bad Review')
            responses_verifier.append(msg)
            signatures_verifier.append(sig)

        # make sure the contractor has actually computed a new output before
        # assigning a new sample; otherwise it is possible to never compare samples
        if image_counter.getOutputCounter() == sampling_index and len(responses) > 0:
            # in rare cases of thread timing, output counter and responses can be desynced
            if int(responses[-1].decode('latin1')[5:].split(':', 1)[0]) == sampling_index:
                outsourcer_sample_dict[sampling_index] = (
                    sampling_index, responses[-1], signatures_outsourcer[-1])
                # merkle_challenge_index = image_counter.getOutputCounter() % merkle_tree_interval

        # make sure the verifier has actually computed a new output before
        # assigning a new sample; otherwise it is possible to never compare samples
        if image_counter_verifier.getOutputCounter() == sampling_index and len(responses_verifier) > 0:
            # in rare cases of thread timing, output counter and responses can be desynced
            if int(responses_verifier[-1].decode('latin1')[5:].split(':', 1)[0]) == sampling_index:
                verifier_sample_dict[sampling_index] = (
                    sampling_index, responses_verifier[-1], signatures_verifier[-1])

        sample_checked = False

        if sampling_index in verifier_sample_dict and sampling_index in outsourcer_sample_dict:
            if outsourcer_sample_dict[sampling_index][0] == verifier_sample_dict[sampling_index][0]:
                sample_checked = True
                # compare responses
                if lastSample != outsourcer_sample_dict[sampling_index][0]:
                    lastSample = outsourcer_sample_dict[sampling_index][0]
                    if outsourcer_sample_dict[sampling_index][1] == verifier_sample_dict[sampling_index][1]:
                        # print('The following sample was found to be equal:', outsourcer_sample_dict[sampling_index][1])  # outputPrint
                        if merkle_tree_interval > 0:
                            next_merkle_chall = outsourcer_sample_dict[sampling_index][0]
                            next_merkle_response = outsourcer_sample_dict[sampling_index][1]
                            # used to check if a sample was received in the current Merkle interval
                            sample_received_in_interval = interval_count
                        outsourcer_sample_dict.clear()
                        verifier_sample_dict.clear()
                        # outsourcer_sample_dict = {}  # not needed since keys don't repeat
                        # verifier_sample_dict = {}
                    else:
                        # sample was found to be not equal
                        if merkle_tree_interval == 0:
                            r.close()
                            r_verifier.close()
                            time.sleep(1)
                            sys.exit('Contract aborted. The following outputs are not equal: Outsourcer: '
                                     + str(outsourcer_sample_dict[sampling_index][1])
                                     + ' , Verifier: ' + str(verifier_sample_dict[sampling_index][1])
                                     + ' Possible Consequences for cheating party: Fine, Blacklist, Bad Review')
                        else:
                            print("The following outputs are not equal:",
                                  outsourcer_sample_dict[sampling_index][1],
                                  verifier_sample_dict[sampling_index][1])
                            # without a Merkle tree -> exit; with a Merkle tree,
                            # wait for the next challenge
                            abort_at_next_merkle_root = True

        if image_counter.getNumberofOutputsReceived() % sampling_interval == 0:
            # pick a new random sample; only pick the next sample if both
            # parties have already processed the last sample
            if image_counter_verifier.getOutputCounter() >= sampling_index \
                    and image_counter.getOutputCounter() >= sampling_index:
                # random_number = random.randint(0, sampling_interval - 1 - maxmium_number_of_frames_ahead)
                random_number = random.randint(1, sampling_interval)
                sampling_index = random_number + image_counter.getInputCounter()
                verifier_sample_processed += 1  # record that the verifier successfully processed a sample
                verifier_sample_missed_consecutively = 0  # reset
            else:
                if image_counter.getInputCounter() - sampling_index > maxmium_number_of_frames_ahead_verifier:
                    # means that the verifier has lost the sample or is too slow
                    random_number = random.randint(1, sampling_interval)
                    sampling_index = random_number + image_counter.getInputCounter()
                    verifier_sample_missed += 1  # record that the verifier missed a sample because of frame loss or being too slow
                    verifier_sample_missed_consecutively += 1
                    if image_counter.getInputCounter() > warm_up_time:
                        # abort if too many samples were missed in a row or the
                        # verifier's sample response rate falls below the contract minimum
                        verifier_response_rate = verifier_sample_processed / (
                            verifier_sample_processed + verifier_sample_missed)
                        if verifier_sample_missed_consecutively > maxmium_number_of_verifier_sample_missed_consecutively \
                                or verifier_response_rate < minimum_response_rate_verifier:
                            r.close()
                            r_verifier.close()
                            time.sleep(1)
                            sys.exit(
                                'Contract aborted: Verifier has failed to process enough samples in time. Possible Consequences for Verifier: Bad Review, Blacklist')

        verify_time = time.perf_counter()

        if OutsourceContract.criteria == 'Atleast 2 objects detected':
            for st in responses:
                if len(st) > 1000:
                    print(st)

        frames_behind = image_counter.getFramesAhead()
        if frames_behind > maxmium_number_of_frames_ahead:
            if image_counter.getInputCounter() > warm_up_time:
                # print(image_counter.getInputCounter(), image_counter.getFramesAhead())
                r.close()
                r_verifier.close()
                time.sleep(1)
                sys.exit(
                    'Contract aborted: Contractor response delay rate is too high. Possible Consequences for Contractor: Bad Review, Blacklist')

        if image_counter.getNumberofOutputsReceived() < image_counter.getInputCounter() * minimum_response_rate:
            if image_counter.getInputCounter() > warm_up_time:
                r.close()
                r_verifier.close()
                time.sleep(1)
                sys.exit(
                    'Contract aborted: Contractor response rate is too low. Possible Consequences for Contractor: Bad Review, Blacklist')

        if image_counter.getNumberofOutputsReceived() == 800:
            a = time.perf_counter()
        if image_counter.getNumberofOutputsReceived() == 1200:
            a = time.perf_counter() - a
            print('contractor', a)

        if image_counter_verifier.getNumberofOutputsReceived() == 300:
            b = time.perf_counter()
        if image_counter_verifier.getNumberofOutputsReceived() == 700:
            b = time.perf_counter() - b
            print('verifier', b)

        # statistics
        moving_average_camera_time.add(camera_time - start_time)
        moving_average_compress_time.add(compress_time)
        moving_average_sign_time.add(sign_time)
        moving_average_send_time.add(send_time)
        moving_average_verify_time.add(verify_time - receive_time)
        if frames_behind != -1:
            moving_average_response_time.add(frames_behind)
        if sample_checked:
            moving_average_last_Sample.add(image_counter.getInputCounter() - lastSample)

        total_time = moving_average_camera_time.get_moving_average() \
            + moving_average_compress_time.get_moving_average() \
            + moving_average_sign_time.get_moving_average() \
            + moving_average_send_time.get_moving_average()

        instant_fps = 1 / (time.perf_counter() - start_time)
        moving_average_fps.add(instant_fps)

        # terminal prints
        if image_counter.getInputCounter() % 20 == 0:
            print("total: %5.1fms (%5.1ffps) camera %4.1f (%4.1f%%) compressing %4.1f (%4.1f%%) signing %4.1f (%4.1f%%) sending %4.1f (%4.1f%%) frames ahead %4.1f ahead of sample %4.1f verify time %4.1f (%4.1f%%) "
                  % (
                      1000 / moving_average_fps.get_moving_average(),
                      moving_average_fps.get_moving_average(),
                      moving_average_camera_time.get_moving_average() * 1000,
                      moving_average_camera_time.get_moving_average() / total_time * 100,
                      moving_average_compress_time.get_moving_average() * 1000,
                      moving_average_compress_time.get_moving_average() / total_time * 100,
                      moving_average_sign_time.get_moving_average() * 1000,
                      moving_average_sign_time.get_moving_average() / total_time * 100,
                      moving_average_send_time.get_moving_average() * 1000,
                      moving_average_send_time.get_moving_average() / total_time * 100,
                      moving_average_response_time.get_moving_average(),
                      moving_average_last_Sample.get_moving_average(),
                      moving_average_verify_time.get_moving_average() * 1000,
                      moving_average_verify_time.get_moving_average() / total_time * 100), end='\r')

        # counter
        image_counter.increaseInputCounter()
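
# ---------------------------------------------------------------------------
# Illustration (not from the original sources): a minimal round trip of the
# ';--'-framed, Ed25519-signed response format that the loop above checks,
# using PyNaCl. Keys are generated fresh here; the real parties load them from
# the contract/parameter files, and the contract hash below is a placeholder
# for Helperfunctions.hashContract().
# ---------------------------------------------------------------------------
from nacl.signing import SigningKey
from nacl.exceptions import BadSignatureError

contract_hash = b'example-contract-hash'  # placeholder

signer = SigningKey.generate()            # contractor side
verifier_key = signer.verify_key          # outsourcer side holds this

# contractor: sign the response together with the contract hash, frame with ';--'
msg = 'Image42:;person'.encode('latin1')
sig = signer.sign(msg + contract_hash).signature
wire = msg.decode('latin1') + ';--' + sig.decode('latin1')

# outsourcer: split the frame and verify; BadSignatureError indicates a
# corrupted or forged response
received_msg = wire.split(';--')[0].encode('latin1')
received_sig = wire.split(';--')[1].encode('latin1')
try:
    verifier_key.verify(received_msg + contract_hash, received_sig)
    print('signature ok')
except BadSignatureError:
    print('signature mismatch')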
def main():
    # get parameters and contract details
    # print(contractHash)

    # preprocess_queue = queue.LifoQueue()
    # inference_queue = queue.LifoQueue()
    preprocess_queue = mp.Queue()
    inference_queue = mp.Queue()
    # postprocess_queue = Queue()

    p1 = mp.Process(target=inference, args=(preprocess_queue, inference_queue))
    p2 = mp.Process(target=preprocessing, args=(preprocess_queue,))
    # p1 = Process(target=dummy)
    # p2 = Process(target=dummy)
    # p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
    p1.start()
    p2.start()
    # p3.start()

    sk = SigningKey(Parameters.private_key_contractor)
    contractHash = Helperfunctions.hashContract().encode('latin1')
    dont_show = Parameters.dont_show
    merkle_tree_interval = OutsourceContract.merkle_tree_interval
    hostname = Parameters.ip_outsourcer  # used to receive from the other computer
    port = Parameters.port_outsourcer
    sendingPort = Parameters.sendingPort

    # import tensorflow as tf
    # time.sleep(1.0)

    # configure responder
    responder = re.Responder(hostname, sendingPort)

    # statistics info
    moving_average_points = 50

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_receive_time = MovingAverage(moving_average_points)
    moving_average_decompress_time = MovingAverage(moving_average_points)
    # moving_average_model_load_image_time = MovingAverage(moving_average_points)
    moving_average_img_preprocessing_time = MovingAverage(moving_average_points)
    moving_average_model_inference_time = MovingAverage(moving_average_points)
    moving_average_img_postprocessing_time = MovingAverage(moving_average_points)
    moving_average_reply_time = MovingAverage(moving_average_points)
    moving_average_image_show_time = MovingAverage(moving_average_points)
    moving_average_verify_image_sig_time = MovingAverage(moving_average_points)
    moving_average_response_signing_time = MovingAverage(moving_average_points)

    image_count = 0
    a = 0
    b = 0

    if merkle_tree_interval > 0:
        mt = MerkleTools()
        mtOld = MerkleTools()
        interval_count = 0
        mtOld_leaf_indices = {}
        mt_leaf_indices = {}
        # rendundancy_counter = 0
        # rendundancy_counter2 = 0
        current_challenge = 1
        merkle_root = ''
        # stringsend = ''
        last_challenge = 0

    image_showed_time = time.perf_counter()  # init

    while True:
        # start_time = time.perf_counter()

        if not inference_queue.empty():
            # drain the queue and keep only the newest result
            queueData = inference_queue.get()
            while not inference_queue.empty():
                queueData = inference_queue.get()

            start_time = image_showed_time

            # boxes, scores, classes, valid_detections, name, original_image
            # queueData = inference_queue.get()
            # inference_queue.task_done()
            # boxes = queueData[0]
            # scores = queueData[1]
            # classes = queueData[2]
            # valid_detections = queueData[3]
            # name = queueData[4]
            # original_image = queueData[5]

            boxtext = queueData[0]
            image = queueData[1]
            name = queueData[2]

            if merkle_tree_interval > 0:
                outsourcer_signature = name[:-5]
                outsourcer_image_count = name[-5]
                outsourcer_number_of_outputs_received = name[-4]
                outsourcer_random_number = name[-3]
                outsourcer_interval_count = name[-2]
                outsourcer_time_to_challenge = bool(name[-1])

            received_time = time.perf_counter()
            image_preprocessing_time = time.perf_counter()
            decompressed_time = time.perf_counter()
            verify_time = time.perf_counter()

            # inference
            # region
            # endregion
            model_inferenced_time = time.perf_counter()

            # image postprocessing
            # region
            h = time.perf_counter()
            # endregion

            if merkle_tree_interval == 0:
                boxtext = 'Image' + str(name[-2]) + ':;' + boxtext
            else:
                boxtext = 'Image' + str(outsourcer_image_count) + ':;' + boxtext

            image_postprocessing_time = time.perf_counter()

            # sign message -> need to add image_count/interval_count (for the
            # Merkle tree signature) and the contract hash to output and verification
            if merkle_tree_interval == 0:
                # sig = sk.sign_deterministic(boxtext.encode('latin1'))
                sig = sk.sign(boxtext.encode('latin1') + contractHash).signature
                # sig = list(sig)
                sig = sig.decode('latin1')

                # send reply
                responder.respond(boxtext + ';--' + sig)
            else:
                # print(image_count)
                # add leaves dynamically to the Merkle tree
                mt.add_leaf(boxtext, True)
                # remember indices for challenge
                mt_leaf_indices[outsourcer_image_count] = image_count % merkle_tree_interval
                # print(image_count % merkle_tree_interval)

                response = boxtext

                # time to send a new Merkle root
                # e.g. if interval = 128 then all responses from 0-127 are added to the Merkle tree
                if image_count > 1 and (image_count + 1) % merkle_tree_interval == 0:
                    # print(image_count)
                    a = time.perf_counter()
                    # rendundancy_counter = 2
                    mt.make_tree()
                    merkle_root = mt.get_merkle_root()
                    sig = sk.sign(merkle_root.encode('latin1')
                                  + bytes(interval_count) + contractHash).signature  # sign Merkle root

                    # respond with Merkle root
                    response += ';--' + str(merkle_root) + ';--' + sig.decode('latin1')

                    interval_count += 1
                    mtOld = mt  # save old Merkle tree for challenge
                    # mtOld_leaf_indices.clear()  # clear old indices
                    mtOld_leaf_indices.clear()
                    mtOld_leaf_indices = mt_leaf_indices.copy()  # save old indices for challenge
                    # print(mtOld_leaf_indices)
                    mt_leaf_indices.clear()  # clear for new indices
                    # mt_leaf_indices = {}

                    mt = MerkleTools()  # construct new Merkle tree for next interval
                    te = time.perf_counter() - a
                    # print('1', te, image_count)
                else:
                    if interval_count > outsourcer_image_count:
                        # if this is true then the outsourcer has not received
                        # the Merkle root yet -> send again
                        sig = sk.sign(merkle_root.encode('latin1')
                                      + bytes(interval_count) + contractHash).signature  # sign Merkle root
                        response += ';--' + str(merkle_root) + ';--' + sig.decode('latin1')
                        # print('2', image_count)
                    else:
                        # in this case the outsourcer has confirmed to have received the Merkle root.
                        # if the outsourcer has sent a challenge to meet with the old Merkle tree,
                        # give it 3 frames time to confirm the challenge was received before sending again
                        if outsourcer_time_to_challenge and image_count - last_challenge > 3:
                            last_challenge = image_count
                            if outsourcer_random_number in mtOld_leaf_indices:
                                # if the challenge can be found, send the proof back
                                outsourcer_random_number_index = mtOld_leaf_indices[outsourcer_random_number]
                            else:
                                # if the challenge index cannot be found, return leaf 0
                                outsourcer_random_number_index = 0
                                # print('proof index not found')

                            proofs = mtOld.get_proof(outsourcer_random_number_index)

                            stringsend = ''
                            for proof in proofs:
                                stringsend += ';--'  # indicate start of proof
                                stringsend += proof.__str__()  # send proof

                            stringsend += ';--'
                            stringsend += mtOld.get_leaf(outsourcer_random_number_index)  # send leaf
                            stringsend += ';--'
                            stringsend += mtOld.get_merkle_root()  # send root

                            stringarr = []
                            stringarr = stringsend.split(';--')

                            leaf_node = stringarr[-2]
                            root_node = stringarr[-1]
                            proof_string = stringarr[0:-2]

                            # sign proof and contract details
                            sig = sk.sign(str(stringarr[1:]).encode('latin1')
                                          + bytes(interval_count - 1) + contractHash).signature
                            # print(str(stringarr).encode('latin1') + bytes(interval_count - 1) + contractHash)
                            # print(stringarr)

                            # attach signature
                            response += ';--' + sig.decode('latin1')
                            response += stringsend  # attach challenge response to response
                            # print('3', te, image_count)

                responder.respond(response)

            response_signing_time = time.perf_counter()
            # print(response_signing_time - image_postprocessing_time)

            replied_time = time.perf_counter()

            # display image
            if not dont_show:
                # image.show()
                image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
                cv2.imshow('raspberrypi', image)
                if cv2.waitKey(1) == ord('q'):
                    responder.respond('abort12345:6')
                    sys.exit('Contract aborted: Contractor ended contract according to custom')

            image_showed_time = time.perf_counter()

            # statistics
            moving_average_fps.add(1 / (image_showed_time - start_time))
            moving_average_receive_time.add(received_time - start_time)
            moving_average_decompress_time.add(decompressed_time - received_time)
            moving_average_verify_image_sig_time.add(verify_time - decompressed_time)
            moving_average_img_preprocessing_time.add(image_preprocessing_time - verify_time)
            moving_average_model_inference_time.add(model_inferenced_time - image_preprocessing_time)
            moving_average_img_postprocessing_time.add(image_postprocessing_time - model_inferenced_time)
            moving_average_response_signing_time.add(
                response_signing_time - image_postprocessing_time)  # adjust for merkle root
            moving_average_reply_time.add(replied_time - response_signing_time)
            moving_average_image_show_time.add(image_showed_time - replied_time)

            total_time = moving_average_receive_time.get_moving_average() \
                + moving_average_decompress_time.get_moving_average() \
                + moving_average_verify_image_sig_time.get_moving_average() \
                + moving_average_img_preprocessing_time.get_moving_average() \
                + moving_average_model_inference_time.get_moving_average() \
                + moving_average_img_postprocessing_time.get_moving_average() \
                + moving_average_response_signing_time.get_moving_average() \
                + moving_average_reply_time.get_moving_average() \
                + moving_average_image_show_time.get_moving_average()

            if image_count == 800:
                a = time.perf_counter()
            if image_count == 1200:
                a = time.perf_counter() - a
                print(a)

            # terminal prints
            if image_count % 20 == 0:
                print(" total: %4.1fms (%4.1ffps) "
                      " receiving %4.1f (%4.1f%%) "
                      " decoding %4.1f (%4.1f%%) "
                      " verifying %4.1f (%4.1f%%) "
                      " preprocessing %4.1f (%4.1f%%) "
                      " model inference %4.1f (%4.1f%%) "
                      " postprocessing %4.1f (%4.1f%%) "
                      " signing %4.1f (%4.1f%%) "
                      " replying %4.1f (%4.1f%%) "
                      " display %4.1f (%4.1f%%) "
                      % (
                          1000 / moving_average_fps.get_moving_average(),
                          moving_average_fps.get_moving_average(),
                          moving_average_receive_time.get_moving_average() * 1000,
                          moving_average_receive_time.get_moving_average() / total_time * 100,
                          moving_average_decompress_time.get_moving_average() * 1000,
                          moving_average_decompress_time.get_moving_average() / total_time * 100,
                          moving_average_verify_image_sig_time.get_moving_average() * 1000,
                          moving_average_verify_image_sig_time.get_moving_average() / total_time * 100,
                          moving_average_img_preprocessing_time.get_moving_average() * 1000,
                          moving_average_img_preprocessing_time.get_moving_average() / total_time * 100,
                          moving_average_model_inference_time.get_moving_average() * 1000,
                          moving_average_model_inference_time.get_moving_average() / total_time * 100,
                          moving_average_img_postprocessing_time.get_moving_average() * 1000,
                          moving_average_img_postprocessing_time.get_moving_average() / total_time * 100,
                          moving_average_response_signing_time.get_moving_average() * 1000,
                          moving_average_response_signing_time.get_moving_average() / total_time * 100,
                          moving_average_reply_time.get_moving_average() * 1000,
                          moving_average_reply_time.get_moving_average() / total_time * 100,
                          moving_average_image_show_time.get_moving_average() * 1000,
                          moving_average_image_show_time.get_moving_average() / total_time * 100,
                      ), end='\r')

            # counter
            image_count += 1
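
# ---------------------------------------------------------------------------
# Illustration (not from the original sources): a minimal merkletools round
# trip matching the interval/challenge flow above. The contractor commits to
# all responses of one interval with a single root and later proves membership
# of a challenged leaf; the outsourcer validates the proof against the root it
# accepted earlier. The response strings are made up for the example.
# ---------------------------------------------------------------------------
from merkletools import MerkleTools

responses = ['Image0:;person', 'Image1:;dog', 'Image2:;car', 'Image3:;bird']

# contractor: build the tree over one interval and publish the root
mt = MerkleTools()
for resp in responses:
    mt.add_leaf(resp, True)  # True -> hash the value before inserting
mt.make_tree()
root = mt.get_merkle_root()

# contractor: answer a challenge on leaf index 2
challenge_index = 2
proof = mt.get_proof(challenge_index)  # list of {'left': ...}/{'right': ...} dicts
leaf = mt.get_leaf(challenge_index)

# outsourcer: validate the proof against the previously received root
assert MerkleTools().validate_proof(proof, leaf, root)
print('leaf', challenge_index, 'is committed under root', root)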
def main():
    """
    main function interface
    :return: nothing
    """
    # params
    width = 640
    height = 368
    moving_average_points = 50

    # initialize RPi camera
    rpi_cam = RPiCamera(width, height)
    rpi_cam.start()
    print('RPi Bird Feeder -> RPi Camera Ready')
    time.sleep(1.0)

    # initialize object detection model
    model = Model()
    model.load_model('models_edgetpu/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
    model.load_labels('labels_edgetpu/coco_labels.txt')
    model.set_confidence_level(0.6)
    print('RPi Bird Feeder -> Object Detection Model Initialized')
    time.sleep(1.0)

    # initialize render
    render = Render()
    print('RPi Bird Feeder -> Render Ready')
    time.sleep(0.5)

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_receive_time = MovingAverage(moving_average_points)
    moving_average_model_load_image_time = MovingAverage(moving_average_points)
    moving_average_model_inference_time = MovingAverage(moving_average_points)
    moving_average_image_show_time = MovingAverage(moving_average_points)

    # streaming
    image_count = 0
    print('RPi Bird Feeder -> Receiver Streaming')
    time.sleep(0.5)

    bird_pic_count = 0
    p = subprocess.Popen(['ls', '-l'], shell=True)

    while True:
        start_time = time.monotonic()

        # get image
        image = rpi_cam.get_image()
        received_time = time.monotonic()

        # load image into model (cv2 or pil backend)
        model.load_image_cv2_backend(image)
        model_loaded_image_time = time.monotonic()

        # model inference
        class_ids, scores, boxes = model.inference()
        model_inferenced_time = time.monotonic()

        # render image
        render.set_image(image)
        render.render_detection(model.labels, class_ids, boxes,
                                image.shape[1], image.shape[0], (45, 227, 227), 3)
        for i in range(len(class_ids)):
            if int(class_ids[i]) == 0 and p.poll() is not None:  # and (after_render_time - bird_time) > 5:
                cmd = ('python3 /home/pi/Documents/bird-feeder/DSLR.py '
                       '--DSLRPICDIR /home/pi/Documents/bird-feeder/DSLR_pics '
                       '--FILENAME ' + str(bird_pic_count) + '.jpg')
                print(cmd)
                p = subprocess.Popen(cmd, shell=True)
                bird_pic_count += 1
        render.render_fps(moving_average_fps.get_moving_average())

        # show image
        cv2.imshow('Bird Feeder', image)
        image_showed_time = time.monotonic()
        if cv2.waitKey(1) == ord('q'):
            break

        # statistics
        instant_fps = 1 / (image_showed_time - start_time)
        moving_average_fps.add(instant_fps)
        receive_time = received_time - start_time
        moving_average_receive_time.add(receive_time)
        model_load_image_time = model_loaded_image_time - received_time
        moving_average_model_load_image_time.add(model_load_image_time)
        model_inference_time = model_inferenced_time - model_loaded_image_time
        moving_average_model_inference_time.add(model_inference_time)
        image_show_time = image_showed_time - model_inferenced_time
        moving_average_image_show_time.add(image_show_time)

        total_time = moving_average_receive_time.get_moving_average() \
            + moving_average_model_load_image_time.get_moving_average() \
            + moving_average_model_inference_time.get_moving_average() \
            + moving_average_image_show_time.get_moving_average()

        # terminal prints
        if image_count % 50 == 0:
            print(" receiver's fps: %4.1f"
                  " receiver's time components: "
                  "receiving %4.1f%% "
                  "model load image %4.1f%% "
                  "model inference %4.1f%% "
                  "image show %4.1f%%"
                  % (moving_average_fps.get_moving_average(),
                     moving_average_receive_time.get_moving_average() / total_time * 100,
                     moving_average_model_load_image_time.get_moving_average() / total_time * 100,
                     moving_average_model_inference_time.get_moving_average() / total_time * 100,
                     moving_average_image_show_time.get_moving_average() / total_time * 100))  # , end='\r')

        # counter
        image_count += 1
        if image_count == 10000000:
            image_count = 0
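
# ---------------------------------------------------------------------------
# Illustration (not from the original sources): the p.poll() gate used above
# keeps at most one capture process alive -- poll() returns None while the
# child is still running, so a new capture is only launched once the previous
# one has exited. Command and detections here are stand-ins.
# ---------------------------------------------------------------------------
import subprocess

p = subprocess.Popen('true', shell=True)  # dummy child so p.poll() is defined

for detection in ['bird', 'bird', 'cat']:  # stand-in for the detection loop
    if detection == 'bird' and p.poll() is not None:
        # previous capture finished; safe to launch a new one
        p = subprocess.Popen('echo capture', shell=True)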
def main(_argv):
    vk = b'Y\xf8D\xe6o\xf9MZZh\x9e\xcb\xe0b\xb7h\xdb\\\xd7\x80\xd2S\xf5\x81\x92\xe8\x109r*U\xebT\x95\x0c\xf2\xf4(\x13%\x83\xb8\xfa;\xf04\xd3\xfb'
    vk = VerifyingKey.from_string(vk)

    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)
    # STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    input_size = 416
    iou = 0.45
    score = 0.5
    model = 'yolov4'
    framework = ''
    tiny = True
    weights = './checkpoints/yolov4-tiny-416'
    count = False
    dont_show = False
    info = True
    crop = False
    # images = FLAGS.images
    # images = []
    # images.append("C:/Users/Kitzbi/Documents/tensorflow yolo/yolov4-custom-functions/data/images/dog.jpg")

    # load model
    if framework == 'tflite':
        interpreter = tf.lite.Interpreter(model_path=weights)
    else:
        saved_model_loaded = tf.saved_model.load(weights, tags=[tag_constants.SERVING])

    # statistics info
    moving_average_points = 50

    # initialize receiver
    image_hub = imagezmq.ImageHub()
    print('RPi Stream -> Receiver Initialized')
    time.sleep(1.0)

    # initialize render
    render = Render()
    print('RPi Stream -> Render Ready')

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_receive_time = MovingAverage(moving_average_points)
    moving_average_decompress_time = MovingAverage(moving_average_points)
    moving_average_model_load_image_time = MovingAverage(moving_average_points)
    moving_average_model_inference_time = MovingAverage(moving_average_points)
    moving_average_reply_time = MovingAverage(moving_average_points)
    moving_average_image_show_time = MovingAverage(moving_average_points)

    image_count = 0

    # read in all class names from config
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)

    # streaming
    print('RPi Stream -> Receiver Streaming')

    while True:
        start_time = time.monotonic()

        # receive image
        name, compressed = image_hub.recv_jpg()
        received_time = time.monotonic()

        # decompress image
        decompressedImage = cv2.imdecode(np.frombuffer(compressed, dtype='uint8'), -1)
        decompressed_time = time.monotonic()

        # frame = cv2.cvtColor(decompressedImage, cv2.COLOR_BGR2RGB)
        # image = Image.fromarray(frame)

        original_image = cv2.cvtColor(decompressedImage, cv2.COLOR_BGR2RGB)
        image_data = cv2.resize(original_image, (input_size, input_size))
        image_data = image_data / 255.

        # get image name by using split method
        # image_name = image_path.split('/')[-1]
        # image_name = image_name.split('.')[0]

        images_data = []
        for i in range(1):
            images_data.append(image_data)
        images_data = np.asarray(images_data).astype(np.float32)

        with concurrent.futures.ThreadPoolExecutor() as executor:
            f1 = executor.submit(verify, vk, name, compressed)
            f2 = executor.submit(inference, framework, images_data, model, tiny,
                                 saved_model_loaded, iou, score)
            success = f1.result()
            boxes, scores, classes, valid_detections = f2.result()

        # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, xmax, ymax
        original_h, original_w, _ = original_image.shape
        bboxes = utils.format_boxes(boxes.numpy()[0], original_h, original_w)

        # hold all detection data in one variable
        pred_bbox = [bboxes, scores.numpy()[0], classes.numpy()[0], valid_detections.numpy()[0]]

        # by default allow all classes in .names file
        allowed_classes = list(class_names.values())

        # custom allowed classes (uncomment line below to allow detections for only people)
        # allowed_classes = ['person']

        # if crop flag is enabled, crop each detection and save it as new image
        if crop:
            crop_path = os.path.join(os.getcwd(), 'detections', 'crop', image_name)
            try:
                os.mkdir(crop_path)
            except FileExistsError:
                pass
            crop_objects(cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB),
                         pred_bbox, crop_path, allowed_classes)

        if count:
            # count objects found
            counted_classes = count_objects(pred_bbox, by_class=False, allowed_classes=allowed_classes)
            # loop through dict and print
            for key, value in counted_classes.items():
                print("Number of {}s: {}".format(key, value))
            boxtext, image = utils.draw_bbox(original_image, pred_bbox, image_count, info,
                                             counted_classes, allowed_classes=allowed_classes)
        else:
            boxtext, image = utils.draw_bbox(original_image, pred_bbox, image_count, info,
                                             allowed_classes=allowed_classes)

        image = Image.fromarray(image.astype(np.uint8))
        # print(boxtext)

        # send reply
        if info:
            image_hub.send_reply(boxtext)
        else:
            image_hub.send_reply('Ok')
        # stra = str(pred_bbox).encode()
        # image_hub.send_reply(stra)
        # print(stra)
        # image_hub.send_reply(str(pred_bbox).encode())
        # image_hub.send_reply(bytearray(pred_bbox))

        if not dont_show:
            # image.show()
            image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
            cv2.imshow('raspberrypi', image)

        image_showed_time = time.monotonic()
        if cv2.waitKey(1) == ord('q'):
            break

        # statistics
        instant_fps = 1 / (image_showed_time - start_time)
        moving_average_fps.add(instant_fps)
        receive_time = received_time - start_time
        moving_average_receive_time.add(receive_time)
        decompress_time = decompressed_time - received_time
        moving_average_decompress_time.add(decompress_time)
        # model_load_image_time = model_loaded_image_time - decompressed_time
        # moving_average_model_load_image_time.add(model_load_image_time)
        # model_inference_time = model_inferenced_time - model_loaded_image_time
        # moving_average_model_inference_time.add(model_inference_time)
        # reply_time = replied_time - model_inferenced_time
        # moving_average_reply_time.add(reply_time)
        # image_show_time = image_showed_time - replied_time
        # moving_average_image_show_time.add(image_show_time)
        # total_time = moving_average_receive_time.get_moving_average() \
        #     + moving_average_decompress_time.get_moving_average() \
        #     + moving_average_model_load_image_time.get_moving_average() \
        #     + moving_average_model_inference_time.get_moving_average() \
        #     + moving_average_reply_time.get_moving_average() \
        #     + moving_average_image_show_time.get_moving_average()

        # terminal prints
        # if image_count % 10 == 0:
        #     print(moving_average_fps)
        #     print(decompress_time)
        #     print(" receiver's fps: %4.1f"
        #           " receiver's time components: "
        #           "receiving %4.1f%% "
        #           "decompressing %4.1f%% "
        #           "model load image %4.1f%% "
        #           "model inference %4.1f%% "
        #           "replying %4.1f%% "
        #           "image show %4.1f%%"
        #           % (moving_average_fps.get_moving_average(),
        #              moving_average_receive_time.get_moving_average() / total_time * 100,
        #              moving_average_decompress_time.get_moving_average() / total_time * 100,
        #              moving_average_model_load_image_time.get_moving_average() / total_time * 100,
        #              moving_average_model_inference_time.get_moving_average() / total_time * 100,
        #              moving_average_reply_time.get_moving_average() / total_time * 100,
        #              moving_average_image_show_time.get_moving_average() / total_time * 100), end='\r')
        # artificially added

        # counter
        image_count += 1
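
# ---------------------------------------------------------------------------
# Illustration (not from the original sources): the ThreadPoolExecutor pattern
# above overlaps signature verification with model inference, so the slower of
# the two, not their sum, bounds per-frame latency. The two workers here are
# sleeping stand-ins for the repo's verify() and inference() helpers.
# ---------------------------------------------------------------------------
import concurrent.futures
import time


def verify_stub(payload):
    time.sleep(0.01)  # stand-in for ECDSA signature verification
    return True


def inference_stub(payload):
    time.sleep(0.03)  # stand-in for the YOLO forward pass
    return ['person']


with concurrent.futures.ThreadPoolExecutor() as executor:
    f1 = executor.submit(verify_stub, b'frame')
    f2 = executor.submit(inference_stub, b'frame')
    ok = f1.result()          # blocks until verification finishes
    detections = f2.result()  # blocks until inference finishes
print(ok, detections)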
def main(_argv): # get paramters and contract details vk = VerifyKey(OutsourceContract.public_key_outsourcer) sk = SigningKey(Parameters.private_key_contractor) model = OutsourceContract.model framework = Parameters.framework tiny = OutsourceContract.tiny weights = Parameters.weights count = Parameters.count dont_show = Parameters.dont_show info = Parameters.info crop = Parameters.crop input_size = Parameters.input_size iou = Parameters.iou score = Parameters.score merkle_tree_interval = OutsourceContract.merkle_tree_interval hostname = Parameters.ip_outsourcer # Use to receive from other computer port = Parameters.port_outsourcer sendingPort = Parameters.sendingPort minimum_receive_rate_from_contractor = Parameters.minimum_receive_rate_from_contractor contractHash = Helperfunctions.hashContract().encode('latin1') # print(contractHash) # configure video stream receiver receiver = vss.VideoStreamSubscriber(hostname, port) time.sleep(2) preProcesser = Preprocesser(receiver, vk, merkle_tree_interval, minimum_receive_rate_from_contractor) time.sleep(2) print('RPi Stream -> Receiver Initialized') # time.sleep(1.0) # configure gpu usage config = ConfigProto() config.gpu_options.allow_growth = True session = InteractiveSession(config=config) # load model if framework == 'tflite': interpreter = tf.lite.Interpreter(model_path=weights) else: saved_model_loaded = tf.saved_model.load( weights, tags=[tag_constants.SERVING]) # read in all class names from config class_names = utils.read_class_names(cfg.YOLO.CLASSES) # configure responder responder = re.Responder(hostname, sendingPort) # statistics info moving_average_points = 50 # statistics moving_average_fps = MovingAverage(moving_average_points) moving_average_receive_time = MovingAverage(moving_average_points) moving_average_decompress_time = MovingAverage(moving_average_points) #moving_average_model_load_image_time = MovingAverage(moving_average_points) moving_average_img_preprocessing_time = MovingAverage( moving_average_points) moving_average_model_inference_time = MovingAverage(moving_average_points) moving_average_img_postprocessing_time = MovingAverage( moving_average_points) moving_average_reply_time = MovingAverage(moving_average_points) moving_average_image_show_time = MovingAverage(moving_average_points) moving_average_verify_image_sig_time = MovingAverage(moving_average_points) moving_average_response_signing_time = MovingAverage(moving_average_points) image_count = 0 a = 0 b = 0 if merkle_tree_interval > 0: mt = MerkleTools() mtOld = MerkleTools() interval_count = 0 mtOld_leaf_indices = {} mt_leaf_indices = {} #rendundancy_counter = 0 #rendundancy_counter2 = 0 current_challenge = 1 merkle_root = '' #stringsend = '' last_challenge = 0 while True: start_time = time.perf_counter() data = preProcesser.receive() images_data = data[0] name = data[1] received_time = time.perf_counter() decompressed_time = time.perf_counter() verify_time = time.perf_counter() image_preprocessing_time = time.perf_counter() # receive image # region # name[:-2] image signature, name # inference # region if framework == 'tflite': interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() interpreter.set_tensor(input_details[0]['index'], images_data) interpreter.invoke() pred = [interpreter.get_tensor( output_details[i]['index']) for i in range(len(output_details))] if model == 'yolov3' and tiny == True: boxes, pred_conf = filter_boxes( pred[1], pred[0], score_threshold=0.25, 
                    input_shape=tf.constant([input_size, input_size]))
            else:
                boxes, pred_conf = filter_boxes(
                    pred[0], pred[1], score_threshold=0.25,
                    input_shape=tf.constant([input_size, input_size]))
        else:
            infer = saved_model_loaded.signatures['serving_default']
            batch_data = tf.constant(images_data)
            pred_bbox = infer(batch_data)
            for key, value in pred_bbox.items():
                boxes = value[:, :, 0:4]
                pred_conf = value[:, :, 4:]
        # endregion

        model_inferenced_time = time.perf_counter()

        # image postprocessing
        # region
        h = time.perf_counter()

        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=iou,
            score_threshold=score
        )  # 1.2ms

        # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, xmax, ymax
        original_h, original_w, _ = original_image.shape
        bboxes = utils.format_boxes(
            boxes.numpy()[0], original_h, original_w)  # 1ms

        # hold all detection data in one variable
        pred_bbox = [bboxes, scores.numpy()[0], classes.numpy()[0],
                     valid_detections.numpy()[0]]

        # by default allow all classes in .names file
        allowed_classes = list(class_names.values())

        # custom allowed classes (uncomment line below to allow detections for only people)
        #allowed_classes = ['person']

        # if crop flag is enabled, crop each detection and save it as a new image
        if crop:
            crop_path = os.path.join(
                os.getcwd(), 'detections', 'crop', image_name)
            try:
                os.mkdir(crop_path)
            except FileExistsError:
                pass
            crop_objects(cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB),
                         pred_bbox, crop_path, allowed_classes)

        if count:
            # count objects found
            counted_classes = count_objects(
                pred_bbox, by_class=False, allowed_classes=allowed_classes)
            # loop through dict and print
            for key, value in counted_classes.items():
                print("Number of {}s: {}".format(key, value))
            boxtext, image = utils.draw_bbox(
                original_image, pred_bbox, info, counted_classes,
                allowed_classes=allowed_classes)
        else:
            boxtext, image = utils.draw_bbox(
                original_image, pred_bbox, info,
                allowed_classes=allowed_classes)  # 0.5ms

        image = Image.fromarray(image.astype(np.uint8))  # 0.3ms
        # endregion

        if merkle_tree_interval == 0:
            boxtext = 'Image' + str(name[-2]) + ':;' + boxtext
        else:
            boxtext = 'Image' + str(outsourcer_image_count) + ':;' + boxtext

        image_postprocessing_time = time.perf_counter()

        # sign message -> the image_count/interval_count (for the Merkle tree signature)
        # and the contract hash must be added to the output for verification
        if merkle_tree_interval == 0:
            #sig = sk.sign_deterministic(boxtext.encode('latin1'))
            sig = sk.sign(boxtext.encode('latin1') + contractHash).signature
            #sig = list(sig)
            sig = sig.decode('latin1')

            # send reply
            responder.respond(boxtext + ';--' + sig)
        else:
            # print(image_count)
            mt.add_leaf(boxtext, True)  # add leaves dynamically to the Merkle tree
            # remember indices for challenge
            mt_leaf_indices[outsourcer_image_count] = image_count % merkle_tree_interval
            # print(image_count % merkle_tree_interval)

            response = boxtext

            # time to send a new Merkle root,
            # e.g. if interval = 128, then all responses from 0-127 are added to the Merkle tree
            if image_count > 1 and (image_count + 1) % merkle_tree_interval == 0:
                # print(image_count)
                a = time.perf_counter()
                #redundancy_counter = 2
                mt.make_tree()
                merkle_root = mt.get_merkle_root()

                sig = sk.sign(merkle_root.encode('latin1')
                              + bytes(interval_count) + contractHash).signature  # sign Merkle root

                # respond with Merkle root
                response += ';--' + str(merkle_root) + \
                    ';--' + sig.decode('latin1')

                interval_count += 1
                mtOld = mt  # save old Merkle tree for challenge
                mtOld_leaf_indices.clear()  # clear old indices
                mtOld_leaf_indices = mt_leaf_indices.copy()  # save old indices for challenge
                # print(mtOld_leaf_indices)
                mt_leaf_indices.clear()  # clear for new indices
                #mt_leaf_indices = {}

                mt = MerkleTools()  # construct new Merkle tree for the next interval
                te = time.perf_counter() - a
                # print('1', te, image_count)
            else:
                # if this is true, the outsourcer has not received the Merkle root yet -> send it again
                if interval_count > outsourcer_image_count:
                    sig = sk.sign(merkle_root.encode('latin1')
                                  + bytes(interval_count) + contractHash).signature  # sign Merkle root
                    response += ';--' + str(merkle_root) + \
                        ';--' + sig.decode('latin1')
                    # print('2', image_count)
                else:
                    # in this case the outsourcer has confirmed receipt of the Merkle root

                    # the outsourcer has sent a challenge to be met with the old Merkle tree;
                    # give the outsourcer 3 frames to confirm the challenge response was
                    # received before sending it again
                    if outsourcer_time_to_challenge and image_count - last_challenge > 3:
                        last_challenge = image_count
                        if outsourcer_random_number in mtOld_leaf_indices:
                            # if the challenged leaf can be found, send its proof back
                            outsourcer_random_number_index = mtOld_leaf_indices[outsourcer_random_number]
                        else:
                            # if the challenged index cannot be found, return leaf 0
                            outsourcer_random_number_index = 0
                            # print('proof index not found')

                        proofs = mtOld.get_proof(outsourcer_random_number_index)

                        stringsend = ''
                        for proof in proofs:
                            stringsend += ';--'  # indicate start of proof
                            stringsend += proof.__str__()  # send proof

                        stringsend += ';--'
                        stringsend += mtOld.get_leaf(outsourcer_random_number_index)  # send leaf
                        stringsend += ';--'
                        stringsend += mtOld.get_merkle_root()  # send root

                        stringarr = stringsend.split(';--')

                        leaf_node = stringarr[-2]
                        root_node = stringarr[-1]
                        proof_string = stringarr[0:-2]

                        sig = sk.sign(str(stringarr[1:]).encode('latin1')
                                      + bytes(interval_count - 1)
                                      + contractHash).signature  # sign proof and contract details
                        # print(str(stringarr).encode('latin1') + bytes(interval_count-1) + contractHash)
                        # print(stringarr)

                        # attach signature
                        response += ';--' + sig.decode('latin1')
                        response += stringsend  # attach challenge response to response
                        # print('3', te, image_count)

            responder.respond(response)

        response_signing_time = time.perf_counter()
        # print(response_signing_time - image_postprocessing_time)

        replied_time = time.perf_counter()

        # display image
        if not dont_show:
            # image.show()
            image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
            cv2.imshow('raspberrypi', image)

            if cv2.waitKey(1) == ord('q'):
                responder.respond('abort12345:6')
                sys.exit(
                    'Contract aborted: Contractor ended contract according to custom')

        image_showed_time = time.perf_counter()

        # statistics
        moving_average_fps.add(1 / (image_showed_time - start_time))
        moving_average_receive_time.add(received_time - start_time)
        moving_average_decompress_time.add(decompressed_time - received_time)
        moving_average_verify_image_sig_time.add(verify_time - decompressed_time)
        moving_average_img_preprocessing_time.add(image_preprocessing_time - verify_time)
        moving_average_model_inference_time.add(model_inferenced_time - image_preprocessing_time)
        moving_average_img_postprocessing_time.add(image_postprocessing_time - model_inferenced_time)
        moving_average_response_signing_time.add(
            response_signing_time - image_postprocessing_time)  # adjust for Merkle root
        moving_average_reply_time.add(replied_time - response_signing_time)
        moving_average_image_show_time.add(image_showed_time - replied_time)

        total_time = moving_average_receive_time.get_moving_average() \
            + moving_average_decompress_time.get_moving_average() \
            + moving_average_verify_image_sig_time.get_moving_average() \
            + moving_average_img_preprocessing_time.get_moving_average() \
            + moving_average_model_inference_time.get_moving_average() \
            + moving_average_img_postprocessing_time.get_moving_average() \
            + moving_average_response_signing_time.get_moving_average() \
            + moving_average_reply_time.get_moving_average() \
            + moving_average_image_show_time.get_moving_average()

        # count the seconds it takes to process 400 images after an 800 frame warm-up
        if image_count == 800:
            a = time.perf_counter()
        if image_count == 1200:
            a = time.perf_counter() - a
            print(a)

        # terminal prints
        if image_count % 20 == 0:
            print(" total: %4.1fms (%4.1ffps) "
                  " receiving %4.1f (%4.1f%%) "
                  " decoding %4.1f (%4.1f%%) "
                  " verifying %4.1f (%4.1f%%) "
                  " preprocessing %4.1f (%4.1f%%) "
                  " model inference %4.1f (%4.1f%%) "
                  " postprocessing %4.1f (%4.1f%%) "
                  " signing %4.1f (%4.1f%%) "
                  " replying %4.1f (%4.1f%%) "
                  " display %4.1f (%4.1f%%) "
                  % (1000 / moving_average_fps.get_moving_average(),
                     moving_average_fps.get_moving_average(),
                     moving_average_receive_time.get_moving_average() * 1000,
                     moving_average_receive_time.get_moving_average() / total_time * 100,
                     moving_average_decompress_time.get_moving_average() * 1000,
                     moving_average_decompress_time.get_moving_average() / total_time * 100,
                     moving_average_verify_image_sig_time.get_moving_average() * 1000,
                     moving_average_verify_image_sig_time.get_moving_average() / total_time * 100,
                     moving_average_img_preprocessing_time.get_moving_average() * 1000,
                     moving_average_img_preprocessing_time.get_moving_average() / total_time * 100,
                     moving_average_model_inference_time.get_moving_average() * 1000,
                     moving_average_model_inference_time.get_moving_average() / total_time * 100,
                     moving_average_img_postprocessing_time.get_moving_average() * 1000,
                     moving_average_img_postprocessing_time.get_moving_average() / total_time * 100,
                     moving_average_response_signing_time.get_moving_average() * 1000,
                     moving_average_response_signing_time.get_moving_average() / total_time * 100,
                     moving_average_reply_time.get_moving_average() * 1000,
                     moving_average_reply_time.get_moving_average() / total_time * 100,
                     moving_average_image_show_time.get_moving_average() * 1000,
                     moving_average_image_show_time.get_moving_average() / total_time * 100),
                  end='\r')

        # counter
        image_count += 1
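
# How the outsourcer can check the challenge response assembled above: the
# ';--'-separated fields carry the stringified proof dicts, the challenged leaf
# hash, and the Merkle root. A minimal verification sketch, assuming
# merkletools' default SHA-256 hex encoding; the function and argument names
# here are illustrative, not part of this repo.
import hashlib
from ast import literal_eval

def verify_merkle_proof(proof_strings, leaf_hex, root_hex):
    """Fold the sibling hashes over the challenged leaf and compare to the root."""
    # parse the stringified {'left': ...} / {'right': ...} sibling dicts
    proof_dicts = [literal_eval(p) for p in proof_strings]
    h = bytes.fromhex(leaf_hex)
    for sibling in proof_dicts:
        if 'left' in sibling:
            h = hashlib.sha256(bytes.fromhex(sibling['left']) + h).digest()
        else:
            h = hashlib.sha256(h + bytes.fromhex(sibling['right'])).digest()
    return h.hex() == root_hex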
def main():
    """
    main function interface
    :return: nothing
    """
    # statistics info
    moving_average_points = 50

    # initialize model
    model = Model()
    model.load_model('models_edgetpu/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite')
    model.load_labels('labels_edgetpu/coco_labels.txt')
    model.set_confidence_level(0.3)

    # initialize receiver
    image_hub = imagezmq.ImageHub()
    print('RPi Stream -> Receiver Initialized')
    time.sleep(1.0)

    # initialize render
    render = Render()
    print('RPi Stream -> Render Ready')

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_receive_time = MovingAverage(moving_average_points)
    moving_average_decompress_time = MovingAverage(moving_average_points)
    moving_average_model_load_image_time = MovingAverage(moving_average_points)
    moving_average_model_inference_time = MovingAverage(moving_average_points)
    moving_average_reply_time = MovingAverage(moving_average_points)
    moving_average_image_show_time = MovingAverage(moving_average_points)
    image_count = 0

    # streaming
    print('RPi Stream -> Receiver Streaming')
    while True:
        start_time = time.monotonic()

        # receive image
        name, compressed = image_hub.recv_jpg()
        received_time = time.monotonic()

        # decompress image
        image = cv2.imdecode(np.frombuffer(compressed, dtype='uint8'), -1)
        decompressed_time = time.monotonic()

        # load image into model (cv2 or pil backend)
        model.load_image_cv2_backend(image)
        model_loaded_image_time = time.monotonic()

        # do model inference
        class_ids, scores, boxes = model.inference()
        model_inferenced_time = time.monotonic()

        # send reply
        image_hub.send_reply(b'OK')
        replied_time = time.monotonic()

        # render image
        render.set_image(image)
        render.render_detection(model.labels, class_ids, boxes,
                                image.shape[1], image.shape[0], (45, 227, 227), 3)
        render.render_fps(moving_average_fps.get_moving_average())

        # show image
        cv2.imshow(name, image)
        image_showed_time = time.monotonic()
        if cv2.waitKey(1) == ord('q'):
            break

        # statistics
        instant_fps = 1 / (image_showed_time - start_time)
        moving_average_fps.add(instant_fps)
        moving_average_receive_time.add(received_time - start_time)
        moving_average_decompress_time.add(decompressed_time - received_time)
        moving_average_model_load_image_time.add(model_loaded_image_time - decompressed_time)
        moving_average_model_inference_time.add(model_inferenced_time - model_loaded_image_time)
        moving_average_reply_time.add(replied_time - model_inferenced_time)
        moving_average_image_show_time.add(image_showed_time - replied_time)

        total_time = moving_average_receive_time.get_moving_average() \
            + moving_average_decompress_time.get_moving_average() \
            + moving_average_model_load_image_time.get_moving_average() \
            + moving_average_model_inference_time.get_moving_average() \
            + moving_average_reply_time.get_moving_average() \
            + moving_average_image_show_time.get_moving_average()

        # terminal prints
        if image_count % 10 == 0:
            print(" receiver's fps: %4.1f"
                  " receiver's time components: "
                  "receiving %4.1f%% "
                  "decompressing %4.1f%% "
                  "model load image %4.1f%% "
                  "model inference %4.1f%% "
                  "replying %4.1f%% "
                  "image show %4.1f%%"
                  % (moving_average_fps.get_moving_average(),
                     moving_average_receive_time.get_moving_average() / total_time * 100,
                     moving_average_decompress_time.get_moving_average() / total_time * 100,
                     moving_average_model_load_image_time.get_moving_average() / total_time * 100,
                     moving_average_model_inference_time.get_moving_average() / total_time * 100,
                     moving_average_reply_time.get_moving_average() / total_time * 100,
                     moving_average_image_show_time.get_moving_average() / total_time * 100),
                  end='\r')

        # counter
        image_count += 1
        if image_count == 10000000:
            image_count = 0
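
# The MovingAverage helper used throughout these scripts is defined elsewhere
# in the repo. A minimal sketch consistent with the add()/get_moving_average()
# usage above (fixed window, 50 points here); internals are an assumption, not
# the repo's actual implementation.
from collections import deque

class MovingAverage:
    """Fixed-window moving average over the last `points` samples."""
    def __init__(self, points):
        self._window = deque(maxlen=points)

    def add(self, value):
        self._window.append(value)

    def get_moving_average(self):
        # guard against division by zero before the first sample arrives
        return sum(self._window) / len(self._window) if self._window else 0.0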
def main(_argv):
    # get parameters and contract details
    if Parameters.is_contractor:  # checks whether this machine acts as contractor or verifier
        vk = VerifyKey(OutsourceContract.public_key_outsourcer)
        contractHash = Helperfunctions.hashContract().encode('latin1')
        model_to_use = OutsourceContract.model
        tiny = OutsourceContract.tiny
        merkle_tree_interval = OutsourceContract.merkle_tree_interval
        display_name = 'Contractor'
    else:
        vk = VerifyKey(VerifierContract.public_key_outsourcer)
        contractHash = Helperfunctions.hashVerifierContract().encode('latin1')
        model_to_use = VerifierContract.model
        tiny = VerifierContract.tiny
        merkle_tree_interval = 0
        display_name = 'Verifier'

    sk = SigningKey(Parameters.private_key_self)

    framework = Parameters.framework
    weights = Parameters.weights
    count = Parameters.count
    dont_show = Parameters.dont_show
    info = Parameters.info
    crop = Parameters.crop
    input_size = Parameters.input_size
    iou = Parameters.iou
    score = Parameters.score

    hostname = Parameters.ip_outsourcer
    port = Parameters.port_outsourcer
    sendingPort = Parameters.sendingPort
    minimum_receive_rate_from_contractor = Parameters.minimum_receive_rate_from_contractor

    # configure video stream receiver
    receiver = vss.VideoStreamSubscriber(hostname, port)
    print('Receiver Initialized')

    # configure gpu usage
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)

    # load model
    if framework == 'tflite':
        interpreter = tf.lite.Interpreter(model_path=weights)
    else:
        saved_model_loaded = tf.saved_model.load(weights, tags=[tag_constants.SERVING])

    # read in all class names from config
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)

    # configure responder
    responder = re.Responder(hostname, sendingPort)

    # configure and initialize statistic variables
    moving_average_points = 50
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_receive_time = MovingAverage(moving_average_points)
    moving_average_decompress_time = MovingAverage(moving_average_points)
    moving_average_img_preprocessing_time = MovingAverage(moving_average_points)
    moving_average_model_inference_time = MovingAverage(moving_average_points)
    moving_average_img_postprocessing_time = MovingAverage(moving_average_points)
    moving_average_reply_time = MovingAverage(moving_average_points)
    moving_average_image_show_time = MovingAverage(moving_average_points)
    moving_average_verify_image_sig_time = MovingAverage(moving_average_points)
    moving_average_response_signing_time = MovingAverage(moving_average_points)
    image_count = 0
    acknowledged_frames = 0
    a = 0
    b = 0

    # configure Merkle tree related variables if Merkle trees are to be used
    if merkle_tree_interval > 0:
        mt = MerkleTools()
        mtOld = MerkleTools()
        interval_count = 0
        mtOld_leaf_indices = {}
        mt_leaf_indices = {}
        current_challenge = 1
        merkle_root = ''
        last_challenge = 0

    # start real-time processing and verification
    while True:
        start_time = time.perf_counter()

        # receive image
        name, compressed = receiver.receive()
        if name == 'abort':
            sys.exit('Contract aborted by outsourcer according to custom')
        received_time = time.perf_counter()

        # decompress image
        decompressedImage = cv2.imdecode(
            np.frombuffer(compressed, dtype='uint8'), -1)
        decompressed_time = time.perf_counter()

        # verify image (check that the signature matches the image, the contract hash,
        # the image count, and the number of outputs received)
        if merkle_tree_interval == 0:
            try:
                vk.verify(bytes(compressed) + contractHash
                          + bytes(name[-2]) + bytes(name[-1]), bytes(name[:-2]))
            except Exception:
                sys.exit('Contract aborted: Outsourcer signature does not match input. Possible consequences for outsourcer: blacklist, bad review')
            if name[-1] < (image_count - 2) * minimum_receive_rate_from_contractor \
                    or name[-1] < acknowledged_frames:
                sys.exit('Contract aborted: Outsourcer did not acknowledge enough outputs. Possible consequences for outsourcer: blacklist, bad review')
            acknowledged_frames = name[-1]
        else:
            # verify that the signature matches the image, the contract hash, the image
            # count, the number of intervals, and the random number
            try:
                vk.verify(bytes(compressed) + contractHash
                          + bytes(name[-5]) + bytes(name[-4]) + bytes(name[-3])
                          + bytes(name[-2]) + bytes(name[-1]), bytes(name[:-5]))
            except Exception:
                sys.exit('Contract aborted: Outsourcer signature does not match input. Possible consequences for outsourcer: blacklist, bad review')
            if name[-4] < (image_count - 2) * minimum_receive_rate_from_contractor \
                    or name[-4] < acknowledged_frames:
                sys.exit('Contract aborted: Outsourcer did not acknowledge enough outputs. Possible consequences for outsourcer: blacklist, bad review')
            acknowledged_frames = name[-4]

            outsourcer_signature = name[:-5]
            outsourcer_image_count = name[-5]
            outsourcer_number_of_outputs_received = name[-4]
            outsourcer_random_number = name[-3]
            outsourcer_interval_count = name[-2]
            outsourcer_time_to_challenge = bool(name[-1])

        verify_time = time.perf_counter()

        # image preprocessing
        original_image = cv2.cvtColor(decompressedImage, cv2.COLOR_BGR2RGB)
        image_data = cv2.resize(original_image, (input_size, input_size))  # 0.4ms
        image_data = image_data / 255.  # 2.53ms
        images_data = []
        for i in range(1):
            images_data.append(image_data)
        images_data = np.asarray(images_data).astype(np.float32)  # 3.15ms
        image_preprocessing_time = time.perf_counter()

        # inference
        if framework == 'tflite':
            interpreter.allocate_tensors()
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            interpreter.set_tensor(input_details[0]['index'], images_data)
            interpreter.invoke()
            pred = [interpreter.get_tensor(output_details[i]['index'])
                    for i in range(len(output_details))]
            if model_to_use == 'yolov3' and tiny:
                boxes, pred_conf = filter_boxes(
                    pred[1], pred[0], score_threshold=0.25,
                    input_shape=tf.constant([input_size, input_size]))
            else:
                boxes, pred_conf = filter_boxes(
                    pred[0], pred[1], score_threshold=0.25,
                    input_shape=tf.constant([input_size, input_size]))
        else:
            infer = saved_model_loaded.signatures['serving_default']
            batch_data = tf.constant(images_data)
            pred_bbox = infer(batch_data)
            for key, value in pred_bbox.items():
                boxes = value[:, :, 0:4]
                pred_conf = value[:, :, 4:]
        model_inferenced_time = time.perf_counter()

        # image postprocessing
        # region
        h = time.perf_counter()
        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=iou,
            score_threshold=score)  # 1.2ms

        # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, xmax, ymax
        original_h, original_w, _ = original_image.shape
        bboxes = utils.format_boxes(boxes.numpy()[0], original_h, original_w)  # 1ms

        # hold all detection data in one variable
        pred_bbox = [bboxes, scores.numpy()[0], classes.numpy()[0],
                     valid_detections.numpy()[0]]

        # by default allow all classes in .names file
        allowed_classes = list(class_names.values())

        # custom allowed classes (uncomment line below to allow detections for only people)
        #allowed_classes = ['person']

        # if crop flag is enabled, crop each detection and save it as a new image
        if crop:
            crop_path = os.path.join(os.getcwd(), 'detections', 'crop', image_name)
            try:
                os.mkdir(crop_path)
            except FileExistsError:
                pass
            crop_objects(cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB),
                         pred_bbox, crop_path, allowed_classes)

        if count:
            # count objects found
            counted_classes = count_objects(pred_bbox, by_class=False,
                                            allowed_classes=allowed_classes)
            # loop through dict and print
            for key, value in counted_classes.items():
                print("Number of {}s: {}".format(key, value))
            boxtext, image = utils.draw_bbox(original_image, pred_bbox, info,
                                             counted_classes,
                                             allowed_classes=allowed_classes)
        else:
            boxtext, image = utils.draw_bbox(
                original_image, pred_bbox, info,
                allowed_classes=allowed_classes)  # 0.5ms

        image = Image.fromarray(image.astype(np.uint8))  # 0.3ms
        # endregion

        # prepare response
        if merkle_tree_interval == 0:
            boxtext = 'Image' + str(name[-2]) + ':;' + boxtext
        else:
            boxtext = 'Image' + str(outsourcer_image_count) + ':;' + boxtext
        #boxtext += "Object found: Person"  # dishonest

        image_postprocessing_time = time.perf_counter()

        if merkle_tree_interval == 0:
            sig = sk.sign(boxtext.encode('latin1') + contractHash).signature
            sig = sig.decode('latin1')

            # send reply
            responder.respond(boxtext + ';--' + sig)
        else:
            mt.add_leaf(boxtext, True)  # add leaves dynamically to the Merkle tree
            # remember indices for challenge
            mt_leaf_indices[outsourcer_image_count] = image_count % merkle_tree_interval

            response = boxtext

            # if the statement is true, it is time to send a new Merkle root,
            # e.g. if interval = 128, then all responses from 0-127 are added to the Merkle tree
            if image_count > 1 and (image_count + 1) % merkle_tree_interval == 0:
                mt.make_tree()
                merkle_root = mt.get_merkle_root()
                #merkle_root = mt.get_leaf(0)  # dishonest
                sig = sk.sign(merkle_root.encode('latin1')
                              + bytes(interval_count) + contractHash).signature  # sign Merkle root

                # respond with Merkle root
                response += ';--' + str(merkle_root) + \
                    ';--' + sig.decode('latin1')

                interval_count += 1
                mtOld = mt  # save old Merkle tree for challenge
                mtOld_leaf_indices.clear()
                mtOld_leaf_indices = mt_leaf_indices.copy()  # save old indices for challenge
                mt_leaf_indices.clear()  # clear for new indices
                mt = MerkleTools()  # construct new Merkle tree for the next interval
            else:
                # if the statement is true, it is time to resend the Merkle root because the
                # outsourcer has not received it yet -> send it again
                if interval_count > outsourcer_image_count:
                    sig = sk.sign(merkle_root.encode('latin1')
                                  + bytes(interval_count) + contractHash).signature  # sign Merkle root
                    response += ';--' + str(merkle_root) + \
                        ';--' + sig.decode('latin1')
                else:
                    # in this case the outsourcer has confirmed receipt of the Merkle root

                    # if the statement is true, it is time to respond to a challenge from the
                    # outsourcer on the old Merkle tree; give the outsourcer 3 frames to
                    # confirm the challenge response was received before sending it again
                    if outsourcer_time_to_challenge and image_count - last_challenge > 3:
                        last_challenge = image_count
                        if outsourcer_random_number in mtOld_leaf_indices:
                            # if the challenged leaf can be found, send its proof back
                            outsourcer_random_number_index = mtOld_leaf_indices[outsourcer_random_number]
                        else:
                            # if the challenged index cannot be found, return leaf 0
                            outsourcer_random_number_index = 0

                        proofs = mtOld.get_proof(outsourcer_random_number_index)

                        stringsend = ''
                        for proof in proofs:
                            stringsend += ';--'  # indicate start of proof
                            stringsend += proof.__str__()  # send proof

                        stringsend += ';--'
                        stringsend += mtOld.get_leaf(outsourcer_random_number_index)  # send leaf
                        stringsend += ';--'
                        stringsend += mtOld.get_merkle_root()  # send root

                        stringarr = stringsend.split(';--')

                        leaf_node = stringarr[-2]
                        root_node = stringarr[-1]
                        proof_string = stringarr[0:-2]

                        sig = sk.sign(str(stringarr[1:]).encode('latin1')
                                      + bytes(interval_count - 1)
                                      + contractHash).signature  # sign proof and contract details

                        # attach signature
                        response += ';--' + sig.decode('latin1')
                        response += stringsend  # attach challenge response to response

            responder.respond(response)

        response_signing_time = time.perf_counter()
        replied_time = time.perf_counter()

        # display image
        if not dont_show:
            image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
            cv2.imshow(display_name, image)

            if cv2.waitKey(1) == ord('q'):
                responder.respond('abort12345:6')
                sys.exit('Contract aborted: Ended contract according to custom')

        image_showed_time = time.perf_counter()

        # statistics
        moving_average_fps.add(1 / (image_showed_time - start_time))
        moving_average_receive_time.add(received_time - start_time)
        moving_average_decompress_time.add(decompressed_time - received_time)
        moving_average_verify_image_sig_time.add(verify_time - decompressed_time)
        moving_average_img_preprocessing_time.add(image_preprocessing_time - verify_time)
        moving_average_model_inference_time.add(model_inferenced_time - image_preprocessing_time)
        moving_average_img_postprocessing_time.add(image_postprocessing_time - model_inferenced_time)
        moving_average_response_signing_time.add(
            response_signing_time - image_postprocessing_time)  # adjust for Merkle root
        moving_average_reply_time.add(replied_time - response_signing_time)
        moving_average_image_show_time.add(image_showed_time - replied_time)

        total_time = moving_average_receive_time.get_moving_average() \
            + moving_average_decompress_time.get_moving_average() \
            + moving_average_verify_image_sig_time.get_moving_average() \
            + moving_average_img_preprocessing_time.get_moving_average() \
            + moving_average_model_inference_time.get_moving_average() \
            + moving_average_img_postprocessing_time.get_moving_average() \
            + moving_average_response_signing_time.get_moving_average() \
            + moving_average_reply_time.get_moving_average() \
            + moving_average_image_show_time.get_moving_average()

        # count the seconds it takes to process 400 images after an 800 frame warm-up
        if image_count == 800:
            a = time.perf_counter()
        if image_count == 1200:
            a = time.perf_counter() - a
            print(a)

        # terminal prints
        if image_count % 20 == 0:
            print(" total: %4.1fms (%4.1ffps) "
                  " receiving %4.1f (%4.1f%%) "
                  " decoding %4.1f (%4.1f%%) "
                  " verifying %4.1f (%4.1f%%) "
                  " preprocessing %4.1f (%4.1f%%) "
                  " model inference %4.1f (%4.1f%%) "
                  " postprocessing %4.1f (%4.1f%%) "
                  " signing %4.1f (%4.1f%%) "
                  " replying %4.1f (%4.1f%%) "
                  " display %4.1f (%4.1f%%) "
                  % (1000 / moving_average_fps.get_moving_average(),
                     moving_average_fps.get_moving_average(),
                     moving_average_receive_time.get_moving_average() * 1000,
                     moving_average_receive_time.get_moving_average() / total_time * 100,
                     moving_average_decompress_time.get_moving_average() * 1000,
                     moving_average_decompress_time.get_moving_average() / total_time * 100,
                     moving_average_verify_image_sig_time.get_moving_average() * 1000,
                     moving_average_verify_image_sig_time.get_moving_average() / total_time * 100,
                     moving_average_img_preprocessing_time.get_moving_average() * 1000,
                     moving_average_img_preprocessing_time.get_moving_average() / total_time * 100,
                     moving_average_model_inference_time.get_moving_average() * 1000,
                     moving_average_model_inference_time.get_moving_average() / total_time * 100,
                     moving_average_img_postprocessing_time.get_moving_average() * 1000,
                     moving_average_img_postprocessing_time.get_moving_average() / total_time * 100,
                     moving_average_response_signing_time.get_moving_average() * 1000,
                     moving_average_response_signing_time.get_moving_average() / total_time * 100,
                     moving_average_reply_time.get_moving_average() * 1000,
                     moving_average_reply_time.get_moving_average() / total_time * 100,
                     moving_average_image_show_time.get_moving_average() * 1000,
                     moving_average_image_show_time.get_moving_average() / total_time * 100),
                  end='\r')

        # counter
        image_count += 1
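
# The SigningKey/VerifyKey usage above matches PyNaCl's ed25519 API
# (nacl.signing): sign(...).signature yields 64 raw bytes, which 'latin1'
# round-trips losslessly into the ';--'-separated reply string. A self-contained
# round trip of that reply format; the key and contract hash below are
# illustrative stand-ins, not values from this repo.
from nacl.signing import SigningKey
from nacl.exceptions import BadSignatureError

contractHash = b'example-contract-hash'   # stand-in for Helperfunctions.hashContract()
sk = SigningKey.generate()                # contractor's signing key
vk = sk.verify_key                        # outsourcer holds the verify key

boxtext = 'Image42:;person: 0.98'
sig = sk.sign(boxtext.encode('latin1') + contractHash).signature
reply = boxtext + ';--' + sig.decode('latin1')

# outsourcer side: split the reply and verify against the shared contract hash
text, sig_str = reply.rsplit(';--', 1)
try:
    vk.verify(text.encode('latin1') + contractHash, sig_str.encode('latin1'))
    print('signature ok')
except BadSignatureError:
    print('signature mismatch -> abort contract')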
def main():
    """
    main function interface
    :return: nothing
    """
    # video info
    width = 640
    height = 368
    quality = 65

    # host computer info
    target_ip = '192.168.7.33'
    target_port = '5555'

    # statistics info
    moving_average_points = 50

    # initialize sender
    image_sender = Sender(target_ip, target_port)
    image_sender.set_quality(quality)
    print('RPi Stream -> Sender Initialized')

    # initialize RPi camera
    rpi_cam = RPiCamera(width, height)
    rpi_cam.start()
    print('RPi Stream -> Camera Started')
    time.sleep(1.0)

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_camera_time = MovingAverage(moving_average_points)
    moving_average_compress_time = MovingAverage(moving_average_points)
    moving_average_send_time = MovingAverage(moving_average_points)
    image_count = 0

    # streaming
    print('RPi Stream -> Start Streaming')
    while True:
        start_time = time.monotonic()

        # capture image
        image = rpi_cam.get_image()
        camera_time = time.monotonic() - start_time
        moving_average_camera_time.add(camera_time)

        # send compressed image (compress + send)
        compress_time, send_time = image_sender.send_image_compressed(
            rpi_cam.name, image)
        moving_average_compress_time.add(compress_time)
        moving_average_send_time.add(send_time)

        # statistics
        total_time = moving_average_camera_time.get_moving_average() \
            + moving_average_compress_time.get_moving_average() \
            + moving_average_send_time.get_moving_average()
        instant_fps = 1 / (time.monotonic() - start_time)
        moving_average_fps.add(instant_fps)

        # terminal prints
        if image_count % 10 == 0:
            print(" sender's fps: %5.1f sender's time components: "
                  "camera %4.1f%% compressing %4.1f%% sending %4.1f%%"
                  % (moving_average_fps.get_moving_average(),
                     moving_average_camera_time.get_moving_average() / total_time * 100,
                     moving_average_compress_time.get_moving_average() / total_time * 100,
                     moving_average_send_time.get_moving_average() / total_time * 100),
                  end='\r')

        # counter
        image_count += 1
        if image_count == 10000000:
            image_count = 0
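
# Sender.send_image_compressed is defined elsewhere in the repo; the sketch
# below shows what it plausibly does, assuming OpenCV JPEG encoding and
# imagezmq transport, and returns the (compress_time, send_time) pair consumed
# above. Class internals here are assumptions, not the repo's implementation.
import time
import cv2
import imagezmq

class Sender:
    """Sketch of the JPEG-compressing image sender used above."""
    def __init__(self, target_ip, target_port):
        self._sender = imagezmq.ImageSender(
            connect_to='tcp://{}:{}'.format(target_ip, target_port))
        self._quality = 95

    def set_quality(self, quality):
        self._quality = quality

    def send_image_compressed(self, name, image):
        t0 = time.monotonic()
        _, jpg = cv2.imencode(
            '.jpg', image, [int(cv2.IMWRITE_JPEG_QUALITY), self._quality])
        t1 = time.monotonic()
        self._sender.send_jpg(name, jpg)   # blocks until the hub replies
        t2 = time.monotonic()
        return t1 - t0, t2 - t1            # (compress_time, send_time)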
def main():
    """
    main function interface
    :return: nothing
    """
    # statistics info
    moving_average_points = 50

    # initialize receiver
    image_hub = imagezmq.ImageHub()
    print('RPi Stream -> Receiver Initialized')
    time.sleep(1.0)

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_receive_time = MovingAverage(moving_average_points)
    moving_average_decompress_time = MovingAverage(moving_average_points)
    moving_average_reply_time = MovingAverage(moving_average_points)
    moving_average_image_show_time = MovingAverage(moving_average_points)
    image_count = 0

    # streaming
    print('RPi Stream -> Receiver Streaming')
    while True:
        start_time = time.monotonic()

        # receive image
        name, compressed = image_hub.recv_jpg()
        received_time = time.monotonic()

        # decompress image
        image = cv2.imdecode(np.frombuffer(compressed, dtype='uint8'), -1)
        decompressed_time = time.monotonic()

        # send reply
        image_hub.send_reply(b'OK')
        replied_time = time.monotonic()

        # show image
        cv2.imshow(name, image)
        image_showed_time = time.monotonic()
        if cv2.waitKey(1) == ord('q'):
            break

        # statistics
        instant_fps = 1 / (image_showed_time - start_time)
        moving_average_fps.add(instant_fps)
        moving_average_receive_time.add(received_time - start_time)
        moving_average_decompress_time.add(decompressed_time - received_time)
        moving_average_reply_time.add(replied_time - decompressed_time)
        moving_average_image_show_time.add(image_showed_time - replied_time)

        total_time = moving_average_receive_time.get_moving_average() \
            + moving_average_decompress_time.get_moving_average() \
            + moving_average_reply_time.get_moving_average() \
            + moving_average_image_show_time.get_moving_average()

        # terminal prints
        if image_count % 10 == 0:
            print(" receiver's fps: %5.1f"
                  " receiver's time components: receiving %4.1f%% decompressing %4.1f%% "
                  "replying %4.1f%% image show %4.1f%% "
                  % (moving_average_fps.get_moving_average(),
                     moving_average_receive_time.get_moving_average() / total_time * 100,
                     moving_average_decompress_time.get_moving_average() / total_time * 100,
                     moving_average_reply_time.get_moving_average() / total_time * 100,
                     moving_average_image_show_time.get_moving_average() / total_time * 100),
                  end='\r')

        # counter
        image_count += 1
        if image_count == 10000000:
            image_count = 0
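
# The hub above pairs with the sender over ZMQ REQ/REP: recv_jpg blocks until a
# frame arrives and the sender blocks until send_reply, so a slow receiver
# throttles the stream. A standalone round trip of the exact encode/decode pair
# used on the two sides (quality 65 as in the sender); frame contents are a
# synthetic placeholder.
import cv2
import numpy as np

frame = np.zeros((368, 640, 3), dtype=np.uint8)  # same resolution as the sender
ok, compressed = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 65])
assert ok
restored = cv2.imdecode(np.frombuffer(compressed, dtype='uint8'), -1)
assert restored.shape == frame.shape  # JPEG is lossy, but shape and dtype survive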
def main(_argv):
    # get parameters and contract details
    if Parameters.is_contractor:
        vk_Bytes = OutsourceContract.public_key_outsourcer
        merkle_tree_interval = OutsourceContract.merkle_tree_interval
        contractHash = Helperfunctions.hashContract().encode('latin1')
        model_to_use = OutsourceContract.model
        tiny = OutsourceContract.tiny
    else:
        vk_Bytes = VerifierContract.public_key_outsourcer
        contractHash = Helperfunctions.hashVerifierContract().encode('latin1')
        model_to_use = VerifierContract.model
        tiny = VerifierContract.tiny
        merkle_tree_interval = 0

    port = Parameters.port_outsourcer
    sendingPort = Parameters.sendingPort
    hostname = Parameters.ip_outsourcer  # used to receive from the other computer
    minimum_receive_rate_from_contractor = Parameters.minimum_receive_rate_from_contractor

    dont_show = Parameters.dont_show
    framework = Parameters.framework
    weights = Parameters.weights
    count = Parameters.count
    info = Parameters.info
    crop = Parameters.crop
    iou = Parameters.iou
    score = Parameters.score
    input_size = Parameters.input_size

    # configure thread handler to handle T2 (receiving), T3 (decompressing, verifying,
    # preprocessing), and T4 (postprocessing, signing, sending, displaying)
    receiver = vss3.ThreadHandler(hostname, port, merkle_tree_interval, contractHash,
                                  minimum_receive_rate_from_contractor, vk_Bytes,
                                  input_size, sendingPort)
    print('Receiver Initialized')

    # configure gpu usage
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)

    # load model
    if framework == 'tflite':
        interpreter = tf.lite.Interpreter(model_path=weights)
    else:
        saved_model_loaded = tf.saved_model.load(weights, tags=[tag_constants.SERVING])

    # read in all class names from config
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)

    # configure and initialize statistic variables
    moving_average_points = 50

    # statistics
    moving_average_fps = MovingAverage(moving_average_points)
    moving_average_thread3_waiting_time = MovingAverage(moving_average_points)
    moving_average_thread4_waiting_time = MovingAverage(moving_average_points)
    moving_average_model_inference_time = MovingAverage(moving_average_points)
    moving_average_img_postprocessing_time = MovingAverage(moving_average_points)
    image_count = 0
    a = 0
    b = 0

    while True:
        start_time = time.perf_counter()

        # receive preprocessed image from Thread 3
        preprocessOutput = receiver.receive2()
        thread3_waiting_time = time.perf_counter()

        images_data = preprocessOutput[0]
        name = preprocessOutput[1]
        original_image = preprocessOutput[2]

        # inference
        if framework == 'tflite':
            interpreter.allocate_tensors()
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            interpreter.set_tensor(input_details[0]['index'], images_data)
            interpreter.invoke()
            pred = [interpreter.get_tensor(output_details[i]['index'])
                    for i in range(len(output_details))]
            if model_to_use == 'yolov3' and tiny:
                boxes, pred_conf = filter_boxes(
                    pred[1], pred[0], score_threshold=0.25,
                    input_shape=tf.constant([input_size, input_size]))
            else:
                boxes, pred_conf = filter_boxes(
                    pred[0], pred[1], score_threshold=0.25,
                    input_shape=tf.constant([input_size, input_size]))
        else:
            infer = saved_model_loaded.signatures['serving_default']
            batch_data = tf.constant(images_data)
            pred_bbox = infer(batch_data)
            for key, value in pred_bbox.items():
                boxes = value[:, :, 0:4]
                pred_conf = value[:, :, 4:]
        model_inferenced_time = time.perf_counter()

        # image postprocessing
        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=iou,
            score_threshold=score)  # 1.2ms

        # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, xmax, ymax
        original_h, original_w, _ = original_image.shape
        bboxes = utils.format_boxes(boxes.numpy()[0], original_h, original_w)  # 1ms

        # hold all detection data in one variable
        pred_bbox = [bboxes, scores.numpy()[0], classes.numpy()[0],
                     valid_detections.numpy()[0]]

        # by default allow all classes in .names file
        allowed_classes = list(class_names.values())

        # custom allowed classes (uncomment line below to allow detections for only people)
        #allowed_classes = ['person']

        # if crop flag is enabled, crop each detection and save it as a new image
        if crop:
            crop_path = os.path.join(os.getcwd(), 'detections', 'crop', image_name)
            try:
                os.mkdir(crop_path)
            except FileExistsError:
                pass
            crop_objects(cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB),
                         pred_bbox, crop_path, allowed_classes)

        if count:
            # count objects found
            counted_classes = count_objects(pred_bbox, by_class=False,
                                            allowed_classes=allowed_classes)
            # loop through dict and print
            for key, value in counted_classes.items():
                print("Number of {}s: {}".format(key, value))
            boxtext, image = utils.draw_bbox(original_image, pred_bbox, info,
                                             counted_classes,
                                             allowed_classes=allowed_classes)
        else:
            boxtext, image = utils.draw_bbox(
                original_image, pred_bbox, info,
                allowed_classes=allowed_classes)  # 0.5ms

        image = Image.fromarray(image.astype(np.uint8))  # 0.3ms
        # endregion

        if merkle_tree_interval == 0:
            boxtext = 'Image' + str(name[-2]) + ':;' + boxtext
        else:
            boxtext = 'Image' + str(name[-5]) + ':;' + boxtext

        image_postprocessing_time = time.perf_counter()

        # send postprocessing result to Thread 4 for signing, sending, and displaying;
        # wait if Thread 4 is still busy with the last frame
        receiver.putData((boxtext, image, name, image_count))
        thread4_waiting_time = time.perf_counter()

        # statistics
        moving_average_fps.add(1 / (thread4_waiting_time - start_time))
        moving_average_thread3_waiting_time.add(thread3_waiting_time - start_time)
        moving_average_model_inference_time.add(model_inferenced_time - thread3_waiting_time)
        moving_average_img_postprocessing_time.add(image_postprocessing_time - model_inferenced_time)
        moving_average_thread4_waiting_time.add(thread4_waiting_time - image_postprocessing_time)

        total_time = moving_average_thread3_waiting_time.get_moving_average() \
            + moving_average_model_inference_time.get_moving_average() \
            + moving_average_img_postprocessing_time.get_moving_average() \
            + moving_average_thread4_waiting_time.get_moving_average()

        # count the seconds it takes to process 400 images after an 800 frame warm-up
        if image_count == 800:
            a = time.perf_counter()
        if image_count == 1200:
            a = time.perf_counter() - a
            print(a)

        # terminal prints
        if image_count % 20 == 0:
            print(" total: %4.1fms (%4.1ffps) "
                  " Waiting for Thread 3 (receiving, decoding, verifying, preprocessing) %4.1f (%4.1f%%) "
                  " model inference %4.1f (%4.1f%%) "
                  " postprocessing %4.1f (%4.1f%%) "
                  " Waiting for Thread 4 (signing, replying, displaying) %4.1f (%4.1f%%) "
                  % (1000 / moving_average_fps.get_moving_average(),
                     moving_average_fps.get_moving_average(),
                     moving_average_thread3_waiting_time.get_moving_average() * 1000,
                     moving_average_thread3_waiting_time.get_moving_average() / total_time * 100,
                     moving_average_model_inference_time.get_moving_average() * 1000,
                     moving_average_model_inference_time.get_moving_average() / total_time * 100,
                     moving_average_img_postprocessing_time.get_moving_average() * 1000,
                     moving_average_img_postprocessing_time.get_moving_average() / total_time * 100,
                     moving_average_thread4_waiting_time.get_moving_average() * 1000,
                     moving_average_thread4_waiting_time.get_moving_average() / total_time * 100),
                  end='\r')

        # counter
        image_count += 1
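
# vss3.ThreadHandler's receive2/putData interface is defined elsewhere in the
# repo. Judging only from its use above, each side behaves like a blocking
# one-slot handoff between pipeline threads: receive2() waits for Thread 3's
# output and putData() waits while Thread 4 is still busy. A minimal sketch of
# that pattern; this is an assumed design, not the repo's actual code.
import queue

class ThreadHandlerSketch:
    """One-slot queue handoff between pipeline stages (assumed design)."""
    def __init__(self):
        self._to_inference = queue.Queue(maxsize=1)    # Thread 3 -> main loop
        self._to_postprocess = queue.Queue(maxsize=1)  # main loop -> Thread 4

    def receive2(self):
        # blocks until Thread 3 has decoded, verified, and preprocessed a frame
        return self._to_inference.get()

    def putData(self, data):
        # blocks while Thread 4 has not yet taken the previous frame
        self._to_postprocess.put(data)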