# NOTE(review): this definition is whitespace-collapsed onto one line; the
# original indentation is lost, so the exact nesting (scope of the inner
# try around cv2.imdecode, and whether sleep(10) runs on every loop
# iteration) cannot be recovered with certainty — restore from version
# control before refactoring.
# Apparent intent (from the visible calls): loop forever fetching a camera
# frame over HTTP, decode it with OpenCV, upload it to an Azure blob
# container, classify it with Custom Vision, and forward the top tag via
# sendUpdateToArduino(); on any failure the local image file is removed.
# Review smells visible even collapsed: a bare `except:` that swallows the
# decode error (the stale/undefined `img` is then still written), and a
# hard-coded storage account key — rotate it and move it to config/env.
def run_sample(): try: # Create the BlockBlockService that is used to call the Blob service for the storage account block_blob_service = BlockBlobService( account_name='functionsimgpro9756', account_key='****************************') # Create a container called 'quickstartblobs'. container_name = 'samples-workitems' block_blob_service.create_container(container_name) # Set the permission so the blobs are public. block_blob_service.set_container_acl( container_name, public_access=PublicAccess.Container) local_file_name = "Mobile_camera_Feed.jpg" full_path_to_file = "./Mobile_camera_Feed.jpg" predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT) project = find_project() while True: print("HI!!!") img_resp = requests.get(url) img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8) time.sleep(0.1) try: img = cv2.imdecode(img_arr, -1) except: print("An exception occurred") cv2.imwrite("Mobile_camera_Feed.jpg", img) block_blob_service.create_blob_from_path(container_name, local_file_name, full_path_to_file) #Make Prediction print("Make prediction") with open("Mobile_camera_Feed.jpg", mode="rb") as test_data: results = predictor.predict_image(project.id, test_data.read()) waste_type = results.predictions[0].tag_name # Display the results. print(waste_type) sendUpdateToArduino(waste_type) # for prediction in results.predictions: # # print(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100)) # print(prediction.tag_name) if cv2.waitKey(1) == 27: break sleep(10) except Exception as e: os.remove(full_path_to_file) print(e)
def predict_project(prediction_key, project, iteration):
    """Run the object-detection *iteration* of *project* on the bundled test
    image and print each predicted tag with its confidence and bounding box.

    Args:
        prediction_key: Custom Vision prediction key.
        project: SDK project object (its .id is sent to the service).
        iteration: SDK iteration object identifying the trained model.
    """
    client = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)
    sample_path = os.path.join(IMAGES_FOLDER, "Test", "test_od_image.jpg")
    # Stream the raw image file to the prediction endpoint.
    with open(sample_path, mode="rb") as test_data:
        results = client.predict_image(project.id, test_data, iteration.id)
    # One line per prediction: tag, confidence percent, then box geometry.
    for prediction in results.predictions:
        box = prediction.bounding_box
        print ("\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100), box.left, box.top, box.width, box.height)
def predict_project(prediction_key, file_path):
    """Classify the image at IMAGES_FOLDER/file_path with the configured
    Custom Vision project (PROJECT_ID) and print each tag with its score.

    Args:
        prediction_key: Custom Vision prediction key.
        file_path: image path relative to IMAGES_FOLDER.
    """
    client = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)
    image_path = os.path.join(IMAGES_FOLDER, file_path)
    # Read the whole file and send the bytes to the prediction endpoint.
    with open(image_path, mode="rb") as test_data:
        results = client.predict_image(PROJECT_ID, test_data.read())
    # Report every predicted tag with its confidence as a percentage.
    for prediction in results.predictions:
        print("\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100))
def predict_project(subscription_key):
    """Predict tags for the bundled test image and print each with its score.

    Looks up (or trains, if missing) the project via find_or_train_project()
    before calling the prediction endpoint.

    Args:
        subscription_key: Custom Vision prediction key.
    """
    client = CustomVisionPredictionClient(subscription_key, endpoint=ENDPOINT)
    # Reuse an existing project, or train one on the fly.
    project = find_or_train_project()
    sample_path = os.path.join(IMAGES_FOLDER, "Test", "test_image.jpg")
    with open(sample_path, mode="rb") as test_data:
        results = client.predict_image(project.id, test_data.read())
    # Report every predicted tag with its confidence as a percentage.
    for prediction in results.predictions:
        print("\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100))
class PotDetector:
    """Detects pots on a four-burner stove frame via Azure Custom Vision and
    flags which stove quadrant each confident detection occupies.

    NOTE(review): the keys below are hard-coded in source — rotate them and
    load from configuration instead.
    """

    def __init__(self):
        self.ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"
        # Project keys
        self.training_key = "dae03cb013f840658708cd62781d90c1"
        self.prediction_key = "6212b24516c6492190c63d2b32084079"
        self.project_id = "a97fb679-77e7-4e07-b946-81c752ee3112"
        # Detections below this confidence (percent) are ignored.
        self.probability_min = 60
        # Now there is a trained endpoint that can be used to make a prediction
        self.predictor = CustomVisionPredictionClient(self.prediction_key, endpoint=self.ENDPOINT)

    def update(self, frame, stove):
        """Run detection on *frame* and refresh the four pot_detected flags
        on *stove* (upper_left/upper_right/lower_left/lower_right)."""
        results = self.predictor.predict_image(self.project_id, frame)
        center_x = 0.0
        center_y = 0.0
        # Clear every burner first; only confident detections re-set them.
        stove.upper_right.pot_detected = False
        stove.upper_left.pot_detected = False
        stove.lower_left.pot_detected = False
        stove.lower_right.pot_detected = False
        for prediction in results.predictions:
            if prediction.probability * 100 > self.probability_min:
                # Bounding-box fields appear to be normalised to [0, 1]
                # (quadrants split at 0.5) — the box centre picks the burner.
                box = prediction.bounding_box
                center_x = box.left + box.width / 2
                center_y = box.top + box.height / 2
                on_right = center_x > 0.5
                on_left = center_x < 0.5
                on_top = center_y < 0.5
                on_bottom = center_y > 0.5
                if on_right and on_top:
                    stove.upper_right.pot_detected = True
                elif on_left and on_top:
                    stove.upper_left.pot_detected = True
                elif on_left and on_bottom:
                    stove.lower_left.pot_detected = True
                elif on_right and on_bottom:
                    stove.lower_right.pot_detected = True
def predict_project(filename):
    """Classify the image in *filename* with the trained Custom Vision project.

    Prints every predicted tag with its confidence and returns the SDK
    results object, or an empty list when no trained project exists yet.
    """
    predictor = CustomVisionPredictionClient(PREDICTION_KEY, endpoint=ENDPOINT)
    # find_project() is expected to return the project, or False when
    # training has not been run yet.
    project = find_project()
    # Fix: identity test instead of `project == False`, which would also
    # match 0 or other falsy-equal values (PEP 8 recommendation).
    if project is False:
        print("Run training first, project not found!")
        return []
    with open(filename, mode="rb") as test_data:
        results = predictor.predict_image(project.id, test_data.read())
    # Display the results.
    for prediction in results.predictions:
        print("\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100))
    return results
# NOTE(review): incomplete, whitespace-collapsed fragment — it begins in the
# middle of a function (the tail of a project-lookup loop returning the
# project whose name matches YOUR_PROJECT_NAME) and then continues with
# script-level code: build a predictor, classify a giraffe test image, and
# print every tag with its confidence, pausing with input().
# Hedged observations: if the open/predict try fails, `results` is never
# bound, so the later loop would raise NameError — presumably the input()
# pause is meant to let the user read the error first; confirm intent.
# The misspelling "containign" is inside a runtime string and is left as-is.
if (proj.name == YOUR_PROJECT_NAME): return proj except Exception as e: print(str(e)) print("Set path for folder containign image to predict") IMAGES_FOLDER = os.path.dirname(os.path.realpath(__file__)) # print(str(IMAGES_FOLDER)) # print(str(os.path.join(IMAGES_FOLDER, "ElephantGiraffeTestImages", "testelephant2.jpg"))) # Get predictor and project objects predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT) project = find_project() print("Make prediction") try: with open(os.path.join(IMAGES_FOLDER, "ElephantGiraffeTestImages", "testgiraffe1.jpg"), mode="rb") as test_data: results = predictor.predict_image(project.id, test_data.read()) except Exception as e: print(str(e)) input() # Display the results. for prediction in results.predictions: print(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100)) input()
# NOTE(review): incomplete, whitespace-collapsed fragment of a camera capture
# loop — it begins mid-iteration (uses `stop`, `start`, `image`, `rawCapture`
# defined earlier) and ends mid-branch. Apparent flow: print the frame rate,
# exit on 'q', otherwise use the mean L channel of the LAB-converted frame as
# a day/night test (threshold 100) and route the saved image to one of two
# Custom Vision models (daylight: predictor/project_id; infrared:
# predictor_i/project_id_i), collecting above-threshold predictions as
# [tag, probability, left, top, width, height] rows.
# Hedged note: np.float is removed in NumPy 1.24+ — presumably this ran on an
# older NumPy; use float/np.float64 when this fragment is restored.
seconds = stop - start print(1 / seconds) key = cv2.waitKey(1) & 0xFF rawCapture.truncate(0) if key == ord("q"): break else: lab_img = cv2.cvtColor(image, cv2.COLOR_BGR2LAB) l_c, a_c, b_c = cv2.split(lab_img) t = "" if (np.average(l_c)) > 100: #light threshold t = "Daylight" with open(image_path, mode="rb") as test_data: results = predictor.predict_image(project_id, test_data, iteration_id) im_d = np.array(image, dtype=np.uint8) res = np.asarray([[ p.tag_name, p.probability, p.bounding_box.left, p.bounding_box.top, p.bounding_box.width, p.bounding_box.height ] for p in results.predictions]) classes = res[res[:, 1].astype(np.float) > thresh] w, h, c = im_d.shape else: t = "Infrared" with open(image_path, mode="rb") as test_data: results = predictor_i.predict_image(project_id_i, test_data, iterationId_i) im_d = np.array(image, dtype=np.uint8)
# Kick off training and poll until Azure reports the iteration finished.
print("Training...")
iteration = trainer.train_project(project.id)
while iteration.status != "Completed":
    iteration = trainer.get_iteration(project.id, iteration.id)
    print("Training status: " + iteration.status)
    time.sleep(1)

# The iteration is now trained. Make it the default project endpoint
trainer.update_iteration(project.id, iteration.id, is_default=True)
print("Done!")

# Test the model and show the results.
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient

# Now there is a trained endpoint that can be used to make a prediction
predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)

# Open the sample image and get back the prediction results.
with open("images/Test/test_od_image.jpg", mode="rb") as test_data:
    results = predictor.predict_image(project.id, test_data, iteration.id)

# Display each detected tag with its confidence and bounding-box geometry.
for prediction in results.predictions:
    box = prediction.bounding_box
    print(
        "\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100), box.left, box.top, box.width, box.height)
# Azure Custom Vision prediction service.
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient

# Change endpoint location according to Azure account
ENDPOINT = "https://southeastasia.api.cognitive.microsoft.com"
projectID = '47917e0f-ee76-4fc3-afe4-1eb02b94d6b0'

# Replace with a valid key
# NOTE(review): keys are hard-coded in source — rotate and load from config.
training_key = "6ad939516c234dfc8e7de03935037264"
prediction_key = "4b0ab4fa945a41b187c5fcb6c4ea5cdb"

# Now there is a trained endpoint that can be used to make a prediction
predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)

img_path = input('Image path: ')

# Open the sample image and get back the prediction results.
with open(img_path, mode="rb") as test_data:
    results = predictor.predict_image(projectID, test_data)

# Display each detected tag with its confidence and bounding-box geometry.
for prediction in results.predictions:
    box = prediction.bounding_box
    print(
        "\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100), box.left, box.top, box.width, box.height)
def Google_Microsoft_Fun(image_path, file_name_created):
    """Label every *.jpg under *image_path* with both Google Vision and a
    Microsoft Custom Vision project, appending results to *file_name_created*.

    Record format per image: the image path, then one '#'-prefixed line per
    Google label, then the '!'-prefixed Microsoft top tag followed by
    '---tag: NN.NN%' lines for every Microsoft prediction.

    Fixes in this revision:
    - no longer shadows the builtins `str` and `file`;
    - writes text, not bytes: `.encode("utf-8")` results written to a
      text-mode file raise TypeError on Python 3;
    - the initial `open(python_name, "w")` handle is closed instead of leaked.
    """
    # Google: instantiate a Vision API client.
    client = vision.ImageAnnotatorClient()

    # Microsoft: trained endpoint that can be used to make a prediction.
    # NOTE(review): credentials are hard-coded — rotate and move to config.
    prediction_key = "82399d964ace4ef8a3b8a8b5a2540540"
    ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"
    predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT)
    # Activity_Recognition_New
    project_id = "d620a7d4-2aec-42a1-8ea3-810077cdcbc2"

    # Collect the jpg files, sorted by the trailing _<n> of the file name
    # (naming convention: whatever_1.jpg, whatever_2.jpg, ...).
    image_list = [os.path.join(image_path, f)
                  for f in os.listdir(image_path) if f.endswith('jpg')]
    image_list = sorted(image_list,
                        key=lambda x: int(x.split('.')[-2].split('_')[-1]))

    # Truncate the output file once, then append one record per image.
    python_name = file_name_created
    open(python_name, "w").close()
    for f in image_list:
        with open(python_name, "a") as out:
            out.write(f)
            out.write("\n")
        # Google labels.
        with io.open(f, 'rb') as image_file:
            content = image_file.read()
        image = types.Image(content=content)
        response = client.label_detection(image=image)
        with open(python_name, "a") as out:
            for label in response.label_annotations:
                out.write("#")
                out.write(label.description)
                out.write("\n")
        # Microsoft predictions.
        with io.open(f, 'rb') as image_file:
            results = predictor.predict_image(project_id, image_file)
        with open(python_name, "a") as out:
            for count, prediction in enumerate(results.predictions):
                if count == 0:
                    # The first prediction carries the highest score.
                    out.write("!")
                    out.write(prediction.tag_name)
                    out.write("\n")
                out.write("---" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100))
                out.write("\n")
# NOTE(review): whitespace-collapsed method; the nesting is ambiguous (in
# particular which `if` the trailing `else:` pairs with, and the scope of the
# `global count` counter), so the code is left byte-identical — restore the
# original indentation from version control before changing it.
# Apparent intent: a drowsy-driver endpoint — grab one webcam frame, classify
# it with Custom Vision, and if the CLOSED_EYES tag appears with probability
# above 0.01% (NOTE: that threshold is effectively always true — confirm the
# intended value, perhaps 0.01 as a fraction), fetch the vehicle location via
# the smartcar SDK, POST a "closed" hazard payload to jsonstore.io, otherwise
# POST an "open" payload; returns the last json_data built.
# Review smells visible even collapsed: input() inside a request handler
# blocks the server, and the module-global `count` is not thread-safe.
def get(self): json_data = {"data": "open", "loc": ""} video_capture = cv2.VideoCapture(0) # Get predictor and project objects predictor = CustomVisionPredictionClient(prediction_key, endpoint=ENDPOINT) project = find_project() ret, frame = video_capture.read() #print (ret) cv2.imwrite('./color_img.jpg', frame) #cv2.imshow('Video', frame) try: with open('./color_img.jpg', mode="rb") as test_data: results = predictor.predict_image(project.id, test_data.read()) except Exception as e: print(str(e)) input() # Display the results. for prediction in results.predictions: #print(prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100)) if prediction.tag_name == CLOSED_EYES: if ((prediction.probability * 100) > 0.01): global count count += 1 if count == 1: # global count count = 0 acc = '66ae978d-d815-4f75-8fe0-fc765dc5b829' req = smartcar.get_vehicle_ids(acc) vld = req['vehicles'][0] v = smartcar.Vehicle(vld, acc) print(v.location()) json_data = { "data": "closed", "loc": v.location()['data'] } message(vld, v.location()['data']) #request.put(json=json_data) print("hazard!") url = 'https://www.jsonstore.io/962b54063ad9a4019de7f1629eea83173b549ae39f2d064e1f9f724b35851731' headers = {'Content-Type': 'application/json'} requests.post(url, json=json_data, headers=headers) else: json_data = {"data": "open", "loc": ""} url = 'https://www.jsonstore.io/962b54063ad9a4019de7f1629eea83173b549ae39f2d064e1f9f724b35851731' headers = {'Content-Type': 'application/json'} requests.post(url, json=json_data, headers=headers) count = 0 #send_main(SRC, DST, "!!!") # else: # json_data = {"data":"allgood", "loc":""} # #global count # #count = 0 # input() # if cv2.waitKey(1) & 0xFF == ord('q'): # break video_capture.release() cv2.destroyAllWindows() return json_data
# pip install azure-cognitiveservices-vision-customvision
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient

# Now there is a trained endpoint that can be used to make a prediction
# NOTE(review): "Prediction key" and "Project.id" below are placeholders to
# be replaced with real values before running.
ENDPOINT = "https://southcentralus.api.cognitive.microsoft.com"
predictor = CustomVisionPredictionClient("Prediction key", endpoint=ENDPOINT)

# Open the sample image and get back the prediction results.
with open("test.jpg", mode="rb") as test_data:
    results = predictor.predict_image("Project.id", test_data)

# Display each detected tag with its confidence and bounding-box geometry.
for prediction in results.predictions:
    box = prediction.bounding_box
    print ("\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100), box.left, box.top, box.width, box.height)