def Watson_VR(PicPath_ToClassify):
    """Classify one image with a custom Watson Visual Recognition model.

    Returns a ``(class_name, class_score)`` tuple; both are ``0`` when the
    classifier reports no classes above the threshold.
    """
    # Authentication ################### use your API key and URL ###################
    vr_authenticator = IAMAuthenticator('')
    visual_recognition = VisualRecognitionV3(version='',
                                             authenticator=vr_authenticator)
    visual_recognition.set_service_url('')

    # Classify the image using the custom classifier defined for this project.
    with open(PicPath_ToClassify, 'rb') as images_file:
        classes = visual_recognition.classify(
            images_file=images_file,
            threshold='0.6',
            classifier_ids='').get_result()  ##### your classifier ID here #####

    detected_classes = classes['images'][0]['classifiers'][0]['classes']
    if not detected_classes:
        return 0, 0
    top_hit = detected_classes[0]
    return top_hit['class'], top_hit['score']
def __init__(self):
    """Build authenticated clients for the NLU, Tone Analyzer and Visual
    Recognition Watson services and point each at its service endpoint."""
    # Natural Language Understanding
    self.nlu_authenticator = IAMAuthenticator(
        'MfGCw7jH73n6Eaee1xPJA_ZC6VkJldmbNL9fsrHQe1qm')
    self.natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=self.nlu_authenticator)
    self.natural_language_understanding.set_service_url(
        'https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
    )

    # Tone Analyzer
    self.tone_authenticator = IAMAuthenticator(
        'gAdmEsh1sEKmYlw2FjabXQtE_znP26DF7ZUwa4Pltx4K')
    self.tone_analyzer = ToneAnalyzerV3(
        version='2019-07-12', authenticator=self.tone_authenticator)
    self.tone_analyzer.set_service_url(
        'https://api.eu-gb.tone-analyzer.watson.cloud.ibm.com/instances/175258bd-a3f9-4f9e-a7c6-1a012b72d887'
    )

    # Visual Recognition
    self.visual_authenticator = IAMAuthenticator(
        '5w-5fC5sEzkc9W2SCBUAUrq6CuI_ZQO5KeRQC3VLuFSt')
    self.visual_recognition = VisualRecognitionV3(
        version='2019-07-12', authenticator=self.visual_authenticator)
    self.visual_recognition.set_service_url(
        'https://gateway.watsonplatform.net/visual-recognition/api')
def evalimage():
    """Detect faces (Watson VR) in the image at module-level path ``imm``.

    Downsizes the file first when it is larger than ~1 MB to save upload
    time, then writes the raw detection response to ``result.json``.
    """
    # Downsize image before sending if larger than 1M, saves upload time.
    imsize = os.stat(imm).st_size
    if imsize > 1000000:
        # BUG FIX: the original left the PIL handle open (resource leak);
        # use a context manager so it is closed deterministically.
        with Image.open(imm) as im:
            im.save(imm, quality=25)  # raise quality if sureness starts dropping

    # API client — replace the placeholder with your key.
    visual_recognition = VisualRecognitionV3(
        version='2018-03-19',
        iam_apikey='{yourapikey}')
    print("Sending image")

    # Upload the image and collect the face-detection result.
    with open(imm, 'rb') as image:
        faces = visual_recognition.detect_faces(image).get_result()
    print("Image sent, files received")

    # Persist the response as JSON (stray trailing semicolon removed).
    with open('result.json', 'w') as jsondata:
        json.dump(faces, jsondata, indent=2)
    return
def watson_classify(filename, directory, classifier_id):
    """Run a custom IBM Watson Visual Recognition model on one image.

    Reads the service API key from ``watson_credentials.json`` and returns a
    dict holding the image name, an (arbitrary) classifier_type label, the
    model name, and the list of class/score pairs.
    """
    # Pull the apikey for the IBM Watson VR instance from the credentials file.
    with open("watson_credentials.json", "r") as cred_file:
        apikey = json.load(cred_file)["apikey"]

    # Instantiate the Watson VR service (legacy iam_apikey-style auth).
    vr_service = VisualRecognitionV3(version='2018-03-19', iam_apikey=apikey)

    # Classify with threshold 0.0 so every class comes back scored.
    with open(os.path.join(directory, filename), "rb") as images_file:
        result = vr_service.classify(
            images_file,
            threshold="0.0",
            classifier_ids=classifier_id).get_result()

    classifier_payload = result["images"][0]["classifiers"][0]
    return {
        "image": filename,
        "classifier_type": "Custom IBM Watson Visual Recognition Model",
        "classifier": classifier_payload["name"],
        "classes": classifier_payload["classes"],
    }
def scene_detect(frame):
    """Classify *frame* with Watson VR and flag crowd/people scenes.

    Returns 1 when the classifier reports a "crowd" or "people" class,
    otherwise 0.
    """
    import json
    from collections import OrderedDict
    from ibm_watson import VisualRecognitionV3
    from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

    authenticator = IAMAuthenticator('mto9k-KHFPcM1jKbmvsy0-qDK7qYX4CJnKOtSDFSPxo0')
    visual_recognition = VisualRecognitionV3(
        version='2018-03-19',
        authenticator=authenticator
    )
    visual_recognition.set_service_url('https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/185b8d2d-f8cd-4e52-89bf-7d444dc56161')

    classes_result = visual_recognition.classify(
        images_file=frame, images_filename='sample').get_result()

    store = dict()
    flag = 0
    for entry in classes_result["images"][0]["classifiers"][0]["classes"]:
        # score -> class mapping, kept (sorted) for debugging purposes.
        store[json.dumps(entry["score"])] = json.dumps(entry["class"])
        # BUG FIX: the original compared json.dumps(class) (which wraps the
        # value in quotes, e.g. '"people"') against the bare string "people",
        # so that branch could never match. Compare the raw class name.
        label = str(entry["class"])
        if label == "crowd" or label == "people":
            flag = 1
    store = OrderedDict(sorted(store.items(), reverse=True))
    return flag
def classify_image(api_key, classifier_id):
    """Classify every .jpg/.png under ./image-file with a custom Watson model.

    Returns the top (at most three) class/score dicts, sorted by descending
    score, for the last image processed; API failures are printed and the
    image is skipped.
    """
    # Connect to the Watson Visual Recognition account.
    visual_recognition = VisualRecognitionV3(
        version='2018-03-19',
        iam_apikey=api_key,
        url='https://gateway.watsonplatform.net/visual-recognition/api')

    img_path = "image-file"
    img_path_images = os.getcwd() + "/" + img_path
    test_file_names = [
        os.path.join(img_path_images, f)
        for f in os.listdir(img_path_images)
        if f.endswith(".jpg") or f.endswith(".png")
    ]

    top_sorted_class_scores = []
    for img_name in test_file_names:
        try:
            with open(img_name, "rb") as image_file:
                # The custom classifier is selected via classifier_ids here.
                res_classification = visual_recognition.classify(
                    images_file=image_file,
                    threshold='0.1',
                    classifier_ids=[classifier_id]).get_result()
                scored = list(
                    res_classification["images"][0]["classifiers"][0]["classes"])
            scored.sort(key=itemgetter("score"), reverse=True)
            if len(scored) > 3:
                top_sorted_class_scores = scored[0:3]
            else:
                top_sorted_class_scores = scored
        except ApiException as ex:
            print(ex)
    return top_sorted_class_scores
def getFeatFromPicture(url):
    """Classify the image at local path *url* with Watson's default model.

    Returns a list of ``[class, score]`` pairs, excluding a small list of
    uninteresting labels.
    """
    authenticator = IAMAuthenticator('kuo4VcndKjTh-piE0JWLTF3vRmEKimmutQ7M-SrZsTRo')
    visual_recognition = VisualRecognitionV3(
        version='2018-03-19',
        authenticator=authenticator
    )
    visual_recognition.set_service_url('https://gateway.watsonplatform.net/visual-recognition/api')

    with open(url, 'rb') as images_file:
        classes = visual_recognition.classify(
            images_file=images_file,
            classifier_ids=["default"]).get_result()
    res = json.dumps(classes, indent=2)
    print(res)

    result = classes['images'][0]['classifiers'][0]['classes']
    list_result = []
    filtered_out_words = ['hotel', 'sky', 'color', 'building', 'slope', 'road']
    for r in result:
        print(r['class'])
        list_result.append([r['class'], r['score']])
    # BUG FIX: the original returned filter(list_result, filtered_out_words),
    # which passes a list where builtin filter() expects a callable and
    # raises TypeError as soon as the result is iterated. Apply the intended
    # exclusion filter explicitly.
    return [pair for pair in list_result if pair[0] not in filtered_out_words]
def visual_recog(frame: str, location_name: str):
    """Classify the image URL *frame* with the IBM Watson VR model.

    Fires the red-rhino alert for *location_name* when an alert object
    (e.g. fire) is detected; returns 1 when a combustible object is present,
    else 0.
    """
    vr_client = VisualRecognitionV3(
        version='2018-03-19',
        authenticator=authenticator
    )
    vr_client.set_service_url(os.getenv("SERVICE_URL"))

    classes_result = vr_client.classify(url=frame).get_result()
    pred_classes = {
        item['class']
        for item in classes_result['images'][0]['classifiers'][0]['classes']
    }

    # Fire (or similar) present -> activate the red rhino robot.
    if pred_classes & ALERT_OBJ:
        alert_3r(location_name)

    # Report whether any combustible object was detected.
    return 1 if pred_classes & COMBUSTIBLE_OBJ else 0
def imageAnalyser(
        url,
        server='https://api.us-south.visual-recognition.watson.cloud.ibm.com'):
    """Classify the image at *url* with Watson Visual Recognition.

    Returns, for each image in the response, a list of (class, score)
    tuples sorted by descending score.
    """
    IMAGE_API_KEY = os.getenv('IMAGEANALYSERAPI')
    image_analyser = VisualRecognitionV3(
        version='2018-03-19',
        authenticator=IAMAuthenticator(IMAGE_API_KEY))
    image_analyser.set_service_url(server)

    time_start = time()
    classes_result = image_analyser.classify(url=url).get_result()
    time_end = time()
    if debug: print(f"Processing Time: {time_end-time_start}")
    if debug: print(json.dumps(classes_result, indent=2))  # Data in - for debug

    def sortClasses(lst):
        # In-place sort by score, highest first.
        lst.sort(key=lambda x: x[1], reverse=True)
        return lst

    img_lst = []
    for img in classes_result["images"]:
        # Exactly one classifier per image is expected from this endpoint.
        if (len(img["classifiers"]) != 1):
            # FIX: corrected typos in the original message
            # ("classifer" / "happend").
            raise Exception(
                "An image has more than 1 classifier. Check why this happened!")
        classes = [(i["class"], i["score"])
                   for i in img["classifiers"][0]["classes"]]
        img_lst.append(sortClasses(classes))
    return img_lst
def classify_image(file):
    """Classify ./media/images/<file> with the polc_660192415 custom model.

    Returns the raw classification response dict, or None when the API call
    or the file open fails.
    """
    authenticator = IAMAuthenticator(API_KEY)
    vr_service = VisualRecognitionV3(version='2018-03-19',
                                     authenticator=authenticator)
    vr_service.set_service_url(API_URL)
    vr_service.set_default_headers({'x-watson-learning-opt-out': "true"})
    vr_service.set_disable_ssl_verification(True)

    # BUG FIX: bind `classes` up front — in the original, any caught
    # exception left it unbound and the final `return classes` raised
    # NameError instead of reporting failure.
    classes = None
    try:
        with open('./media/images/' + str(file), 'rb') as images_file:
            try:
                classes = vr_service.classify(
                    images_file=images_file,
                    threshold=0.0,
                    classifier_ids=['polc_660192415']).get_result()
                print(json.dumps(classes, indent=2))
            except WatsonApiException as ex:
                print(f'Status code: {ex.code}')
                print(f'Error message: {ex.message}')
            except ApiException as ApiEx:
                print(f'Status code: {ApiEx.code}')
                print(f'Error message: {ApiEx.message}')
    except FileNotFoundError:
        print(
            'File insertion wasn\'t successful. Please try with different name or file source'
        )
    return classes
def classify_image(request):
    """Django view: classify an uploaded image with a custom Watson VR model
    and render the class scores sorted high-to-low."""
    form = ImageForm(request.POST, request.FILES)
    if form.is_valid():
        img = form.save()
        api_key = "7_fPX5vxcsijEg5wRBre3TL5SjX9cLMu7h-gwZP_t0C2"
        classifier_id = "Muebles_882291314"
        authenticator = IAMAuthenticator(api_key)
        watson_vr = VisualRecognitionV3(version='2018-03-19',
                                        authenticator=authenticator)
        watson_vr.set_service_url(
            'https://gateway.watsonplatform.net/visual-recognition/api')
        with open(img.img.path, "rb") as image_file:
            res_classification = watson_vr.classify(
                images_file=image_file,
                threshold=0.1,
                classifier_ids=[classifier_id]).get_result()
        found_classes = list(
            res_classification['images'][0]['classifiers'][0]['classes'])
        sorted_class_scores = sorted(found_classes,
                                     key=itemgetter('score'),
                                     reverse=True)
    else:
        sorted_class_scores = []
        print(form.errors)
    return render(request, "classfied_results.html",
                  {'results': sorted_class_scores})
def image(updater, context):
    """Telegram handler: download the sent photo, ask Watson VR what it
    shows, reply with the top class and score, then delete the local file."""
    bot = Bot(token=TOKEN)
    file_id = updater.message.photo[-1]
    newFile = bot.get_file(file_id)
    newFile.download("./images/img.jpg")

    # Configure the IBM Watson VisualRecognition client.
    authenticator = IAMAuthenticator(IBM_TOKEN)
    recognizer = VisualRecognitionV3(version=IBM_VERSION,
                                     authenticator=authenticator)
    recognizer.set_service_url(IBM_URL)

    # Send the downloaded photo to IBM Watson and receive the result.
    with open("./images/img.jpg", "rb") as image_file:
        classes = recognizer.classify(images_file=image_file,
                                      threshold="0.6").get_result()

    top_class = classes["images"][0]["classifiers"][0]["classes"][0]
    updater.message.reply_text(TEMPLATES["image_recognition"].format(
        name=top_class["class"], percent=top_class["score"]))
    rm("./images/img.jpg")
def __init__(self,
             corpus_dir,
             config_name=None,
             iam_api_key=None,
             service_instance_url=None):
    """Initialize the Visual Recognition Runner.

    Builds the Visual Recognition service client — credentials come either
    directly from *iam_api_key*/*service_instance_url* or from the config
    file *config_name* — plus the Zip helper, the corpus metadata for
    *corpus_dir*, and the service wrapper.

    Arguments:
        config_name {string} -- The name of the config file.
        corpus_dir {string} -- The name of the folder that contains the corpus.
    """
    if config_name is not None:
        config_iam_api_key, config_service_url = self._get_config(config_name)
    else:
        config_iam_api_key = iam_api_key
        config_service_url = service_instance_url

    authenticator = IAMAuthenticator(apikey=config_iam_api_key)
    self.vr_instance = VisualRecognitionV3(version='2018-03-19',
                                           authenticator=authenticator)
    self.vr_instance.set_service_url(config_service_url)

    self.zip_helper = ZipHelper()
    self.corpora = Corpora(corpus_dir)
    self.vr_service = VrServiceWrapper(self.vr_instance, self.zip_helper)
def classify_image_v3(picture_name, classifier_id):
    """Authenticate at IBM Cloud and classify one stored picture.

    :param picture_name: file name under ./pictures/classified_images/
    :param classifier_id: Watson classifier id to classify with
    :return: list of detected class names, or an error string on ApiException
    """
    try:
        authenticator = IAMAuthenticator('e9Eeu6hkRaPEwO05UYRLrGmmot3OvDqa-1DLfjuY85ad')
        service = VisualRecognitionV3(
            '2019-12-19',
            authenticator=authenticator)
        service.set_service_url(
            'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/8c0dc5a5-aae6-4f84-a1ba-50563dc2b957'
        )
        with open(f'./pictures/classified_images/{picture_name}', 'rb') as images_file:
            response = service.classify(images_file=images_file,
                                        classifier_ids=classifier_id
                                        ).get_result()
        # Collect only the class names out of the response payload.
        return [entry['class']
                for entry in response['images'][0]['classifiers'][0]['classes']]
    except ApiException as ex:
        return f"Wystąpił błąd o kodzie {str(ex.code)}: {ex.message}"
def image_search():
    """Classify the bundled pizza image with the Watson 'food' model and
    return a hard-coded (latitude, longitude) plus the detected food name."""
    IBM_API_KEY = 'api-key'
    IBM_URL = 'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/c0aa675b-7bdd-42b8-bd93-df6d6fc3a5ce'

    # Hand the API KEY and URL to the Visual Recognition client.
    authenticator = IAMAuthenticator(IBM_API_KEY)
    vr_client = VisualRecognitionV3(
        version='2018-03-19', authenticator=authenticator)
    vr_client.set_service_url(IBM_URL)

    # Run Visual Recognition on the sample image file.
    with open('./static/Pepperoni-pizza.jpg', 'rb') as image:
        classes = vr_client.classify(images_file=image,
                                     threshold='0.6',
                                     classifier_ids='food').get_result()

    output_query = classes['images'][0]['classifiers'][0]['classes'][0]['class']
    # Fixed demo coordinates.
    latitude = 12.959111
    longitude = 77.732022
    return latitude, longitude, output_query
def sendImage():
    """Flask endpoint: decode a base64 image from the form, classify it with
    the Watson 'food' model and return the detected class names as JSON."""
    data = request.form
    authenticator = IAMAuthenticator(os.getenv("IAMAUTHENTICATOR_KEY"))
    instance = VisualRecognitionV3(version='2018-03-19',
                                   authenticator=authenticator)
    instance.set_service_url(os.getenv("SERVICE_URL_KEY"))
    classifier_ids = ["food"]

    # Decode the base64 payload into a temp file.
    impo2 = bytes(data.get('image'), encoding='utf8')
    imgdata = base64.b64decode(impo2)
    # BUG FIX: mkstemp() returns (fd, path); the original discarded the fd,
    # leaking one file descriptor per request. Close it explicitly.
    fd, tf = tempfile.mkstemp()
    os.close(fd)
    with open(tf, 'wb') as f:
        f.write(imgdata)

    # Classify; the with-block closes the handle even when the API errors
    # (the original also called f.close() on an already-closed handle).
    with open(tf, 'rb') as file:
        img2 = instance.classify(images_file=file,
                                 classifier_ids=classifier_ids).get_result()

    predictions = []
    print(type(img2))
    for i in img2['images'][0]['classifiers'][0]['classes']:
        predictions.append(i['class'])
    print(predictions)
    # NOTE: an unreachable `return []` after this return was removed.
    return jsonify(predictions)
def display():
    """Flask view: on POST, classify an uploaded food photo, look up its
    nutrients via the USDA FoodData Central API and render them; on any
    other method show the submission form."""
    if request.method != "POST":
        return render_template('submission.html')

    image = request.files["food"]
    authenticator = IAMAuthenticator('2A6BucKErMHbNpKGwdyGMBTsAZYxRYmm8Rxr0chzTvfm')
    visual_recognition = VisualRecognitionV3(
        version='2018-03-19', authenticator=authenticator)
    visual_recognition.set_service_url('https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/80c78105-880f-4bb7-b79c-93764795ee73')

    classes = visual_recognition.classify(images_filename=image.filename,
                                          images_file=image,
                                          classifier_ids='food').get_result()
    data = json.loads(json.dumps(classes, indent=4))
    foodName = data["images"][0]['classifiers'][0]["classes"][0]["class"]

    # Query the USDA search endpoint for the detected food.
    nutrients = {}
    USDAapiKey = '9f8yGs19GGo5ExPpBj7fqjKOFlXXxkJdMyJKXwG3'
    response = requests.get('https://api.nal.usda.gov/fdc/v1/foods/search?api_key={}&query={}&requireAllWords={}'.format(USDAapiKey, foodName, True))
    data = json.loads(response.text)
    concepts = data['foods'][0]['foodNutrients']

    # Nutrients worth showing, matched on the first comma-separated segment
    # of the USDA nutrient name.
    arr = ["Sugars","Energy", "Vitamin A","Vitamin D","Vitamin B", "Vitamin C", "Protein","Fiber","Iron","Magnesium", "Phosphorus","Cholestrol","Carbohydrate","Total lipid (fat)", "Sodium", "Calcium",]
    for x in concepts:
        name_key = x['nutrientName'].split(',')[0]
        if name_key in arr:
            if name_key == "Total lipid (fat)":
                # Shorter label for display.
                nutrients['Fat'] = str(x['value']) + " " + x['unitName']
            else:
                nutrients[name_key] = str(x['value']) + " " + x['unitName']
    return render_template('display.html', x=foodName, data=nutrients,
                           account=session['username'])
def create_classifier(training_data_dict, negative_class, classifier_name):
    """Create a custom Watson Visual Recognition classifier on IBM Cloud.

    :param training_data_dict: dict of positive-example zip files
    :param negative_class: .zip file with negative examples
    :param classifier_name: string
    :return: the created model (json), or an error string on ApiException
    """
    try:
        authenticator = IAMAuthenticator('e9Eeu6hkRaPEwO05UYRLrGmmot3OvDqa-1DLfjuY85ad')
        vr_client = VisualRecognitionV3(
            '2019-12-19',
            authenticator=authenticator)
        vr_client.set_service_url(
            'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/8c0dc5a5-aae6-4f84-a1ba-50563dc2b957'
        )
        return vr_client.create_classifier(
            name=classifier_name,
            positive_examples=training_data_dict,
            negative_examples=negative_class).get_result()
    except ApiException as ex:
        return f"Wystąpił błąd o kodzie {str(ex.code)}: {ex.message}"
def authenticate_watson():
    """Build a Watson Visual Recognition client from the IBMCLOUD_API_KEY
    and IBMCLOUD_API_URL environment variables."""
    api_key = os.getenv("IBMCLOUD_API_KEY")
    client = VisualRecognitionV3(version="2018-03-19",
                                 authenticator=IAMAuthenticator(api_key))
    client.set_service_url(os.getenv("IBMCLOUD_API_URL"))
    return client
def classify_zip(file):
    """Classify every image inside ./media/files/<file> (a zip whose first
    path segment names the expected party) and tally a confusion matrix.

    Returns ``{'democrat'|'republican': {'true': n, 'false': n}}`` where
    'true' counts matches between the folder name and the predicted class.
    """
    authenticator = IAMAuthenticator(API_KEY)
    vr_service = VisualRecognitionV3(version='2018-03-19',
                                     authenticator=authenticator)
    vr_service.set_service_url(API_URL)
    vr_service.set_default_headers({'x-watson-learning-opt-out': "true"})
    vr_service.set_disable_ssl_verification(True)

    confusion_matrix = {
        'democrat': {
            'true': 0,
            'false': 0
        },
        'republican': {
            'true': 0,
            'false': 0
        }
    }
    with ZipFile('./media/files/' + str(file), 'r') as zip_file:
        print('########## printing zip file')
        for image_file in zip_file.namelist():
            print(image_file.split('/'))
            # BUG FIX: the original tested `is not ''` — an identity check
            # on a string literal, which is implementation-defined and has
            # emitted a SyntaxWarning since Python 3.8. Use equality.
            if image_file.split('/')[1] != '':
                with zip_file.open(image_file, 'r') as image:
                    expected_result: str = str(image_file.split('/')[0])
                    print('# image file: #')
                    print(image)
                    try:
                        classes = vr_service.classify(
                            images_file=image,
                            threshold=0.0,
                            classifier_ids=['polc_660192415']).get_result()
                        print('#### result : ')
                        print(
                            str(classes['images'][0].get('classifiers')[0].get(
                                'classes')[0].get('class')))
                        actual_label: str = str(classes['images'][0].get(
                            'classifiers')[0].get('classes')[0].get('class'))
                        print(actual_label.lower())
                        print(expected_result.lower())
                        if expected_result.lower() == actual_label.lower():
                            confusion_matrix[
                                expected_result.lower()]['true'] += 1
                        else:
                            confusion_matrix[
                                expected_result.lower()]['false'] += 1
                    except WatsonApiException as ex:
                        print(f'Status code: {ex.code}')
                        print(f'Error message: {ex.message}')
                    except ApiException as ApiEx:
                        print(f'Status code: {ApiEx.code}')
                        print(f'Error message: {ApiEx.message}')
    print(confusion_matrix)
    return confusion_matrix
def setting_API(self):
    """Store the Watson VR credentials and endpoint on the instance and
    build the authenticated Visual Recognition client."""
    self.API_key = "5p6k57Ul0eleYgJKvgnE_39uIHI4brPd0_ON9W3UC22D"
    self.API_version = "2018-03-19"
    # Alternative (eu-de) endpoint kept for reference:
    # "https://api.eu-de.visual-recognition.watson.cloud.ibm.com"
    self.service_url = "https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/0dfd12f6-c530-4ba5-b702-ec407e215885"

    api_authenticator = IAMAuthenticator(self.API_key)
    self.visual_recognition = VisualRecognitionV3(
        version=self.API_version, authenticator=api_authenticator)
    self.visual_recognition.set_service_url(self.service_url)
def classifyImageWithVR(image_file, threshold='0.6'):
    """Classify the image at path *image_file* with the rpsxone custom model
    and return its list of class/score dicts."""
    service = VisualRecognitionV3('2018-03-19')
    # Open under a distinct name — the original shadowed the parameter.
    with open(image_file, 'rb') as image_handle:
        results = service.classify(
            images_file=image_handle,
            threshold=threshold,
            classifier_ids=['rpsxone_245199051']).get_result()
    return results["images"][0]["classifiers"][0]["classes"]
def connect_watson_vr():
    """Establish a connection to the Watson VR service and return the client."""
    service = VisualRecognitionV3(version=VR_VERSION,
                                  authenticator=IAMAuthenticator(VR_KEY))
    service.set_service_url(VR_URL)
    print("\nConnection established.\n")
    return service
def watson(name):
    """Classify data/<name>.png against the caller's own Watson models and
    return the response pretty-printed as a JSON string."""
    authenticator = IAMAuthenticator('AAAAAAAAAAAAAAAAA')
    vr_client = VisualRecognitionV3(version='2018-03-19',
                                    authenticator=authenticator)
    vr_client.set_service_url(
        'https://api.us-south.visual-recognition.watson.cloud.ibm.com')
    with open('data/' + name + '.png', 'rb') as images_file:
        classes = vr_client.classify(images_file=images_file,
                                     threshold='0.6',
                                     owners=["me"]).get_result()
    pretty = json.dumps(classes, indent=2)
    print(pretty)
    return pretty
def __init__(self, watson_api_version, watson_api_key, classifier_ids):
    """Hold the Watson credentials/classifier ids and attempt to build the
    VR client (legacy iam_apikey auth, SSL verification disabled); a failed
    connection is logged via LOG.exception."""
    self.watson_api_version = watson_api_version
    self.watson_api_key = watson_api_key
    self.classifier_ids = classifier_ids
    self.visual_recognition = None
    try:
        self.visual_recognition = VisualRecognitionV3(
            version=self.watson_api_version,
            iam_apikey=self.watson_api_key
        )
        # NOTE: deliberately disables SSL verification, as in the original.
        self.visual_recognition.disable_SSL_verification()
    except ApiException as ex:
        LOG.exception("NorthStarWatson:__init__ Failed to connect watson "
                      "visual recognition with %s", ex)
def request_IBM_resources():
    """Classify ./canadian_goose.jpg against the user's own Watson models
    and return the response serialized as a JSON string."""
    authenticator = IAMAuthenticator(
        '8VvzgIf3e8WJ_Lh6KlOiPD6KRj1Fm1jCzmjQVIbMNup7')
    vr_client = VisualRecognitionV3(version='2018-03-19',
                                    authenticator=authenticator)
    vr_client.set_service_url(
        'https://gateway.watsonplatform.net/visual-recognition/api')
    with open('./canadian_goose.jpg', 'rb') as images_file:
        classes = vr_client.classify(images_file=images_file,
                                     threshold='0.1',
                                     owners=["me"]).get_result()
    print(json.dumps(classes, indent=2))
    return json.dumps(classes)
def watsonClassifier(path):
    """Classify the image at *path* with the custom Watson model configured
    in enviroments.watsonCredentials and return the raw response."""
    print("classificar no watson")
    service = VisualRecognitionV3(
        '2018-03-19',
        iam_apikey=enviroments.watsonCredentials['iam_apikey'])
    with open(path, 'rb') as images_file:
        classes = service.classify(
            images_file,
            threshold='0.0',
            classifier_ids=enviroments.watsonCredentials['classifier_ids']
        ).get_result()
    print("classificado")
    print(f"Classes do Watson {classes}")
    return classes
def RecognizeObject_VisualRecog(imagen, contador):
    """Write *imagen* to celda.png, run Watson face detection on it and dump
    the JSON result; returns True on the demo trigger (contador == 5),
    otherwise False.
    """
    cv2.imwrite("celda.png", imagen)

    # VISUAL RECOGNITION API (legacy iam_apikey auth).
    visual_recognition = VisualRecognitionV3(
        '2019-06-21',
        iam_apikey='YuGs3PFCMTaCCJvSs-5nvrSZ5Knnzl3kuRiTEE7qGnMz')

    # BUG FIX: detect_faces expects a binary file object, not a path string —
    # the original passed the literal "celda.png" as the payload.
    with open("celda.png", 'rb') as images_file:
        faces = visual_recognition.detect_faces(images_file).get_result()
    print(json.dumps(faces, indent=2))

    # Demo shortcut: `contador` only advances the demo by pretending a key
    # (pipe) was found on iteration 5.
    if (contador == 5):
        print("Se ha encontrado una tuberia en el contador: " + str(contador) + "\n\n")
        return True
    # BUG FIX: explicit False instead of falling off the end (implicit None).
    return False
def main(args):
    """Classify the image URL in args["mydata"] with the Mood model and
    return the top class wrapped in a message dict; API failures are
    printed (and None is returned implicitly)."""
    authenticator = IAMAuthenticator('tmbr2hVDD4UjHHyVkpP99wcLoZz1jU2NoHc7vGH7mAFd')
    visual_recognition = VisualRecognitionV3(
        version='2018-03-19',
        authenticator=authenticator
    )
    visual_recognition.set_service_url('https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/c46b05ae-21d1-4ba1-bea0-ede174674834')
    classifier_ids = ["Mood_Model_101620031"]
    # Location of the image file to classify.
    url = args["mydata"]
    try:
        response = visual_recognition.classify(url=url,
                                               classifier_ids=classifier_ids)
        top_class = response.get_result()['images'][0]['classifiers'][0]['classes'][0]['class']
        return {"msg": json.dumps(top_class, indent=2)}
    except ApiException as ex:
        print("Method failed with status code " + str(ex.code) + ": " + ex.message)
def execute(image_path):
    """Classify the image at *image_path* with the default Watson VR model
    and return the parsed JSON response as a dict."""
    authenticator = IAMAuthenticator(
        'fZSpq0XcLXx3SaEbJTsAux2f0sGtrUxl6pemvYYauSvx')
    vr_client = VisualRecognitionV3(version='2018-03-19',
                                    authenticator=authenticator)
    vr_client.set_service_url(
        'https://api.us-south.visual-recognition.watson.cloud.ibm.com/instances/5358c9dd-186b-4149-a8f0-ae4c9a30beb2'
    )
    print(image_path)
    with open(image_path, 'rb') as images_file:
        classes = vr_client.classify(images_file=images_file).get_result()
    # Round-trip through JSON to get a plain dict copy of the response.
    json_data = json.loads(json.dumps(classes, indent=2))
    return json_data