account_key=account_key) blob_url_template = "https://meganoni.blob.core.windows.net/test/%s" plate_blob_url_template = "https://meganoni.blob.core.windows.net/plaque/%s" FLASK_DEBUG = os.environ.get('FLASK_DEBUG', True) SUPPORTED_EXTENSIONS = ('.png', '.jpg', '.jpeg') app = Flask(__name__) COMPUTER_VISION_SUBSCRIPTION_KEY = "40d4b184080c436aaab896d811353948" COMPUTER_VISION_ENDPOINT = "https://meganoni.cognitiveservices.azure.com/" computervision_client = ComputerVisionClient( COMPUTER_VISION_ENDPOINT, CognitiveServicesCredentials(COMPUTER_VISION_SUBSCRIPTION_KEY)) @app.route("/ping") def ping(): return "ping" @app.route("/time") def time(): return str(datetime.utcnow()) @app.route("/sendPlateLocation", methods=['GET']) def send_plate_location():
from reportlab.pdfbase.ttfonts import TTFont from reportlab.pdfbase import pdfmetrics from azure.cognitiveservices.vision.computervision import ComputerVisionClient #from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes from azure.cognitiveservices.vision.computervision.models import TextOperationStatusCodes from msrest.authentication import CognitiveServicesCredentials import re import os import time #Llave azure KEY = '9a5336a0d1bb46e89d85b0510c8b0798' ENDPOINT = 'https://gofaster.cognitiveservices.azure.com/' #servicio de azure _client = ComputerVisionClient(ENDPOINT, CognitiveServicesCredentials(KEY)) app = Flask(__name__) app.secret_key = 'matangalachanga' app.config["DEBUG"] = True app.config['MYSQL_DATABASE_USER'] = '******' app.config['MYSQL_DATABASE_PASSWORD'] = '******' app.config['MYSQL_DATABASE_DB'] = 'sepherot_jenniferBD' app.config['MYSQL_DATABASE_HOST'] = 'nemonico.com.mx' mysql = MySQL(app) mysql.init_app(app) #mineria de datos
SDK: https://docs.microsoft.com/en-us/python/api/overview/azure/cognitiveservices/computervision?view=azure-python ''' # Replace with your endpoint and key from the Azure portal endpoint = '<ADD ENDPOINT HERE>' key = '<ADD COMPUTER VISION SUBSCRIPTION KEY HERE>' # Alternatively, uncomment and get endpoint/key from environment variables ''' import os endpoint = os.environ['COMPUTER_VISION_ENDPOINT'] key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY'] ''' # Set credentials credentials = CognitiveServicesCredentials(key) # Create client client = ComputerVisionClient(endpoint, credentials) # change this URL to reflect the image that you would like to test. url = "https://azurecomcdn.azureedge.net/cvt-181c82bceabc9fab9ec6f3dca486738800e04b45a0b3c1268609c94f4d67173a/images/shared/cognitive-services-demos/analyze-image/analyze-6-thumbnail.jpg" # image_path = "images/computer_vision_ocr.png" lang = 'en' raw = True custom_headers = None # Read an image from a url rawHttpResponse = client.read(url, language=lang, custom_headers=custom_headers, raw=raw) # Uncomment the following code and comment out line 37 to read from image stream
import os
import instaloader
import json
from json import JSONEncoder
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials

# Credentials come from the environment; os.getenv returns None when unset,
# so client construction fails fast with an authentication error instead of
# silently using a bogus key.
subscription_key = os.getenv('COMPUTERVISION_KEY')
endpoint = os.getenv('COMPUTERVISION_ENDPOINT')
computervision_client = ComputerVisionClient(endpoint, CognitiveServicesCredentials(subscription_key))


def iterate_until(iterable, max_iterations):
    """Yield at most *max_iterations* items from *iterable*.

    Bug fix: the previous version incremented its counter before yielding and
    only checked the limit after the yield, so it actually produced
    ``max_iterations + 1`` items. The guard now runs before the yield.

    :param iterable: any iterable to draw items from.
    :param max_iterations: maximum number of items to yield (0 yields nothing).
    """
    for index, value in enumerate(iterable):
        if index >= max_iterations:
            break
        yield value


class User:
    def __init__(self, username, max_posts=3):
        instance = instaloader.Instaloader()
        self.username = username
visited += len(web_data.web_pages.value) for page in web_data.web_pages.value: bing_url.append(page.url) bing_name.append(page.name) title = key_word + ', ' + str(visited) with open('bing_url.txt', 'w') as f: f.write(title + '\n') f.write('\n'.join(bing_url)) with open('bing_name.txt', 'w', encoding='utf-8') as f: f.write(title + '\n') f.write('\n'.join(bing_name)) return bing_url, bing_name if __name__ == '__main__': subscription_key = "fea767db97b44733bc86bfe710f80be6" client = WebSearchAPI(CognitiveServicesCredentials(subscription_key)) search = 'chengxiang zhai site:illinois.edu' web_data = client.web.search(query=search, response_filter=['Webpages'], \ client_ip='130.126.255.152', count=5) print('\nSearched for Query# {}'.format(search)) if hasattr(web_data.web_pages, 'value'): print("\nWebpage Results#{}".format(len(web_data.web_pages.value))) for index, page in enumerate(web_data.web_pages.value): print("{} web page name: {} ".format(index + 1, page.name)) print("{} web page URL: {} ".format(index + 1, page.url)) else: print("Didn't find any web pages...")
import firebase_admin from firebase_admin import credentials from firebase_admin import db cred = credentials.Certificate( "/Users/Naveen/Desktop/Python/facial-recog/hacknjit-9a9be-firebase-adminsdk-2j61m-5d5bac921b.json" ) firebase_admin.initialize_app( cred, {'databaseURL': 'https://hacknjit-9a9be.firebaseio.com/'}) root = db.reference() KEY = os.environ['FACE_SUBSCRIPTION_KEY'] ENDPOINT = os.environ['FACE_ENDPOINT'] # Create an authenticated FaceClient. face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY)) # Used in the Person Group Operations PERSON_GROUP_ID = "authorized" TARGET_PERSON_GROUP_ID = str( uuid.uuid4()) # assign a random ID (or name it anything) choice = raw_input( "Train (t), Identify (i), Delete (d), Create Group (c), Delete Group (q), List(l), or Exit (anything else)?\n" ) while True: if choice == 't': # TRAINING # Find all jpeg images the person
n_fast=12, n_slow=26, n_sign=9, fillna=False) if macd[-1] > macd_sig[-1]: trn_macd_status = "Buy" elif macd[-1] < macd_sig[-1]: trn_macd_status = "Sell" else: trn_macd_status = "Hold" return trn_macd_status subscription_key = "d225b3f12aab446aa34af931359edbe0" search_term = tickers date = 2018, 1, 1 client = NewsSearchAPI(CognitiveServicesCredentials(subscription_key)) news_result = client.news.search(query=search_term) if news_result.value: first_news_result = news_result.value[2] data = format(first_news_result.description) sec_news_result = news_result.value[3] data1 = format(sec_news_result.description) third_news_result = news_result.value[4] data2 = format(third_news_result.description) # print("news name: {}".format(first_news_result.name)) # # print("news description: {}".format(first_news_result.description))
client.knowledgebase.delete(kb_id=kb_id) print("Deleted knowledge base.") # </DeleteKB> # <GenerateAnswer> def generate_answer(client, kb_id): print ("Querying knowledge base...") listSearchResults = client.knowledgebase.generate_answer(kb_id, QueryDTO(question = "How do I manage my knowledgebase?")) for i in listSearchResults.answers: print(f"Answer ID: {i.id}.") print(f"Answer: {i.answer}.") print(f"Answer score: {i.score}.") # </GenerateAnswer> # <Main> # <AuthorizationAuthor> client = QnAMakerClient(endpoint=endpoint, credentials=CognitiveServicesCredentials(subscription_key)) # </AuthorizationAuthor> kb_id = create_kb(client=client) update_kb (client=client, kb_id=kb_id) publish_kb (client=client, kb_id=kb_id) download_kb (client=client, kb_id=kb_id) generate_answer(client=client, kb_id=kb_id) delete_kb (client=client, kb_id=kb_id) # </Main>
def addinfo(): if request.method == 'POST' and request.files and int( request.form.get('type')) == 2: imagex = request.files["image"] imagex.save(os.path.join(app.config["IMG_MEDICINES"], imagex.filename)) KEY = '2698a28d0b3a47be9a0177011b4fca38' ENDPOINT = 'https://hackcovid.cognitiveservices.azure.com/' face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY)) test_image_array = glob.glob( os.path.join(app.config["IMG_MEDICINES"], imagex.filename)) image = open(test_image_array[0], 'r+b') # Detect faces face_ids = [] faces = face_client.face.detect_with_stream(image) for face in faces: face_ids.append(face.face_id) print(face_ids) # Identify faces results = None if face_ids: results = face_client.face.identify(face_ids, personGroupId) print('Identifying faces in {}'.format(os.path.basename( image.name))) if not results: print( 'No person identified in the person group for faces from {}.'. format(os.path.basename(image.name))) return jsonify({"error": "none"}) return res = 0 for person in results: print( 'Person for face ID {} is identified in {} with a confidence of {}.' 
.format(person.face_id, os.path.basename(image.name), person.candidates[0].confidence )) # Get topmost confidence score print(person.candidates[0].person_id) res = person.candidates[0].person_id print(res) connect = sqlite3.connect("Face-DataBase") c = connect.cursor() c.execute("SELECT * FROM Students WHERE personID = ?", (res, )) row = c.fetchone() print(row[1] + " recognized") c = connect.cursor() c.execute("SELECT * FROM Students WHERE personID = ?", (res, )) row = c.fetchone() publickey = row[4] ganache_url = "http://ec2-54-175-197-129.compute-1.amazonaws.com:8545" web3 = Web3(Web3.HTTPProvider(ganache_url)) abi = json.loads( '[{"constant":false,"inputs":[{"name":"medicine","type":"string"}],"name":"addmedicines","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"stat","type":"int256"}],"name":"updatestatus","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"getmedicines","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"getstatus","outputs":[{"name":"","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"medicines","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"status","outputs":[{"name":"","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"}]' ) address = web3.toChecksumAddress( "0xC70003Ef71fdEa97A785183615B871A99cFB73b7") contract = web3.eth.contract(address=address, abi=abi) web3.eth.defaultAccount = publickey medicines = request.form.get('medicines') contract.functions.addmedicines(medicines).transact() print(request.form.get('status')) status = int(request.form.get('status')) 
contract.functions.updatestatus(status).transact() return jsonify({"status": 'success'})
def payment(): if request.method == 'POST' and request.files: imagex = request.files["image"] imagex.save(os.path.join(app.config["IMAGE_UPLOADS"], imagex.filename)) KEY = '2698a28d0b3a47be9a0177011b4fca38' ENDPOINT = 'https://hackcovid.cognitiveservices.azure.com/' # Replace with your regional Base URL face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY)) test_image_array = glob.glob( os.path.join(app.config["IMAGE_UPLOADS"], imagex.filename)) image = open(test_image_array[0], 'r+b') # Detect faces face_ids = [] faces = face_client.face.detect_with_stream(image) for face in faces: face_ids.append(face.face_id) print(face_ids) # Identify faces results = face_client.face.identify(face_ids, personGroupId) print('Identifying faces in {}'.format(os.path.basename(image.name))) if not results: print( 'No person identified in the person group for faces from {}.'. format(os.path.basename(image.name))) res = 0 for person in results: print( 'Person for face ID {} is identified in {} with a confidence of {}.' 
.format(person.face_id, os.path.basename(image.name), person.candidates[0].confidence )) # Get topmost confidence score print(person.candidates[0].person_id) res = person.candidates[0].person_id print(res) connect = sqlite3.connect("Face-DataBase") c = connect.cursor() c.execute("SELECT * FROM Students WHERE personID = ?", (res, )) row = c.fetchone() print(row[1] + " recognized") c = connect.cursor() c.execute("SELECT * FROM Students WHERE personID = ?", (res, )) row = c.fetchone() account_1 = request.form.get('publickey') # account_1 = account_1[1:-1] account_2 = row[4] private_key = request.form.get('privatekey') # private_key = private_key[1:-1] print('private_key', private_key) amount = float(request.form.get('amount')) print(amount) connect.commit() connect.close() ganache_url = "http://ec2-54-175-197-129.compute-1.amazonaws.com:8545" web3 = Web3(Web3.HTTPProvider(ganache_url)) web3.eth.defaultAccount = account_1 nonce = web3.eth.getTransactionCount(account_1) if int(web3.eth.getBalance(account_1)) < int(amount * (10**18)): return jsonify({"status": "error"}) print(nonce) tx = { 'nonce': nonce, 'to': account_2, 'value': web3.toWei(amount, 'ether'), 'gas': 2000000, 'gasPrice': web3.toWei('50', 'gwei'), } signed_tx = web3.eth.account.signTransaction(tx, private_key) tx_hash = web3.eth.sendRawTransaction(signed_tx.rawTransaction) print(web3.toHex(tx_hash)) return jsonify({"status": "success", "tx_hash": web3.toHex(tx_hash)}) else: return render_template('index.html')
def result_types_lookup(subscription_key):
    """WebSearchResultTypesLookup.

    Look up a single query (Xbox) and print out name and url for the first
    web, image, news and videos results.
    """
    client = WebSearchAPI(CognitiveServicesCredentials(subscription_key))

    try:
        web_data = client.web.search(query="xbox")
        print("Searched for Query# \" Xbox \"")

        # One entry per answer type:
        # (answer collection, count fmt, name fmt, url fmt, empty message)
        sections = [
            (web_data.web_pages, "Webpage Results#{}",
             "First web page name: {} ", "First web page URL: {} ",
             "Didn't see any Web data.."),
            (web_data.images, "Image Results#{}",
             "First Image name: {} ", "First Image URL: {} ",
             "Didn't see any Image.."),
            (web_data.news, "News Results#{}",
             "First News name: {} ", "First News URL: {} ",
             "Didn't see any News.."),
            (web_data.videos, "Videos Results#{}",
             "First Videos name: {} ", "First Videos URL: {} ",
             "Didn't see any Videos.."),
        ]

        for answers, count_fmt, name_fmt, url_fmt, empty_msg in sections:
            if answers.value:
                print(count_fmt.format(len(answers.value)))
                first = answers.value[0]
                print(name_fmt.format(first.name))
                print(url_fmt.format(first.url))
            else:
                print(empty_msg)

    except Exception as err:
        print("Encountered exception. {}".format(err))
load_dotenv() key_var_name = 'PERSONALIZER_RESOURCE_KEY' # if not key_var_name in os.environ: # raise Exception('Please set/export the environment variable: {}'.format(key_var_name)) personalizer_key = os.getenv(key_var_name) # Replace <your-resource-name>: https://<your-resource-name>.api.cognitive.microsoft.com/ endpoint_var_name = 'PERSONALIZER_RESOURCE_ENDPOINT' # if not endpoint_var_name in os.environ: # raise Exception('Please set/export the environment variable: {}'.format(endpoint_var_name)) personalizer_endpoint = os.getenv(endpoint_var_name) # Instantiate a Personalizer client client = PersonalizerClient(personalizer_endpoint, CognitiveServicesCredentials(personalizer_key)) def get_actions(): action1 = RankableAction(id='pasta', features=[{ "taste": "salty", "spice_level": "medium" }, { "nutrition_level": 5, "cuisine": "italian" }]) action2 = RankableAction(id='ice cream', features=[{ "taste": "sweet", "spice_level": "none"
"\nPlease set the COMPUTERVISION_API_KEY environment variable.\n**Note that you might need to restart your shell or IDE.**" ) sys.exit() if 'COMPUTERVISION_REGION' in os.environ: computervision_region = os.environ['COMPUTERVISION_REGION'] else: print( "\nPlease set the COMPUTERVISION_REGION environment variable.\n**Note that you might need to restart your shell or IDE.**" ) sys.exit() endpoint_url = "https://" + computervision_region + ".api.cognitive.microsoft.com" computervision_client = ComputerVisionClient( endpoint_url, CognitiveServicesCredentials(computervision_api_key)) # END - Configure the Computer Vision client # Get a local image for analysis local_image_path = "resources\\faces.jpg" print("\n\nLocal image path:\n" + os.getcwd() + local_image_path) # END - Get a local image for analysis # Describe a local image by: # 1. Opening the binary file for reading. # 2. Defining what to extract from the image by initializing an array of VisualFeatureTypes. # 3. Calling the Computer Vision service's analyze_image_in_stream with the: # - image # - features to extract # 4. Displaying the image captions and their confidence values. local_image = open(local_image_path, "rb")
Services are not combined here, but could be potentially. Install the Computer Vision SDK from a command prompt or IDE terminal: pip install azure-cognitiveservices-vision-computervision ''' # URL image query_image_url = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/faces.jpg" subscription_key = 'PASTE_YOUR_COMPUTER_VISION_SUBSCRIPTION_KEY_HERE' endpoint = 'PASTE_YOUR_COMPUTER_VISION_ENDPOINT_HERE' ''' Authenticate a client. ''' computer_vision_client = ComputerVisionClient( endpoint, CognitiveServicesCredentials(subscription_key)) ''' Computer Vision This example uses the API calls: analyze_image() and describe_image() ''' print() print("===== Computer Vision =====") # Select the visual feature(s) you want. image_features = ["faces"] # Call the API with detect faces feature, returns an ImageAnalysis which has a list[FaceDescription] detected_faces = computer_vision_client.analyze_image(query_image_url, image_features) # Print the results with age and bounding box print("Face age and location in the image: ")
from azure.cognitiveservices.vision.computervision import ComputerVisionClient from azure.cognitiveservices.vision.computervision.models import ComputerVisionErrorException # Create the Custom Vision project from azure.cognitiveservices.vision.customvision.training import CustomVisionTrainingClient from azure.cognitiveservices.vision.customvision.training.models import ImageFileCreateBatch, ImageFileCreateEntry from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient from msrest.authentication import ApiKeyCredentials # Custom vision endpoint custom_vision_endpoint = "https://eastus.api.cognitive.microsoft.com/" # Custom Vision project name publish_iteration_name = "classifyModel" vision_credentials = CognitiveServicesCredentials(COGSVCS_KEY) vision_client = ComputerVisionClient(COGSVCS_CLIENTURL, vision_credentials) person_group_id = 'reactor' # Num retries for processRequest() function _maxNumRetries = 10 # General headers headers = { 'Content-Type': 'application/json', 'Ocp-Apim-Subscription-Key': _key } #---------------------------------------------------------------------------------------------------------------------# # Endpoint dictionaries
def authenticateClient():
    """Return a TextAnalyticsClient built from the module-level endpoint/key."""
    return TextAnalyticsClient(
        endpoint=endpoint,
        credentials=CognitiveServicesCredentials(subscription_key),
    )
def booking_app(subscription_kuy): """Authoring. This will create a LUIS Booking application, train and publish it. """ client = LUISAuthoringClient( 'https://westus.api.cognitive.microsoft.com', CognitiveServicesCredentials(subscription_key), ) try: # Create a LUIS app default_app_name = "Contoso-{}".format(datetime.datetime.now()) version_id = "0.1" print("Creating App {}, version {}".format(default_app_name, version_id)) app_id = client.apps.add({ 'name': default_app_name, 'initial_version_id': version_id, 'description': "New App created with LUIS Python sample", 'culture': 'en-us', }) print("Created app {}".format(app_id)) # Add information into the model print("\nWe'll create two new entities.") print("The \"Destination\" simple entity will hold the flight destination.") print("The \"Class\" hierarchical entity will accept \"First\", \"Business\" and \"Economy\" values.") destination_name = "Destination" destination_id = client.model.add_entity( app_id, version_id, destination_name ) print("{} simple entity created with id {}".format( destination_name, destination_id )) class_name = "Class" class_id = client.model.add_hierarchical_entity( app_id, version_id, name=class_name, children=["First", "Business", "Economy"] ) print("{} hierarchical entity created with id {}".format( class_name, class_id )) print("\nWe'll now create the \"Flight\" composite entity including \"Class\" and \"Destination\".") flight_name = "Flight" flight_id = client.model.add_composite_entity( app_id, version_id, name=flight_name, children=[class_name, destination_name] ) print("{} composite entity created with id {}".format( flight_name, flight_id )) find_economy_to_madrid = "find flights in economy to Madrid" find_first_to_london = "find flights to London in first class" print("\nWe'll create a new \"FindFlights\" intent including the following utterances:") print(" - "+find_economy_to_madrid) print(" - "+find_first_to_london) intent_name = "FindFlights" intent_id = 
client.model.add_intent( app_id, version_id, intent_name ) print("{} intent created with id {}".format( intent_name, intent_id )) def get_example_label(utterance, entity_name, value): """Build a EntityLabelObject. This will find the "value" start/end index in "utterance", and assign it to "entity name" """ utterance = utterance.lower() value = value.lower() return { 'entity_name': entity_name, 'start_char_index': utterance.find(value), 'end_char_index': utterance.find(value) + len(value) } utterances = [{ 'text': find_economy_to_madrid, 'intent_name': intent_name, 'entity_labels':[ get_example_label(find_economy_to_madrid, "Flight", "economy to madrid"), get_example_label(find_economy_to_madrid, "Destination", "Madrid"), get_example_label(find_economy_to_madrid, "Class", "economy"), ] }, { 'text': find_first_to_london, 'intent_name': intent_name, 'entity_labels':[ get_example_label(find_first_to_london, "Flight", "London in first class"), get_example_label(find_first_to_london, "Destination", "London"), get_example_label(find_first_to_london, "Class", "first"), ] }] utterances_result = client.examples.batch( app_id, version_id, utterances ) print("\nUtterances added to the {} intent".format(intent_name)) # Training the model print("\nWe'll start training your app...") async_training = client.train.train_version(app_id, version_id) is_trained = async_training.status == "UpToDate" trained_status = ["UpToDate", "Success"] while not is_trained: time.sleep(1) status = client.train.get_status(app_id, version_id) is_trained = all(m.details.status in trained_status for m in status) print("Your app is trained. You can now go to the LUIS portal and test it!") # Publish the app print("\nWe'll start publishing your app...") publish_result = client.apps.publish( app_id, { 'version_id': version_id, 'is_staging': False, 'region': 'westus' } ) endpoint = publish_result.endpoint_url + "?subscription-key=" + subscription_key + "&q=" print("Your app is published. 
You can now go to test it on\n{}".format(endpoint)) except Exception as err: print("Encountered exception. {}".format(err))
def create_cv_client():
    """Instantiate and return a ComputerVisionClient.

    Wraps the module-level subscription key in a CognitiveServicesCredentials
    object and pairs it with the module-level endpoint.
    """
    credentials = CognitiveServicesCredentials(subscription_key)
    return ComputerVisionClient(endpoint, credentials)
def management(subscription_key): """Managing This will show how to manage your LUIS applications. """ client = LUISAuthoringClient( 'https://westus.api.cognitive.microsoft.com', CognitiveServicesCredentials(subscription_key), ) try: # Create a LUIS app default_app_name = "Contoso-{}".format(datetime.datetime.now()) version_id = "0.1" print("Creating App {}, version {}".format(default_app_name, version_id)) app_id = client.apps.add({ 'name': default_app_name, 'initial_version_id': version_id, 'description': "New App created with LUIS Python sample", 'culture': 'en-us', }) print("Created app {}".format(app_id)) # Listing app print("\nList all apps") for app in client.apps.list(): print("\t->App: '{}'".format(app.name)) # Cloning a version print("\nCloning version 0.1 into 0.2") client.versions.clone( app_id, "0.1", # Source "0.2" # New version name ) print("Your app version has been cloned.") # Export the version print("\nExport version 0.2 as JSON") luis_app = client.versions.export( app_id, "0.2" ) luis_app_as_json = json.dumps(luis_app.serialize()) # You can now save this JSON string as a file # Import the version print("\nImport previously exported version as 0.3") luis_app client.versions.import_method( app_id, json.loads(luis_app_as_json), "0.3" ) # Listing versions print("\nList all versions in this app") for version in client.versions.list(app_id): print("\t->Version: '{}', training status: {}".format(version.version, version.training_status)) # Print app details print("\nPrint app '{}' details".format(default_app_name)) details = client.apps.get(app_id) pprint(details.as_dict()) # as_dict "dictify" the object, by default it's attribute based. e.g. details.name # Print version details print("\nPrint version '{}' details".format(version_id)) details = client.versions.get(app_id, version_id) pprint(details.as_dict()) # as_dict "dictify" the object, by default it's attribute based. e.g. 
details.name # Delete an app print("\nDelete app '{}'".format(default_app_name)) client.apps.delete(app_id) print("App deleted!") except Exception as err: print("Encountered exception. {}".format(err))
def __init__(self):
    # NOTE(review): the subscription key is hard-coded (and therefore leaked
    # in source control); consider loading it from the environment instead.
    self.client = WebSearchAPI(
        CognitiveServicesCredentials("fea767db97b44733bc86bfe710f80be6"))
def initFaceClient():
    """Create an authenticated FaceClient for the Azure Face API.

    Security fix: the key and endpoint were hard-coded (a secret leaked in
    source control). They can now be supplied via the FACE_SUBSCRIPTION_KEY /
    FACE_ENDPOINT environment variables; the previous literals remain the
    defaults, so existing deployments behave exactly as before.

    :returns: a FaceClient authenticated with CognitiveServicesCredentials.
    """
    import os  # local import keeps this a drop-in replacement

    key = os.environ.get('FACE_SUBSCRIPTION_KEY',
                         '5250cc4496a74ceca28563b13b083a01')
    endpoint = os.environ.get('FACE_ENDPOINT',
                              'https://westeurope.api.cognitive.microsoft.com/')
    return FaceClient(endpoint, CognitiveServicesCredentials(key))
from PIL import Image, ImageDraw, ImageFont import time app = Flask(__name__) try: with open('/home/config.json', 'r') as f: CONFIG = json.load(f) f.close() SUBSCRIPTION_KEY = CONFIG['azure']['subscription_key'] ENDPOINT = CONFIG['azure']['endpoint'] FACE_KEY = CONFIG['azure']['face_key'] FACE_END = CONFIG['azure']['face_end'] FACE_CLIENT = FaceClient(FACE_END, CognitiveServicesCredentials(FACE_KEY)) PERSON_GROUP_ID = "elsiegroup" LINE_SECRET = CONFIG['line']['line_secret'] LINE_TOKEN = CONFIG['line']['line_token'] IMGUR_CONFIG = CONFIG['imgur'] except FileNotFoundError: SUBSCRIPTION_KEY = os.getenv('SUBSCRIPTION_KEY') ENDPOINT = os.getenv('ENDPOINT') FACE_KEY = os.getenv('FACE_KEY') FACE_END = os.getenv('FACE_END') LINE_SECRET = os.getenv('LINE_SECRET') LINE_TOKEN = os.getenv('LINE_TOKEN') IMGUR_CONFIG = { "client_id": os.getenv('IMGUR_ID'),
def quickstart(): # <VariablesYouChange> authoringKey = 'YourAuthoringKey' authoringEndpoint = 'YourAuthoringEndpoint' predictionKey = 'YourAuthoringKey' predictionEndpoint = 'YourPredictionEndpoint' # </VariablesYouChange> # <VariablesYouDontNeedToChangeChange> appName = "Contoso Pizza Company" versionId = "0.1" intentName = "OrderPizzaIntent" # </VariablesYouDontNeedToChangeChange> # <AuthoringCreateClient> client = LUISAuthoringClient(authoringEndpoint, CognitiveServicesCredentials(authoringKey)) # </AuthoringCreateClient> # Create app app_id = create_app(client, appName, versionId) # <AddIntent> client.model.add_intent(app_id, versionId, intentName) # </AddIntent> # Add Entities add_entities(client, app_id, versionId) # Add labeled examples add_labeled_examples(client, app_id, versionId, intentName) # <TrainAppVersion> client.train.train_version(app_id, versionId) waiting = True while waiting: info = client.train.get_status(app_id, versionId) # get_status returns a list of training statuses, one for each model. Loop through them and make sure all are # done. waiting = any( map( lambda x: 'Queued' == x.details.status or 'InProgress' == x. 
details.status, info)) if waiting: print("Waiting 10 seconds for training to complete...") time.sleep(10) else: print("trained") waiting = False # </TrainAppVersion> # <PublishVersion> client.apps.publish(app_id, versionId, is_staging=False) # </PublishVersion> # <PredictionCreateClient> runtimeCredentials = CognitiveServicesCredentials(predictionKey) clientRuntime = LUISRuntimeClient(endpoint=predictionEndpoint, credentials=runtimeCredentials) # </PredictionCreateClient> # <QueryPredictionEndpoint> # Production == slot name predictionRequest = { "query": "I want two small pepperoni pizzas with more salsa" } predictionResponse = clientRuntime.prediction.get_slot_prediction( app_id, "Production", predictionRequest) print("Top intent: {}".format(predictionResponse.prediction.top_intent)) print("Sentiment: {}".format(predictionResponse.prediction.sentiment)) print("Intents: ") for intent in predictionResponse.prediction.intents: print("\t{}".format(json.dumps(intent))) print("Entities: {}".format(predictionResponse.prediction.entities))
This example, using the general Cognitive Services key/endpoint. It's used when you want to combine many Cognitive Services with just one authentication key/endpoint. Services are not combined here, but could be potentially. Install the Text Analytics SDK from a command prompt or IDE terminal: pip install --upgrade azure-cognitiveservices-language-textanalytics ''' # Add your Cognitive Services key and endpoint to your environment variables. subscription_key = os.environ['COGNITIVE_SERVICES_SUBSCRIPTION_KEY'] endpoint = os.environ['COGNITIVE_SERVICES_ENDPOINT'] ''' AUTHENTICATE Create a Text Analytics client. ''' credentials = CognitiveServicesCredentials(subscription_key) text_analytics_client = TextAnalyticsClient(endpoint=endpoint, credentials=credentials) ''' TEXT ANALYTICS Gets the sentiment value of a body of text. Values closer to zero (0.0) indicate a negative sentiment, while values closer to one (1.0) indicate a positive sentiment. ''' try: documents = [{ "id": "1", "language": "en", "text": "I had the best day of my life." }, { "id": "2",
dict(Authorization=authHeaderValue)) for i in listSearchResults.answers: print(f"Answer ID: {i.id}.") print(f"Answer: {i.answer}.") print(f"Answer score: {i.score}.") # </GenerateAnswer> # <Main> # <AuthorizationAuthor> client = QnAMakerClient( endpoint=authoring_endpoint, credentials=CognitiveServicesCredentials(subscription_key)) # </AuthorizationAuthor> kb_id = create_kb(client=client) update_kb(client=client, kb_id=kb_id) publish_kb(client=client, kb_id=kb_id) download_kb(client=client, kb_id=kb_id) queryRuntimeKey = getEndpointKeys_kb(client=client) # <AuthorizationQuery> runtimeClient = QnAMakerRuntimeClient( runtime_endpoint=runtime_endpoint, credentials=CognitiveServicesCredentials(queryRuntimeKey)) # </AuthorizationQuery>