Example #1
0
def request_priv_info():
    """Obtain the Text Analytics subscription key and endpoint.

    The credentials are read from the private.json file located in the
    current working directory, via the shared get_private() helper.

    Returns:
        tuple: (key, endpoint) for the Text Analytics service.
    """
    private_path = os.path.join(os.getcwd(), "private.json")
    key, endpoint = get_private(private_path, "Text Analytics")
    return key, endpoint
Example #2
0
# Bail out early if the installed Computer Vision client library is too
# old: incompatible API changes were introduced prior to 0.6.0.
# (Fixed typo in the displayed message: "Cognitives" -> "Cognitive".)
if ver(azver) < ver("0.6.0"):
    sys.exit(
        f"""*** WARNING *** Currently you have installed version {azver} of the
Azure Cognitive Services Computer Vision library. This might have
been installed automatically as part of the *configure* of the
package. Some incompatible changes have emerged in recent
upgrades. Please upgrade to the latest version of that library using:

    pip3 install --upgrade azure-cognitiveservices-vision-computervision
""")

# ----------------------------------------------------------------------
# Request subscription key and endpoint from user.
# ----------------------------------------------------------------------
key, endpoint = get_private()

mlask()

# Set credentials.

credentials = CognitiveServicesCredentials(key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)

# URL of the sample image, split across variables to keep lines short.

url0 = "https://upload.wikimedia.org/"
url1 = "wikipedia/commons/thumb/1/12/Broadway_and_Times_Square_by_night.jpg/"
url2 = "450px-Broadway_and_Times_Square_by_night.jpg"
url = url0 + url1 + url2
Example #3
0
from translate import translate_speech_to_text
import azure.cognitiveservices.speech as speechsdk
import os
import sys

# Display the demo introduction to the user.
mlcat("Speech Services", """\
Welcome to a demo of the pre-built models for Speech provided
through Azure's Cognitive Services. The Speech cloud service
supports speech to text, text to speech, speech translation and
Speaker Recognition capabilities.
""")

# ----------------------------------------------------------------------
# Request subscription key and location from user.
# ----------------------------------------------------------------------
key, location = get_private()

# Recognition is experimental and is only available at present
# 20210428 from the westus data centre.

RECOGNISE_FLAG = (location == "westus")  # True only when using westus.

# -----------------------------------------------------------------------
# Set up a speech recognizer and synthesizer.
# -----------------------------------------------------------------------

# Following is the code that does the actual work, creating an
# instance of a speech config with the specified subscription key and
# service region, then creating a recognizer with the given settings,
# and then performing recognition. recognize_once() returns when the
# first utterance has been recognized, so it is suitable only for
Example #4
0
# ----------------------------------------------------------------------
# Parse command line arguments
# ----------------------------------------------------------------------

# add_help=False: presumably -h is reserved or help is handled by the
# surrounding mlhub tooling — TODO confirm.
option_parser = argparse.ArgumentParser(add_help=False)

option_parser.add_argument('path', help='path or url to image')

args = option_parser.parse_args()

# ----------------------------------------------------------------------
# Request subscription key and endpoint from user.
# ----------------------------------------------------------------------

subscription_key, endpoint = get_private()

# Set credentials.

credentials = CognitiveServicesCredentials(subscription_key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)

# Check the URL supplied. Also want to support local file.

# Send image to azure to identify landmark

# url = "https://images.pexels.com/photos/338515/pexels-photo-338515.jpeg"
Example #5
0
        # Collate the service's error details into a single string and
        # exit with the most helpful message we can derive from it.
        error = response["errorDetails"]
        errors = " ".join(error)
        if "query: This parameter is missing or invalid." in errors:
            # The service rejected the request for lack of an address.
            sys.exit("The address parameter is required. ")
        elif "Access was denied" in errors:
            # Most likely an expired or wrong key; point at the fix.
            sys.exit(errors +
                     "\nPlease run 'ml configure bing' to update the key. ")
        else:
            sys.exit(errors)

    return result


if __name__ == "__main__":

    # Only the key (first returned element) is needed here.
    key = get_private()[0]

    # Private file stores the Bing Maps key required by the geocoding
    # function.

    parser = argparse.ArgumentParser(description='Bing Maps')

    # Zero or more words making up the address; nargs='*' lets the
    # address be supplied unquoted on the command line.
    parser.add_argument('address',
                        type=str,
                        nargs='*',
                        help='location to geocode')

    parser.add_argument('--neighbourhood',
                        '-n',
                        action="store_true",
                        help='include neighbourhood of the address.')
Example #6
0
        # Parse the raw LUIS JSON payload attached to the intent result.
        js = intent_result.intent_json
        js = json.loads(js)
        score = js["topScoringIntent"]["score"]
        # Build a comma-separated list of the recognised entities.
        entities = ""
        sep = ""
        for item in js["entities"]:
            entities += sep + item["entity"]
            sep = ", "

        print("Recognized: \"{}\" with intent id `{}`. The score: {}, and entities: {}".
              format(intent_result.text, intent_result.intent_id, str(score), entities))
    elif intent_result.reason == speechsdk.ResultReason.RecognizedSpeech:
        # Speech was transcribed but no LUIS intent matched.
        print("Recognized: {}".format(intent_result.text))
    elif intent_result.reason == speechsdk.ResultReason.NoMatch:
        print("No speech could be recognized: {}".format(intent_result.no_match_details))
    elif intent_result.reason == speechsdk.ResultReason.Canceled:
        print("Intent recognition canceled: {}".format(intent_result.cancellation_details.reason))
        # A cancellation with reason Error carries extra diagnostics.
        if intent_result.cancellation_details.reason == speechsdk.CancellationReason.Error:
            print("Error details: {}".format(intent_result.cancellation_details.error_details))


if __name__ == "__main__":
    # ----------------------------------------------------------------------
    # Request subscription key and location from user.
    # ----------------------------------------------------------------------

    # get_private() returns four values here; the second is unused (the
    # original code bound it to "location" and immediately overwrote it
    # with the third value — discard it explicitly instead).
    key, _, location, app_id = get_private()

    # Configure the speech service and run intent recognition.
    intent_config = speechsdk.SpeechConfig(subscription=key, region=location)
    intent(intent_config, app_id)
Example #7
0
an operation to be performed on specific entities.
""")

mlask(end="\n")

# Import the required libraries.

import sys
import requests
from time import sleep
from mlhub.pkg import get_private

# ----------------------------------------------------------------------
# Request subscription key, endpoint and App ID from user.
# ----------------------------------------------------------------------
# NOTE(review): "id" shadows the builtin id(); retained unchanged since
# later code (outside this view) may reference it by this name.
subscription_key, endpoint, location, id = get_private()

# Fixed typo in the displayed text: "speceific" -> "specific".
mlcat(
    "", """\
LUIS includes a set of prebuilt intents from a number of prebuilt domains
for quickly adding common intents and utterances to conversational client
apps. These include Camera, Music, HomeAutomation, and many more. We will
begin with a demonstration of Home Automation. Do note that typically you
will need to train the LUIS model with your specific intents.

Below we will demonstrate a series of commands and identify the intent and
the entities, together with a confidence score.
""")
mlask()

# Pass the subscription key with every request to the LUIS endpoint.
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
Example #8
0
    option_parser.add_argument('--original',
                               "-f",
                               default="en-US",
                               help='original language')

    option_parser.add_argument('--target', "-t", help='target language')

    option_parser.add_argument(
        '--output',
        "-o",
        help='path to an audio file to save. The file type should be wav')

    args = option_parser.parse_args()

    # Fall back to US English when --original is supplied as an empty
    # string; argparse's default already covers the omitted case.
    # (The original code assigned args.original AFTER from_language had
    # been read, so its fallback never took effect.)
    from_language = args.original or "en-US"
    to_language = args.target

    # ----------------------------------------------------------------------
    # Request subscription key and location from user.
    # ----------------------------------------------------------------------

    key, region = get_private()

    translate_speech_to_text(from_language, to_language, True, args.output,
                             key, region)