Example #1
    sys.exit(
        f"""*** WARNING *** Currently you have installed version {azver} of the
Azure Cognitives Services Computer Vision library. This might have
been installed automatically as part of the *configure* of the
package. Some incompatible changes have emerged in recent
upgrades. Please upgrade to the latest version of that library using:

    pip3 install --upgrade azure-cognitiveservices-vision-computervision
""")

# ----------------------------------------------------------------------
# Request subscription key and endpoint from user.
# ----------------------------------------------------------------------
key, endpoint = get_private()

mlask()

# Set credentials.

credentials = CognitiveServicesCredentials(key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)

url0 = "https://upload.wikimedia.org/"
url1 = "wikipedia/commons/thumb/1/12/Broadway_and_Times_Square_by_night.jpg/"
url2 = "450px-Broadway_and_Times_Square_by_night.jpg"
url = url0 + url1 + url2
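
# A hedged sketch (not part of the original snippet): the client created
# above could be asked to describe the image at this URL, for example:
#
#     analysis = client.describe_image(url, max_candidates=3)
#     for caption in analysis.captions:
#         print(f"{caption.text} (confidence {caption.confidence:.2f})")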

mlcat(
Example #2
# Constants.

SERVICE = "Anomaly Detector"
KEY_FILE = os.path.join(os.getcwd(), "private.txt")
DATA_FILE = "request.json"

# URLs for anomaly detection with the Anomaly Detector API.

BATCH_URL = "anomalydetector/v1.0/timeseries/entire/detect"
LATEST_URL = "anomalydetector/v1.0/timeseries/last/detect"

# Request subscription key and endpoint from user.

subscription_key, endpoint = azkey(KEY_FILE, SERVICE)

mlask()

# Read the time series data from a JSON file.

with open(DATA_FILE) as file_handler:
    data = json.load(file_handler)
series = data['series']
sensitivity = data['sensitivity']

mlcat("Sample Data",
      """\
The dataset contains {} {} observations recording the number of requests
received for a particular service. It is quite a small dataset used to
illustrate the concepts. Below we see sample observations from the dataset.
""".format(len(series), data['granularity']),
      begin="\n")
Example #3
    model.load_state_dict(torch.load(mfile,
                                     map_location=torch.device('cpu')))
except Exception:
    print("No model found. Bad model file or model not yet trained.")
    print(f"Tried loading '{mfile}'.")
    exit()

samples = ["Here's to having a glorious day.",
           "That was a horrible meal.",
           "The chef should be sacked.",
           "Hi there, hope you are well.",
           "The sun has already risen."]

for text in samples:

    mlask(end="\n")

    mlcat(f"{text}", """\
Passing the above text on to the pre-built model to determine
sentiment identifies the sentiment as being:
""")

    encoded_tweet = tokenizer.encode_plus(
        text,
        max_length=MAX_LEN,
        add_special_tokens=True,
        return_token_type_ids=False,
        padding='max_length',
        return_attention_mask=True,
        return_tensors='pt',
        truncation=True
Example #4
# and then performing recognition. recognize_once() returns when the
# first utterance has been recognized, so it is suitable only for
# single-shot recognition, such as a command or query. For long-running
# recognition use start_continuous_recognition() instead, or, to run
# recognition in a non-blocking manner, use recognize_once_async().

speech_config = speechsdk.SpeechConfig(subscription=key, region=location)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config)
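
# As the comment above notes, a non-blocking call could be used instead.
# A minimal sketch (not part of the original demo):
#
#     result = speech_recognizer.recognize_once_async().get()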

# -----------------------------------------------------------------------
# Transcribe some spoken words.
# -----------------------------------------------------------------------

mlask(end="\n")

mlcat("Speech to Text", """\
The TRANSCRIBE command can take spoken audio, from the microphone
for example, and transcribe it into text.
""")

mlask(end=True, prompt="Press Enter and then say something")

result = speech_recognizer.recognize_once()

if result.reason == speechsdk.ResultReason.RecognizedSpeech:
    print("Recognized: {}".format(result.text))
elif result.reason == speechsdk.ResultReason.NoMatch:
    print("No speech could be recognized: {}".format(result.no_match_details))
elif result.reason == speechsdk.ResultReason.Canceled:
Example #5
0-9, 10-19, 20-29, 30-39, 40-49, 50-59, 60-69, and 70+.  One common
way to summarise this kind of data is with a histogram.  That is, to
report the number of patients that were classified as belonging to
each age group.

For this demonstration we will create a histogram for the actual data
and then a histogram for differentially private data.

The data is first loaded from a CSV file. It consists of just two
columns: the first is the date and the second is the age group.""")

# Read the raw data.

data = pd.read_csv("pcr_testing_age_group_2020-03-09.csv")

mlask(True, True)

# Compute the exact query responses.

exact_counts = data["age_group"].value_counts().sort_index()
values = exact_counts.values
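
# A hedged sketch of how the Laplace Mechanism introduced below (truncated
# in this snippet) might perturb these counts: counting queries have
# sensitivity 1, so Laplace noise of scale 1/epsilon gives epsilon
# differential privacy. The epsilon value here is an illustrative assumption.
#
#     import numpy as np
#     epsilon = 0.1
#     noisy = values + np.random.laplace(loc=0, scale=1/epsilon, size=len(values))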

mlcat("Data Sample", """\
Here's a random sample of some of the records:
""")

print(data.sample(10))

mlask(True, True)

mlcat("Laplace Mechanism", """\
Example #6
# ----------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------

# Import the required libraries.

import re
import easyocr
import urllib

reader = easyocr.Reader(['ch_sim', 'en'])

path = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Toronto_-_ON_-_Cecil_Street.jpg/1200px-Toronto_-_ON_-_Cecil_Street.jpg"

mlpreview(path)
mlask(end="\n")

mlcat(
    "Apply Models", """\
A detection model and recognition models are now being loaded and applied to
the image. Each line of the results reports the certainty of the result, the
bounding box of the text, and the text identified.
""")
with urllib.request.urlopen(path) as url:
    img = url.read()
    result = reader.readtext(img)

for r in result:
    bb = re.sub(r"[,\[\]]", "", " ".join(map(str, r[0])))
    print(f'{round(r[2],2)},{bb},{r[1]}')
Example #7
#
# This demo is based on the Azure Cognitive Services Translator Quick Starts
#
# https://github.com/MicrosoftTranslator/Text-Translation-API-V3-Python
#

from mlhub.pkg import mlask, mlcat, get_private

mlcat(
    "Azure Text Translation", """\
Welcome to a demo of the pre-built models for Text Translation provided
through Azure's Cognitive Services. This service translates text between
multiple languages.
""")

mlask(end='\n')
# ----------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------

# Import the required libraries.

import sys
import requests

# ----------------------------------------------------------------------
# Request subscription key and location from user.
# ----------------------------------------------------------------------

key, location = get_private()
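
# A hedged sketch (not part of the original snippet) of a translation request
# against the Translator v3 REST API. The target languages and sample text
# are illustrative assumptions.

endpoint = "https://api.cognitive.microsofttranslator.com/translate"
params = {"api-version": "3.0", "to": ["fr", "de"]}
headers = {
    "Ocp-Apim-Subscription-Key": key,
    "Ocp-Apim-Subscription-Region": location,
    "Content-Type": "application/json",
}
body = [{"text": "Hello, world!"}]

response = requests.post(endpoint, params=params, headers=headers, json=body)
print(response.json())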
Example #8
def main():
    # -----------------------------------------------------------------------
    # Load pre-built models and samples
    # -----------------------------------------------------------------------

    scorer = os.path.join(os.getcwd(), "deepspeech-0.9.3-models.scorer")
    model = os.path.join(os.getcwd(), "deepspeech-0.9.3-models.pbmm")
    audio = os.path.join(os.getcwd(), "audio-0.9.3.tar.gz")

    tar = tarfile.open(audio, "r:gz")
    tar.extractall()
    tar.close()

    audio_path = os.path.join(os.getcwd(), "audio")

    audio_file_list = []

    for filename in os.listdir(audio_path):
        if not filename.startswith(".") and filename.endswith("wav"):
            audio_file_list.append(
                os.path.join(os.getcwd(), "audio/" + filename))

    audio_file_list = sorted(audio_file_list)

    mlcat(
        "Deepspeech", """\
Welcome to a demo of Mozilla's Deepspeech pre-built model for speech to text.
The model is trained using machine learning techniques based on Baidu's Deep
Speech research paper (https://arxiv.org/abs/1412.5567) and is implemented by
Mozilla. In this demo we will play a number of audio files and then
transcribe them to text using the model.
""")
    mlask(end="\n")

    msg = """\
The audio will be played and if you listen carefully you should hear:

	"""

    # -----------------------------------------------------------------------
    # First audio
    # -----------------------------------------------------------------------

    mlcat("Audio Example 1", msg + "\"Experience proves this.\"")
    mlask(begin="\n", end="\n")
    os.system(f'aplay {audio_file_list[0]} >/dev/null 2>&1')
    mlask(end="\n", prompt="Press Enter to transcribe this audio")
    ds, desired_sample_rate = utils.load(model, scorer, True, "", "", "", "")
    utils.deepspeech(ds, desired_sample_rate, audio_file_list[0], "demo", True,
                     "", "", "")
    mlask(end="\n")

    # -----------------------------------------------------------------------
    # Second audio
    # -----------------------------------------------------------------------

    mlcat("Audio Example 2", msg + "\"Why should one halt on the way?\"")
    mlask(begin="\n", end="\n")
    os.system(f'aplay {audio_file_list[1]} >/dev/null 2>&1')
    mlask(end="\n", prompt="Press Enter to transcribe this audio")
    utils.deepspeech(ds, desired_sample_rate, audio_file_list[1], "demo", True,
                     "", "", "")
    mlask(end="\n")

    # -----------------------------------------------------------------------
    # Third audio
    # -----------------------------------------------------------------------

    mlcat("Audio Example 3", msg + "\"Your power is sufficient I said.\"")
    mlask(begin="\n", end="\n")
    os.system(f'aplay {audio_file_list[2]} >/dev/null 2>&1')
    mlask(end="\n", prompt="Press Enter to transcribe this audio")
    utils.deepspeech(ds, desired_sample_rate, audio_file_list[2], "demo", True,
                     "", "", "")