Code example #1
File: utils.py Project: isabella232/azface
import os

from mlhub import utils as mlutils


def get_abspath(path):
    """Return the absolute path of <path>.

    Because the working directory of MLHUB model is ~/.mlhub/<model>,
    when user run 'ml score facematch <image-path>', the <image-path> may be a
    path relative to the path where 'ml score facematch' is typed, to cope with
    this scenario, mlhub provides mlhub.utils.get_cmd_cwd() to obtain this path.
    """

    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        path = os.path.join(mlutils.get_cmd_cwd(), path)

    return os.path.abspath(path)
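
A quick usage sketch to make the behaviour concrete (the paths and command-line directory are hypothetical):

# Suppose 'ml score facematch face.jpg' was typed in /home/user/photos,
# so get_cmd_cwd() returns '/home/user/photos':
#
#   get_abspath('face.jpg')    # -> '/home/user/photos/face.jpg'
#   get_abspath('~/face.jpg')  # -> '/home/user/face.jpg'
#   get_abspath('/tmp/x.jpg')  # -> '/tmp/x.jpg' (already absolute)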
Code example #2
# Create the client.

client = ComputerVisionClient(endpoint, credentials)

# Check that the supplied URL or path exists and is an image.

# Send the provided image (URL or path) to Azure to detect objects.

# ----------------------------------------------------------------------
# URL or path
# ----------------------------------------------------------------------

path = args.path

# ----------------------------------------------------------------------
# Objects
# ----------------------------------------------------------------------

if is_url(path):
    analysis = client.detect_objects(path)
else:
    path = os.path.join(get_cmd_cwd(), path)
    with open(path, 'rb') as fstream:
        analysis = client.detect_objects_in_stream(fstream)

for obj in analysis.objects:
    print(f"{obj.rectangle.x} {obj.rectangle.y} " +
          f"{obj.rectangle.x + obj.rectangle.w} " +
          f"{obj.rectangle.y + obj.rectangle.h}")
Code example #3
File: classify.py Project: microsoft/cvbp
    sys.stderr.write("Failed to obtain labels probably because of " +
                     "a network connection error.\n")
    sys.exit(1)

# ----------------------------------------------------------------------
# Load the pre-built model
# ----------------------------------------------------------------------

for path in args.path:

    if is_url(path):
        tempdir = tempfile.gettempdir()
        imfile = os.path.join(tempdir, "temp.jpg")
        urllib.request.urlretrieve(path, imfile)
    else:
        imfile = os.path.join(get_cmd_cwd(), path)
    
    try:
        im = open_image(imfile, convert_mode='RGB')
    except Exception:
        sys.stderr.write(f"'{imfile}' may not be an image file and will be skipped.\n")
        continue

    # Select the pre-built model.

    for m in modeln: 
        if m == "densenet201":
            model = model_to_learner(models.densenet201(pretrained=True), IMAGENET_IM_SIZE)
        elif m == "resnet152":
            model = model_to_learner(models.resnet152(pretrained=True), IMAGENET_IM_SIZE)
        elif m == "alexnet":
Code example #4
File: ocr.py Project: isabella232/azcv
if args.handwritten:
    mode = TextRecognitionMode.handwritten
    if ver(azver) > ver("0.3.0"):
        sys.stderr.write("The --handwritten option is no longer required.\n")
else:
    mode = TextRecognitionMode.printed
raw = True
custom_headers = None
numberOfCharsInOperationId = 36

# Asynchronous call.

if ver(azver) > ver("0.3.0"):
    if is_url(url):
        rawHttpResponse = client.batch_read_file(url, custom_headers, raw)
    else:
        path = os.path.join(get_cmd_cwd(), url)
        with open(path, 'rb') as fstream:
            rawHttpResponse = client.batch_read_file_in_stream(
                fstream, custom_headers, raw)
else:
    if is_url(url):
        rawHttpResponse = client.batch_read_file(url, mode, custom_headers,
                                                 raw)
    else:
        path = os.path.join(get_cmd_cwd(), url)
        with open(path, 'rb') as fstream:
            rawHttpResponse = client.batch_read_file_in_stream(
                fstream, mode, custom_headers, raw)

# Get ID from returned headers.
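
The snippet stops at this comment. The usual continuation, as in Microsoft's Computer Vision quickstart samples (a sketch reusing the snippet's own numberOfCharsInOperationId):

# Sketch of the usual continuation: the Read API returns an
# Operation-Location header whose last 36 characters are the operation ID.
operationLocation = rawHttpResponse.headers["Operation-Location"]
idLocation = len(operationLocation) - numberOfCharsInOperationId
operationId = operationLocation[idLocation:]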
Code example #5
    sys.stdout.write(f"{detectedl}," + f"{detecteds:0.2f}," +
                     f"{result[0]['translations'][0]['to']},")

    if args.keep:
        sys.stdout.write(f"{txt.rstrip()},")

    sys.stdout.write(f"{result[0]['translations'][0]['text']}")


# ------------------------------------------------------------------------
# Translate text obtained from command line, pipe, or interactively.
# ------------------------------------------------------------------------

txt = " ".join(args.text)

fname = os.path.join(get_cmd_cwd(), txt)

if len(args.text) == 1 and os.path.isfile(fname):
    with open(fname) as f:
        lines = f.readlines()
    lines = [x.strip() for x in lines]
    for line in lines:
        helper(line, fr, to)
        print()
elif txt != "":
    helper(txt, fr, to)
    print()
elif not sys.stdin.isatty():
    for txt in sys.stdin.readlines():
        helper(txt, fr, to)
else:
Code example #6
File: identify.py Project: mlhubber/objects
    get_model_api,
    tab_complete_path,
    validateURL,
)

import json
import os
import sys
import readline
import urllib
from mlhub import utils as mlutils
import socket

# The working dir of the command which invokes this script.

CMD_CWD = mlutils.get_cmd_cwd()

# Utilities


def _score_for_one_img(img, label='image'):
    """Score a single image given by URL or path.

    Args:
        img (str): a URL to an image, or a path to an image.
    """

    try:
        jsonimg = img_url_to_json(img, label=label)
    except (urllib.error.URLError, socket.gaierror, FileNotFoundError,
            OSError):
Code example #7
credentials = CognitiveServicesCredentials(subscription_key)

# Create client.

client = ComputerVisionClient(endpoint, credentials)

# Check that the supplied URL or path exists and is an image.

# Send the provided image (URL or path) to Azure to generate a thumbnail.

url = args.path

width = 50
height = 50

if is_url(url):
    analysis = client.generate_thumbnail(width, height, url)
    sname = re.sub(r'\.(\w+)$', r'-thumbnail.\1', os.path.basename(urlparse(url).path))
    sname = os.path.join(get_cmd_cwd(), sname)
else:
    path = os.path.join(get_cmd_cwd(), url)
    with open(path, 'rb') as fstream:
        analysis = client.generate_thumbnail_in_stream(width, height, fstream)
    sname = re.sub(r'\.(\w+)$', r'-thumbnail.\1', path)

for x in analysis:
    image = Image.open(io.BytesIO(x))

image.save(sname)
print(sname)
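
The loop above decodes each chunk from the stream and keeps the last one. A slightly more defensive variant (a sketch, not the project's code) accumulates the whole stream before decoding:

# Sketch: accumulate all chunks before decoding, in case the
# thumbnail arrives in more than one piece.
buf = io.BytesIO()
for chunk in analysis:
    buf.write(chunk)
buf.seek(0)
image = Image.open(buf)
image.save(sname)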
Code example #8
File: sentiment.py Project: gjwgit/zynlp
option_parser.add_argument('sentence', nargs='*', help='sentence to analyse')

option_parser.add_argument('--input',
                           '-i',
                           help='path to a text file of sentences to analyse')

args = option_parser.parse_args()

# ----------------------------------------------------------------------
# Read the sentences to be analysed.
# ----------------------------------------------------------------------

text = ""
if args.input:
    with open(os.path.join(get_cmd_cwd(), args.input), "r") as f:
        text = f.read()
elif args.sentence:
    text = " ".join(args.sentence)

# Split the text into a list of sentences. Each sentence is sent off
# for analysis.

text = " ".join(text.splitlines())
text = re.sub("\\. +", ".\n", text)
text = text.splitlines()
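
# For example (hypothetical input), "It works. I like it." becomes
# ["It works.", "I like it."], so each sentence is scored separately.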

sentiment_map = {2: "neutral", 1: "positive", 0: "negative"}

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

MAX_LEN = 256
Code example #9
#-----------------------------------------------------------------------

fname = "results.xlsx"

mlcat(
    "Saving to Spreadsheet", f"""\
The results can be saved to a spreadsheet '{fname}' with a worksheet (tab) for each of the
measures listed above. From this spreadsheet it is straightforward for spreadsheet jockeys
to create any required plots.
""")

sys.stdout.write("Do you want to save the results [y/N]? ")
choice = input().lower().strip()

if choice in ("y", "yes"):
    with pd.ExcelWriter(os.path.join(get_cmd_cwd(), fname)) as writer:
        for k in list(outcomes.keys()):
            df = pd.DataFrame(outcomes[k])
            df.to_excel(writer, sheet_name=k, header=False, index=False)
print()

#-----------------------------------------------------------------------
# Plot
#-----------------------------------------------------------------------

measure = "deaths"

mlcat(
    "Generating Plots", f"""\
Plots can be created from the input and output datasets.
As a simple example we plot the Expected Daily {measure.capitalize()}
Code example #10
    speech_recognizer.stop_continuous_recognition()
    global done
    done = True


# Create an instance of a speech config with the provided subscription
# key and service region. Then create an audio configuration to load
# the audio from file rather than from microphone. A sample audio file
# is available as harvard.wav from:
#
# https://github.com/realpython/python-speech-recognition/raw/master/
# audio_files/harvard.wav
#
# A recognizer is then created with the given settings.

pth = os.path.join(get_cmd_cwd(), args.path)

speech_config = speechsdk.SpeechConfig(subscription=subscription_key,
                                       region=region)
audio_config = speechsdk.audio.AudioConfig(use_default_microphone=False,
                                           filename=pth)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config,
                                               audio_config=audio_config)

# We connect callbacks to the events fired by the speech
# recognizer. Most are commented out as examples here to allow tracing
# if you are interested in exploring the interactions with the server.
#
# speech_recognizer.recognizing.connect(lambda evt:
#                                       print('RECOGNIZING: {}'.format(evt)))
# speech_recognizer.session_started.connect(lambda evt:
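
The snippet is cut off among the commented-out callback examples. The usual continuation in the Speech SDK samples, sketched here on the assumption that the function at the top of this snippet is a stop callback named stop_cb:

# Sketch of the usual continuation (names assumed): connect the stop
# events, then run until the 'done' flag is set by the callback.
#
#   speech_recognizer.session_stopped.connect(stop_cb)
#   speech_recognizer.canceled.connect(stop_cb)
#
#   speech_recognizer.start_continuous_recognition()
#   while not done:
#       time.sleep(0.5)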
Code example #11
File: transcribe.py Project: gjwgit/DeepSpeech
def main():

    # -----------------------------------------------------------------------
    # Load pre-built models
    # -----------------------------------------------------------------------

    scorer = os.path.join(os.getcwd(), "deepspeech-0.9.3-models.scorer")
    model = os.path.join(os.getcwd(), "deepspeech-0.9.3-models.pbmm")

    parser = argparse.ArgumentParser(
        description='Running DeepSpeech inference.')

    parser.add_argument('--model',
                        default=model,
                        help='Path to the model (protocol buffer binary file)')
    parser.add_argument('--scorer',
                        default=scorer,
                        help='Path to the external scorer file')
    parser.add_argument('audio', help='path or url to wav file')
    parser.add_argument('--beam_width',
                        type=int,
                        help='Beam width for the CTC decoder')
    parser.add_argument(
        '--lm_alpha',
        type=float,
        help='Language model weight (lm_alpha). If not specified, '
             'use default from the scorer package.')
    parser.add_argument(
        '--lm_beta',
        type=float,
        help='Word insertion bonus (lm_beta). If not specified, '
             'use default from the scorer package.')
    parser.add_argument('--extended',
                        required=False,
                        action='store_true',
                        help='Output string from extended metadata')
    parser.add_argument(
        '--json',
        required=False,
        action='store_true',
        help='Output json from metadata with timestamp of each word')
    parser.add_argument(
        '--candidate_transcripts',
        type=int,
        default=3,
        help='Number of candidate transcripts to include in JSON output')
    parser.add_argument('--hot_words',
                        type=str,
                        help='Hot-words and their boosts.')
    args = parser.parse_args()

    path = args.audio

    if is_url(path):
        response = requests.get(path)
        if response.status_code != 200:
            print(
                f"The URL does not appear to exist. Please check.\n    {path}")
            sys.exit()
    else:
        path = os.path.join(get_cmd_cwd(), path)

    ds, desired_sample_rate = utils.load(args.model, args.scorer, False,
                                         args.beam_width, args.lm_alpha,
                                         args.lm_beta, args.hot_words)
    utils.deepspeech(ds, desired_sample_rate, path, "transcribe", False,
                     args.extended, args.json, args.candidate_transcripts)
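
For reference, a typical invocation of this script (file names are hypothetical):

# python transcribe.py recording.wav
# python transcribe.py --json --candidate_transcripts 5 recording.wav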
Code example #12
        url = os.path.abspath(os.path.expanduser(url))

        if CMD_CWD != '':
            os.chdir(oldwd)

        if os.path.isdir(url):
            for img in os.listdir(url):
                img_file = os.path.join(url, img)
                _colorize_one_img(img_file)
        else:
            _colorize_one_img(url)


# The working dir of the command which invokes this script.

CMD_CWD = get_cmd_cwd()


# Setup input path completion

readline.set_completer_delims('\t')
readline.parse_and_bind("tab: complete")
readline.set_completer(_tab_complete_path)

# Scoring

if len(args.path) == 0:
    try:
        url = input("Path or URL of images to colorize (Quit by Ctrl-d):\n"
                    "(You could try images in '~/.mlhub/colorize/images/')\n"
                    "> ")
    except EOFError:
        print()