# Example 1 (scraped separator: "Ejemplo n.º 1", score 0)
import time

from distutils.version import StrictVersion as ver
from textwrap import fill
from PIL import Image

# pip3 install azure-cognitiveservices-vision-computervision

from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from azure.cognitiveservices.vision.computervision import VERSION as azver
from azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes
from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes
from msrest.authentication import CognitiveServicesCredentials
from utils import catch_exception

import sys  # required by sys.exit() below; missing from the import block above

# Abort early if the installed Azure Computer Vision SDK is too old:
# releases before 0.6.0 have an incompatible API surface.
if ver(azver) < ver("0.6.0"):
    sys.exit(
        f"""*** WARNING *** Currently you have installed version {azver} of the
Azure Cognitive Services Computer Vision library. This might have
been installed automatically as part of the *configure* of the
package. Some incompatible changes have emerged in recent
upgrades. Please upgrade to the latest version of that library using:

    pip3 install --upgrade azure-cognitiveservices-vision-computervision
""")

# ----------------------------------------------------------------------
# Request subscription key and endpoint from user.
# ----------------------------------------------------------------------
# NOTE(review): get_private() is presumably supplied by the `utils`
# module imported above — confirm it returns a (key, endpoint) pair.
key, endpoint = get_private()
# Example 2 (scraped separator: "Ejemplo n.º 2", score 0)
def infer_model_direct(schema_str,
                       stage,
                       data,
                       data_as_float=False,
                       blend_pct=0,
                       model_overrides=None,
                       prediction_type=None):
    """Infer model with directly passing in all required experience/user data.
    Still downloads model to test.

    Parameters:
        schema_str: version string; parsed with ver() (StrictVersion) and
            passed through to prediction_to_dict / prediction_extended.
        stage: deployment stage suffix used to build the S3 bucket name
            'wibsie-ml3-sagebucket-<stage>'.
        data: nested input dict (layout below).
        data_as_float: if True, `data` is assumed to already hold one float
            per feature column keyed by the names in
            model_helper.FEATURE_COLS_ALL; if False, the nested tables are
            converted via model_helper.table_to_floats.
        blend_pct: blend percentage attached to the prediction attributes.
        model_overrides: optional overrides forwarded to
            model_helper.table_to_floats (unused when data_as_float=True).
        prediction_type: optional value forwarded to prediction_extended.

    Returns:
        The extended prediction structure from prediction_extended().

    NOTE(review): when data_as_float is False this function also reads
    data['weatherreport']['sunrise'] and ['sunset'], which are NOT listed
    in the layout below — confirm callers supply them.

    Expects data in the form of:
        data = {
            'weatherreport': {
                'apparentTemperature': <float>,
                'cloudCover': <float>,
                'humidity': <float>, # converted to humidity_temp
                'precipIntensity': <float>,
                'precipProbability': <float>,
                'temperature': <float>,
                'windGust': <float>,  # converted to burst
                'windSpeed': <float>,
                'precipType': <str or None>,
                'uvIndex': <float>
            },
            'experience': {
                'activity': <str>,
                'upper_clothing': <str>,
                'lower_clothing': <str>
            },
            'location': {},
            'user': {
                'user_id': <str>,
                'model_created': <str>,
                'model_job_name': <str>
            }
        }
    """
    # NOTE(review): os, boto3, botocore, tarfile, tf, model, model_helper,
    # prediction_to_dict and prediction_extended must be imported elsewhere
    # in this file — they are not in the visible import block.

    # Configuration variables
    bucket = 'wibsie-ml3-sagebucket-' + stage
    bucket_prefix = 'sagemaker'
    region = 'us-east-1'  # NOTE(review): unused below — S3 session is created without a region
    file_path = ''  # empty prefix: files land in the current working directory
    schema_obj = ver(schema_str)

    # Setup filesystem information
    # S3 key prefix: sagemaker/<user_id>/models/<model_created>
    model_artifacts_location = os.path.join(bucket_prefix,
                                            data['user']['user_id'], 'models',
                                            data['user']['model_created'])
    model_prefix = 'model_' + data['user']['user_id'] + '_' + data['user'][
        'model_created']
    local_file = model_prefix + '.tar.gz'
    local_file_path = file_path + local_file
    extract_path = file_path + model_prefix

    # Only download and extract if data doesn't already exist
    if not os.path.exists(extract_path):
        print('Downloading and extracting data: ', model_artifacts_location,
              local_file_path, extract_path)
        try:
            boto3.Session().resource('s3').Bucket(bucket).download_file(
                model_artifacts_location + '/model.tar.gz', local_file_path)
            tarfile.open(local_file_path, 'r').extractall(extract_path)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                # Missing model archive is tolerated here; extract_path will
                # not exist and the os.walk below yields nothing.
                print("Model zip file does not exist: ", e)
            else:
                print("Model zip file download threw unexpected error: ", e)
                raise
    else:
        print('Using locally available model')

    # Update extract path: locate the directory inside the extracted tree
    # that contains the TF checkpoint (identified by a .pbtxt file).
    # NOTE(review): the `break` only exits the inner files loop — os.walk
    # keeps visiting further directories, so the LAST directory containing
    # a .pbtxt wins; and if none is found, extract_path becomes None with
    # no guard (unlike infer() below, which checks for this) — confirm.
    final_extract_path = None
    for root, dirs, files in os.walk(extract_path):
        for file in files:
            if file.endswith('.pbtxt'):
                final_extract_path = root
                break

    extract_path = final_extract_path
    print('Updated extract path: ', extract_path)

    # Convert data to inputs
    if data_as_float == False:
        # Modify data to support sun intensity: synthesize the raw 'daily'
        # block that model_helper expects from sunrise/sunset times.
        data['weatherreport']['raw'] = {
            'daily': {
                'data': [{
                    'sunriseTime': data['weatherreport']['sunrise'],
                    'sunsetTime': data['weatherreport']['sunset']
                }]
            }
        }

        # Create input for model
        model_input_all = model_helper.table_to_floats(data['weatherreport'],
                                                       data['experience'],
                                                       data['location'],
                                                       model_overrides)

        # Convert input to dict of lists (input func will restrict cols);
        # -1 is a placeholder label since we are predicting, not training.
        model_input = {model.LABEL_COL: [-1]}
        for i in range(len(model_input_all)):
            model_input[model_helper.FEATURE_COLS_ALL[i]] = [
                model_input_all[i]
            ]

    else:
        # Data already holds one float per feature column, keyed by name.
        model_input = {model.LABEL_COL: [-1]}
        for col in model_helper.FEATURE_COLS_ALL:
            model_input[col] = [data[col]]

    print('model_input', model_input)

    # Load model: warm-start a fresh LinearClassifier from the extracted
    # checkpoint rather than training.
    tf_model = tf.estimator.LinearClassifier(
        feature_columns=model.get_feature_columns(),
        n_classes=3,
        warm_start_from=extract_path)

    # Setup prediction (lazy input_fn; single epoch, no shuffling)
    pred_iter = tf_model.predict(
        lambda: model.easy_input_function(model_input,
                                          label_key=model.LABEL_COL,
                                          num_epochs=1,
                                          shuffle=False,
                                          batch_size=5))

    # Run prediction iteration
    pred_raw = []
    for pred_dict in pred_iter:
        print('pred_dict:', pred_dict)
        pred_raw.append(pred_dict)

    # Convert raw prediction result to dict
    attribute_array = [{'blend': blend_pct}]
    prediction_json = prediction_to_dict(pred_raw, attribute_array, schema_obj)
    print('Prediction json: ', prediction_json)

    # Adds extended values to prediction result
    # prediction_type = None
    # if config.get('prediction_type'):
    #     print('Reading prediction_type from config:', config['prediction_type'])
    #     prediction_type = config['prediction_type']

    prediction_json_extended = prediction_extended(prediction_json, schema_obj,
                                                   prediction_type)

    print('Prediction json extended: ', prediction_json_extended)

    return prediction_json_extended
# Example 3 (scraped separator: "Ejemplo n.º 3", score 0)
# Check the URL supplied or path exists and is an image.

# Send provided image (url or path) to azure to extract text.

# NOTE(review): `args`, `client`, `TextRecognitionMode`, `is_url` and
# `get_cmd_cwd` are defined elsewhere in this file/package — not visible
# in this excerpt.
url = args.path

# Choose between handwritten and printed. Default is printed. Use
# --handwritten to use the handwritten model. New release of the pip
# package 0.4.0 does not have a mode anymore. We won't use it if
# version is > 0.3.0.

# https://github.com/Azure/azure-sdk-for-python/issues/5889

if args.handwritten:
    mode = TextRecognitionMode.handwritten
    if ver(azver) > ver("0.3.0"):
        # Flag is accepted for backward compatibility but ignored on new SDKs.
        sys.stderr.write("The --handwritten option is no longer required.\n")
else:
    mode = TextRecognitionMode.printed
raw = True  # request the raw HTTP response so Operation-Location is readable
custom_headers = None
numberOfCharsInOperationId = 36  # length of the operation id at the end of Operation-Location

# Asynchronous call.

if ver(azver) > ver("0.3.0"):
    if is_url(url):
        rawHttpResponse = client.batch_read_file(url, custom_headers, raw)
    else:
        path = os.path.join(get_cmd_cwd(), url)
        # NOTE(review): the body of this `with` (and the rest of this
        # script) is truncated in this excerpt.
        with open(path, 'rb') as fstream:
# Example 4 (scraped separator: "Ejemplo n.º 4", score 0)
def infer(event, context):
    """Deploy previously uploaded model locally and make a prediction.

    AWS Lambda handler. Resolves the user's (or a global fallback) trained
    model from S3, runs a TF LinearClassifier prediction over the stitched
    user/weather/experience/location data, stores the result on the
    experience record and returns it.

    Parameters:
        event: Lambda event dict. Recognized keys: 'runlocal', 'warm_only',
            'config_stage', 'user_id', 'experience_created',
            'user_id_global', and optionally 'body'/'queryStringParameters'
            when invoked through API Gateway.
        context: Lambda context object (only printed).

    Returns:
        dict with 'statusCode' and 'body' (JSON prediction on 200, error
        text plus the echoed event on 500).

    NOTE(review): os, json, boto3, botocore, tarfile, shutil, Decimal, Key,
    tf, model, model_helper, prediction_to_dict, prediction_extended,
    prediction_decimal and ver must be imported elsewhere in this file.
    """

    print('Event: ', event)
    print('Context: ', context)

    # Read in relevant environment variables, and allow for local run
    if event.get('runlocal'):
        print('Running local and using environment variable placeholders')
        bucket = 'wibsie-ml3-sagebucket-dev'
        bucket_prefix = 'sagemaker'
        region = 'us-east-1'
        stage = 'dev'
        role = 'arn:aws:iam::530583866435:role/service-role/AmazonSageMaker-ExecutionRole-20180616T150039'
        file_path = ''
    else:
        print('Running using importing environments')
        bucket = os.environ['SAGE_BUCKET']
        bucket_prefix = os.environ['SAGE_BUCKET_PREFIX']
        region = os.environ['REGION']
        stage = os.environ['STAGE']
        service = os.environ['SERVICE']
        function_prefix = os.environ['FUNCTION_PREFIX']
        role = os.environ['SAGE_ROLE']
        # /tmp is the only writable path inside a Lambda container
        file_path = '/tmp/'
        #print('SM execution role: ', sm.get_execution_role()) #not needed
        # NOTE(review): service, function_prefix and role are read but never
        # used below in this function.

    # Short-circuit for scheduled keep-warm invocations
    if event.get('warm_only'):
        print('Warming only, exiting')
        return {
            "message": "Infer function exiting for warm only",
            "event": event
        }

    # NOTE(review): now_epoch is never used below — confirm it is needed.
    now_epoch = round(time.time() * 1000)

    # Parse AWS HTTP request (optional): API Gateway wraps the payload in
    # 'body' (JSON string) and passes query params separately.
    queryParams = None
    if 'body' in event:
        queryParams = event.get('queryStringParameters')
        event = json.loads(event['body'])

    # Load schema version (optional, from ?schema=x.y query parameter)
    schema = None
    schema_obj = None
    if queryParams and queryParams.get('schema'):
        schema = queryParams['schema']
        schema_obj = ver(schema)
        print('Loaded schema version: ', schema, schema_obj)

    dynamodb = boto3.resource('dynamodb', region_name=region)

    # Get configuration parameters (single config row per stage)
    config_stage = stage
    if event.get('config_stage'):
        config_stage = event['config_stage']
        print('Overriding config_stage: ', stage, config_stage)

    config = dynamodb.Table('wibsie-config').query(
        KeyConditionExpression=Key('stage').eq(config_stage))['Items'][0]
    print('Config: ', config)

    # Retrieve user info
    user_id = event['user_id']
    experience_created = int(event['experience_created'])
    table_users = dynamodb.Table('wibsie-users-' + stage)

    response = table_users.query(KeyConditionExpression=Key('id').eq(user_id))
    data_users = response['Items']

    if len(data_users) == 0:
        print('No user found')
        return {"statusCode": 500, "body": "No user found", "event": event}
    else:
        data_user = data_users[0]

    # Determine if user has a model loaded: either the current or the
    # previous model must be marked completed.
    user_has_model = False

    if data_user.get('model'):
        if data_user['model'].get('model_created') and \
        data_user['model'].get('model_completed') and \
        data_user['model']['model_completed'] == 'true':
            user_has_model = True

        elif data_user['model'].get('model_created_prev') and \
        data_user['model'].get('model_completed_prev') and \
        data_user['model']['model_completed_prev'] == 'true':
            user_has_model = True

        else:
            print('No completed model found')

    else:
        print('Model key is not loaded for user')

    # Setup user for model: pick which user's model to load.
    # Precedence: event override > config override > per-user ('user'
    # config sentinel, requires schema + completed model) > hardcoded default.
    blend_pct = 0.0
    print('Starting user model parse: ', event.get('user_id_global'),
          config.get('user_id_global'), schema, user_has_model)

    if event.get('user_id_global'):
        print('Using event user_id: ', event['user_id_global'])
        user_id_global = event['user_id_global']

    elif config.get('user_id_global') and config['user_id_global'] != 'user':
        print('Using config user_id: ', config['user_id_global'])
        user_id_global = config['user_id_global']

    elif config.get('user_id_global') == 'user' and schema and user_has_model:
        print('Setting user_id_global to user_id based on config')
        user_id_global = user_id

        # Blend percentage only applies when using the user's own model
        if data_user['model'].get('model_blend_pct'):
            blend_pct = float(data_user['model']['model_blend_pct'])
        else:
            blend_pct = 100.0

    else:
        user_id_global = 'be1f64e0-6c1d-11e8-b0b9-e3202dfd59eb'  #'global'
        print('Using default user_id: ', user_id_global, user_has_model)

    # NOTE(review): user_bucket is computed but never used below.
    user_bucket = os.path.join(bucket, bucket_prefix, user_id_global)

    # Retrieve model user info
    response = table_users.query(
        KeyConditionExpression=Key('id').eq(user_id_global))
    data_user_global = response['Items'][0]

    # Check user model details for actual load
    model_valid = False
    model_keys_expected = [
        'model_created', 'model_job_name', 'model_created_prev',
        'model_job_name_prev'
    ]

    if data_user_global.get('model'):
        model_keys = data_user_global['model'].keys()

        # NOTE(review): this loop breaks on a missing key but records
        # nothing — model_valid is still set True below, so the expected-key
        # check is effectively a no-op and a missing 'model_created*' key
        # would raise KeyError in the Decimal conversion just after. Likely
        # intended: `else: model_valid = True` on the for, or a flag here.
        for k in model_keys_expected:
            if k not in model_keys:
                break

        # Convert created decimal to int (DynamoDB returns numbers as Decimal)
        if type(data_user_global['model']['model_created']) == Decimal:
            data_user_global['model']['model_created'] = int(
                data_user_global['model']['model_created'])

        if type(data_user_global['model']['model_created_prev']) == Decimal:
            data_user_global['model']['model_created_prev'] = int(
                data_user_global['model']['model_created_prev'])

        model_valid = True

    if not model_valid:
        print('Valid model details not found', data_user_global)
        return {
            "statusCode": 500,
            "body": "Valid model details not found",
            "event": event
        }

    # Download and extract model file: try current model first, then fall
    # back to the previous one ('' vs '_prev' key suffix).
    data_user_global['model']['model_available'] = False

    suf_list = ['']
    if data_user_global['model']['model_created_prev'] != 'none':
        suf_list.append('_prev')

    for suf in suf_list:
        print('Attempting model suffix: ', suf_list.index(suf))
        model_artifacts_location = os.path.join(
            bucket_prefix, user_id_global, 'models',
            str(data_user_global['model']['model_created' + suf]))
        model_prefix = 'model_' + user_id_global + '_' + str(
            data_user_global['model']['model_created' + suf])
        local_file = model_prefix + '.tar.gz'
        local_file_path = file_path + local_file
        extract_path = file_path + model_prefix

        # Only download and extract if data doesn't already exist
        if not os.path.exists(extract_path):
            # Clean up tmp folder before download (Lambda containers are
            # reused and /tmp space is limited)
            if 'tmp' in file_path:
                print('Starting tmp cleanup')
                for item in os.listdir(file_path):
                    absolute_item = os.path.join(file_path, item)

                    if os.path.isfile(absolute_item):
                        os.unlink(absolute_item)

                    elif os.path.isdir(absolute_item):
                        shutil.rmtree(absolute_item)

            print('Downloading and extracting data: ',
                  model_artifacts_location, local_file_path, extract_path)

            try:
                boto3.Session().resource('s3').Bucket(bucket).download_file(
                    model_artifacts_location + '/model.tar.gz',
                    local_file_path)
                tarfile.open(local_file_path, 'r').extractall(extract_path)
                data_user_global['model']['model_available'] = True

            except botocore.exceptions.ClientError as e:
                if e.response['Error']['Code'] == "404":
                    # Tolerated: fall through and try the next suffix
                    print("Model zip file does not exist: ", e)
                else:
                    print("Model zip file download threw unexpected error: ",
                          e)
                    raise
        else:
            print('Using locally available model')
            data_user_global['model']['model_available'] = True

        if data_user_global['model']['model_available']:
            print('Using model suffix: ', suf_list.index(suf))
            data_user_global['model'][
                'model_created_available'] = data_user_global['model'][
                    'model_created' + suf]
            data_user_global['model'][
                'model_job_name_available'] = data_user_global['model'][
                    'model_job_name' + suf]
            data_user_global['model'][
                'model_extract_path_available'] = extract_path
            break

    # Future resolve extract_path: find the checkpoint directory (marked by
    # a .pbtxt file) inside the extracted tree.
    # NOTE(review): the `break` only exits the inner files loop, so the
    # LAST directory containing a .pbtxt wins — confirm intended.
    final_extract_path = None
    for root, dirs, files in os.walk(
            data_user_global['model']['model_extract_path_available']):
        for file in files:
            if file.endswith('.pbtxt'):
                final_extract_path = root
                break

    if not final_extract_path:
        data_user_global['model']['model_available'] = False
    else:
        data_user_global['model'][
            'model_extract_path_available'] = final_extract_path
        print('final_extract_path:', final_extract_path)

    # Break if model cannot be resolved
    if not data_user_global['model']['model_available']:
        print('No model could be resolved')
        return {
            "statusCode": 500,
            "body": "No model could be resolved",
            "event": event
        }

    ## Stitch together data for prediction input
    # Retrieve experience data
    table_experiences = dynamodb.Table('wibsie-experiences-' + stage)

    response = table_experiences.query(
        KeyConditionExpression=Key('created').eq(experience_created)
        & Key('user_id').eq(user_id))
    data_experiences = response['Items']

    if len(data_experiences) == 0:
        print('No experiences found')
        return {
            "statusCode": 500,
            "body": "No experiences found",
            "event": event
        }
    else:
        data_experience = data_experiences[0]

    # Get weather data
    table_weatherreports = dynamodb.Table('wibsie-weatherreports-' + stage)

    response = table_weatherreports.query(
        KeyConditionExpression=Key('expires').eq(
            int(data_experience['weather_expiration']))
        & Key('zip').eq(data_experience['zip']))
    data_weatherreports = response['Items']

    if len(data_weatherreports) == 0:
        print('No weather report found')
        return {
            "statusCode": 500,
            "body": "No weather report found",
            "event": event
        }
    else:
        data_weatherreport = data_weatherreports[0]

    # Get location loop (sleep in case new loc and data not yet loaded)
    infer_loc_loops = 2
    if config.get('infer_loc_loops'):
        infer_loc_loops = int(config['infer_loc_loops'])
        print('Overriding infer_loc_loops default: ', infer_loc_loops)

    infer_loc_sleep = 1
    if config.get('infer_loc_sleep'):
        infer_loc_sleep = int(config['infer_loc_sleep'])
        print('Overriding infer_loc_sleep default: ', infer_loc_sleep)

    # Retry until the location row has loc_type populated (another process
    # fills it in asynchronously); proceeds with the last read either way.
    for i in range(0, infer_loc_loops):
        table_locations = dynamodb.Table('wibsie-locations-' + stage)

        response = table_locations.query(
            KeyConditionExpression=Key('zip').eq(data_experience['zip']))
        data_locations = response['Items']

        if len(data_locations) == 0:
            print('No location data found')
            return {
                "statusCode": 500,
                "body": "No location data found",
                "event": event
            }
        else:
            data_location = data_locations[0]

        if data_location.get('loc_type'):
            break
        else:
            print('loc_type not defined, sleeping and trying again')
            time.sleep(infer_loc_sleep)

    # Create input for model
    model_overrides = {}
    if config.get('model_overrides'):
        print('Found model_overrides:', config['model_overrides'])
        model_overrides = config['model_overrides']

    model_input_all = model_helper.table_to_floats(data_user,
                                                   data_weatherreport,
                                                   data_experience,
                                                   data_location,
                                                   model_overrides)

    # Convert input to dict of lists (input func will restrict cols);
    # -1 is a placeholder label since this is prediction, not training.
    model_input = {model.LABEL_COL: [-1]}
    for i in range(len(model_input_all)):
        model_input[model_helper.FEATURE_COLS_ALL[i]] = [model_input_all[i]]

    # Load model: warm-start a LinearClassifier from the extracted checkpoint
    tf_model = tf.estimator.LinearClassifier(
        feature_columns=model.get_feature_columns(),
        n_classes=3,
        model_dir=data_user_global['model']['model_extract_path_available'],
        warm_start_from=data_user_global['model']
        ['model_extract_path_available'])

    # Setup prediction (lazy input_fn; single epoch, no shuffling)
    pred_iter = tf_model.predict(
        lambda: model.easy_input_function(model_input,
                                          label_key=model.LABEL_COL,
                                          num_epochs=1,
                                          shuffle=False,
                                          batch_size=5))

    # Run prediction iteration
    pred_raw = []
    for pred_dict in pred_iter:
        print('pred_dict:', pred_dict)
        pred_raw.append(pred_dict)

    # Convert raw prediction result to dict
    attribute_array = [{'blend': blend_pct}]
    prediction_json = prediction_to_dict(pred_raw, attribute_array, schema_obj)
    print('Prediction json: ', prediction_json)

    # Adds extended values to prediction result
    prediction_type = None
    if config.get('prediction_type'):
        print('Reading prediction_type from config:',
              config['prediction_type'])
        prediction_type = config['prediction_type']

    prediction_json_extended = prediction_extended(prediction_json, schema_obj,
                                                   prediction_type)

    print('Prediction json extended: ', prediction_json_extended)

    # Pull first value and add to experience table (only single-item
    # results are persisted; Decimal conversion is required by DynamoDB)
    if len(prediction_json_extended) > 1:
        print('Skipping database storage due to len greater than 1')
    else:
        prediction_json_decimal = prediction_decimal(prediction_json_extended)

        response = table_experiences.update_item(
            Key={
                'created': experience_created,
                'user_id': user_id
            },
            UpdateExpression=
            """set comfort_level_prediction=:comfort_level_prediction, prediction_result=:prediction_result""",
            ExpressionAttributeValues={
                ':comfort_level_prediction':
                prediction_json_decimal[0]['comfortable'],
                ':prediction_result':
                prediction_json_decimal[0]
            },
            ReturnValues="UPDATED_NEW")

        print('table_experiences updated result: ', response)

    return {"statusCode": 200, "body": json.dumps(prediction_json_extended)}