Exemplo n.º 1
0
def show_environment(
        workspace=None,
        environment_name=None,
        environment_version=None,
        # We should enforce a logger
        logger=None):
    """Fetch a registered environment and return it serialized as a plain dict."""
    environment = Environment.get(workspace, environment_name, environment_version)
    return Environment._serialize_to_dict(environment)
Exemplo n.º 2
0
def get_inference_config(aml_interface):
    """Build an InferenceConfig pairing score.py with the registered AML environment.

    `aml_interface` must expose a `.workspace` attribute used for the lookup.
    """
    environment = Environment.get(workspace=aml_interface.workspace,
                                  name=AML_ENVIRONMENT_NAME)
    entry_script = os.path.join(__here__, 'score.py')
    return InferenceConfig(entry_script=entry_script, environment=environment)
Exemplo n.º 3
0
def download_environment(
        workspace=None,
        environment_name=None,
        environment_version=None,
        environment_directory=None,
        environment_overwrite=None,
        # We should enforce a logger
        logger=None):
    """Save a registered environment's definition into a local directory."""
    environment = Environment.get(workspace, environment_name, environment_version)
    environment.save_to_directory(environment_directory, environment_overwrite)
Exemplo n.º 4
0
def main():
    """Register the recommender model and deploy it to an AKS cluster.

    Flow: connect to the workspace, register the model archive, configure a
    PySpark environment plus scoring script, then attach (or reuse) the AKS
    compute target and deploy. Fatal errors exit the process.
    """
    # Authenticate against the workspace; a ProjectSystemException is treated
    # as an auth failure and reported to the caller as a JSON string.
    try:
        ws = connectToWorkspace(TENANT_ID, APP_ID, SP_PASSWORD,
                                SUBSCRIPTION_ID, RESOURCE_GROUP,
                                WORKSPACE_NAME)
    except ProjectSystemException as err:
        print('Authentication did not work.')
        return json.dumps('ProjectSystemException')
    except Exception as err:
        print(err)
        sys.exit()
    print("connect")
    # Upload and register the model archive from the current working directory.
    model = Model.register(model_path=os.path.join(
        os.getcwd(), "retailai_recommendation_model.zip"),
                           model_name="retailai_recommendation_model",
                           description="Retail.AI Item-Based Recommender",
                           workspace=ws)
    print("model registered")

    # Start from the curated PySpark environment and add the pip packages the
    # scoring script needs.
    myenv = Environment.get(ws, name='AzureML-PySpark-MmlSpark-0.15')
    myenv.name = "myenv"
    conda_dep = CondaDependencies()
    conda_dep.add_pip_package("azureml-defaults")
    conda_dep.add_pip_package("azure-storage")
    conda_dep.add_pip_package("azure-storage-file-datalake")
    myenv.python.conda_dependencies = conda_dep
    print("Environment Configured")
    inference_config = InferenceConfig(entry_script='score.py',
                                       environment=myenv)

    aks_target_name = "raiaks"

    # Reuse the AKS compute target if it is already attached; otherwise attach
    # the existing cluster to the workspace. Any other error is fatal.
    try:
        aks_target = AksCompute(ws, aks_target_name)
        print(aks_target)
    except ComputeTargetException as err:
        aks_target = attachAksComputeToWorkspace(ws, RESOURCE_GROUP,
                                                 AKS_CLUSTER_NAME,
                                                 aks_target_name, True)
        print(aks_target)
    except Exception as err:
        print(err)
        sys.exit()
    try:
        deployToAks(ws, aks_target, "retail-ai-item-recommender", model,
                    inference_config, True)
    except Exception as err:
        print(err)
        sys.exit()
Exemplo n.º 5
0
def main():
    """Deploy the scoring web service using credentials from the environment."""
    workspace_name = os.environ['AML_WORKSPACE_NAME']
    resource_group = os.environ['RESOURCE_GROUP']
    subscription_id = os.environ['SUBSCRIPTION_ID']

    # Service-principal credentials for non-interactive authentication.
    spn_credentials = {
        'tenant_id': os.environ['TENANT_ID'],
        'service_principal_id': os.environ['SPN_ID'],
        'service_principal_password': os.environ['SPN_PASSWORD'],
    }

    aml_interface = AMLInterface(spn_credentials, subscription_id,
                                 workspace_name, resource_group)

    # Pair the local scoring script with the registered environment.
    entry_script = os.path.join(__here__, 'score.py')
    environment = Environment.get(workspace=aml_interface.workspace,
                                  name=AML_ENV_NAME)
    inference_config = InferenceConfig(entry_script=entry_script,
                                       environment=environment)
    deploy(aml_interface, inference_config, 'aml-pipeline-deploy-3')
Exemplo n.º 6
0
def deploy_models():
    """Deploy the registered aftershock model as an ACI web service.

    Uses the module-level workspace `ws`; prints the service logs, scoring
    URI, keys and token once deployment finishes.
    """
    deployment_config = AciWebservice.deploy_configuration(
        cpu_cores=1,
        memory_gb=1,
        tags={
            "data": "Earthquake",
            "method": "sklearn"
        },
        description='Predict aftershock situation '
        'using linear models in sklearn')

    # The environment was registered beforehand; fetch it instead of rebuilding.
    environment = Environment.get(workspace=ws, name='aftershock-env')
    inference_config = InferenceConfig(entry_script="predict.py",
                                       environment=environment)
    model = Model(ws, "aftershock_model")

    service_name = "sklearn-aftershock-svc-f41b"
    service = Model.deploy(workspace=ws,
                           name=service_name,
                           models=[model],
                           overwrite=True,
                           inference_config=inference_config,
                           deployment_config=deployment_config)
    service.wait_for_deployment(show_output=True)

    # Surface everything a caller needs to exercise the endpoint.
    print(service.get_logs())
    print(service.scoring_uri)
    print("service keys")
    print(service.get_keys())
    print("service token")
    print(service.get_token())
Exemplo n.º 7
0
def file_to_inference_config(workspace, inference_config_file, description):
    """Translate an inference-config file into an azureml InferenceConfig.

    Raises OMLException when the file does not name an environment, since the
    environment must be resolved from the workspace by name.
    """
    with open(inference_config_file) as stream:
        config = file_stream_to_object(stream)

        # The environment is mandatory; look it up in the workspace by name.
        if 'environment' not in config:
            raise OMLException("need to specify environment in --deploy-config-file")
        environment = Environment.get(workspace, name=config.get('environment')["name"])

        return InferenceConfig(
            entry_script=config.get('entryScript'),
            runtime=config.get('runtime'),
            conda_file=config.get('condaFile'),
            extra_docker_file_steps=config.get('extraDockerfileSteps'),
            source_directory=config.get('sourceDirectory'),
            enable_gpu=config.get('enableGpu'),
            base_image=config.get('baseImage'),
            base_image_registry=config.get('baseImageRegistry'),
            cuda_version=config.get('cudaVersion'),
            environment=environment,
            description=description)
print(args)

# Load the stored workspace from the local config file
ws = Workspace.from_config()

# Pick the registered training dataset variant based on the requested subset
# file format (.npy arrays vs the 10-second clip dataset)
if args.data_subset.endswith('npy'):
    train_dataset = Dataset.get_by_name(ws, name='birdsongs_npy')
else:
    train_dataset = Dataset.get_by_name(ws, name='birdsongs_10sec')

# get the validation dataset
val_test_dataset = Dataset.get_by_name(ws, name='birdsongs_10sec')

## Get saved environment
env = Environment.get(workspace=ws, name="birdsong-env-gpu")

# set the experiment name (a throwaway 'test' experiment in test mode)
if args.test:
    experiment_name = 'test'
else:
    experiment_name = 'birdsongs_2'
exp = Experiment(workspace=ws, name=experiment_name)

# get the compute cluster sized for the requested GPU count
# NOTE(review): there is no fallback branch — if args.gpus is not 1, 2 or 4,
# compute_name stays undefined and a later reference would raise NameError.
if args.gpus == 1:
    compute_name = "gpu-cluster-NC6"
elif args.gpus == 2:
    compute_name = "gpu-cluster-NC12"
elif args.gpus == 4:
    compute_name = "gpu-cluster-NC24"
Exemplo n.º 9
0
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

# Deploy an existing model from the model repository, in this case to a local
# service (ie a local docker container)
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig
from azureml.core import Model
from azureml.core.webservice import LocalWebservice
from workspace import get_workspace

workspace = get_workspace()

# Create an environment based on the AzureML-Minimal curated environment;
# clone() gives us a mutable copy we can add packages to.
env = Environment.get(workspace=workspace, name="AzureML-Minimal").clone("TestEnv")

# Add packages manually. This can also be done automatically from either a
# conda dependencies export or a pip requirements file
for pip_package in ["scikit-learn"]:
    env.python.conda_dependencies.add_pip_package(pip_package)

# Create the inference config from the deploy/ folder
inference_config = InferenceConfig(
    entry_script='deploy/main.py',
    environment=env)

# Use the following model from the repository
model = Model(workspace, 'model_from_test_experiment_1')

service = Model.deploy(
Exemplo n.º 10
0
from azureml.data import OutputFileDatasetConfig
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import PythonScriptStep

# Names of pre-existing workspace resources reused by this pipeline.
COMPUTE_TARGET_NAME = 'learnml-big'
ENV_NAME = 'gensim-environment'

print("Azure ML SDK Version: ", azureml.core.VERSION)

workspace = Workspace.from_config()
print(workspace.name, workspace.location, workspace.resource_group, sep='\t')

# Re-use pre-defined Compute Target and Environment

compute_target = workspace.compute_targets[COMPUTE_TARGET_NAME]
env = Environment.get(workspace=workspace, name=ENV_NAME)

# Step 1: data preparation

# Source corpus is pulled straight from the public 20 Newsgroups archive.
ds_20news_source = Dataset.File.from_files(
    path='http://qwone.com/~jason/20Newsgroups/20news-bydate.tar.gz')

# Pipeline output: tab-separated training data, auto-registered on completion.
train_ds = OutputFileDatasetConfig().read_delimited_files(separator='\t').register_on_complete(name='20news-train')

dataprep_step = PythonScriptStep(
    script_name="dataprep.py",
    source_directory="./topicmodel",
    compute_target=compute_target,
    arguments=[
        '--input', ds_20news_source.as_named_input('source').as_download(),
        '--output', train_ds
Exemplo n.º 11
0
def deploy(ws_name, model_name, path_to_model,
           environment_name, register_environment, pip_packages, conda_packages,
           cpu_cores, memory_gb, path_to_entry_script, service_name):
    """Register a model (and optionally its environment) and deploy it to ACI.

    Args:
        ws_name: Display name of the workspace (logging only; the workspace
            itself is loaded from the local config file).
        model_name: Name under which the model is registered.
        path_to_model: Local path of the model artifact to upload.
        environment_name: Name of the AML environment to use.
        register_environment: When True, (re)register the environment from
            pip_packages/conda_packages before fetching it.
        pip_packages: Pip dependencies for a newly registered environment.
        conda_packages: Conda dependencies for a newly registered environment.
        cpu_cores: ACI CPU allocation.
        memory_gb: ACI memory allocation.
        path_to_entry_script: Scoring entry script path.
        service_name: Name of the deployed web service.

    Returns:
        The scoring URI of the deployed service.
    """
    # Workspace is read from the local config file; ws_name is informational.
    ws = Workspace.from_config()
    print("Got Workspace {}".format(ws_name))

    model = Model.register(workspace=ws,
                           model_path=path_to_model,
                           model_name=model_name,
                           )
    print("Registered Model {}".format(model_name))

    # Register the environment only when asked; otherwise reuse the stored one.
    if register_environment:
        env = Environment(environment_name)
        cd = CondaDependencies.create(pip_packages=pip_packages,
                                      conda_packages=conda_packages)
        env.python.conda_dependencies = cd
        # Register environment to re-use later
        env.register(workspace=ws)
        print("Registered Environment")
    myenv = Environment.get(workspace=ws, name=environment_name)

    aciconfig = AciWebservice.deploy_configuration(
        cpu_cores=cpu_cores,
        memory_gb=memory_gb,
    )
    inference_config = InferenceConfig(entry_script=path_to_entry_script,
                                       environment=myenv)

    print("Deploying....... This may take a few mins, check the status in MLS after the function finishes executing")
    # BUG FIX: deploy under service_name — the parameter was previously unused
    # and the workspace display name was (incorrectly) used as the service name.
    service = Model.deploy(workspace=ws,
                           name=service_name,
                           models=[model],
                           inference_config=inference_config,
                           deployment_config=aciconfig, overwrite=True)

    service.wait_for_deployment(show_output=True)
    url = service.scoring_uri
    print(url)

    # Re-fetch the service by its actual name to pull the deployment logs.
    service = Webservice(ws, service_name)
    print(service.get_logs())

    return url
Exemplo n.º 12
0
from azureml.core.environment import Environment
from azureml.core.model import InferenceConfig
from azureml.core import Workspace
from azureml.core.authentication import AzureCliAuthentication

# Authenticate via the Azure CLI's cached login (no interactive prompt needed
# when `az login` has already been run).
interactive_auth = AzureCliAuthentication(cloud="AzureCloud")
# NOTE(review): subscription and resource group are hard-coded here — consider
# moving them to configuration.
ws = Workspace.get(name="Machine-Learning",
                   subscription_id="ecadfaca-baf6-4554-b903-5d4de6dea911",
                   resource_group="Projekt-Azure",
                   auth=interactive_auth)

# Clone the curated minimal environment so we can add our own packages.
env = Environment.get(ws, "AzureML-Minimal").clone("test_env")

for pip_package in ["sentence-transformers"]:
    env.python.conda_dependencies.add_pip_package(pip_package)

m_inference_config = InferenceConfig(entry_script="./entry_script.py",
                                     environment=env)
Exemplo n.º 13
0
def get_env(workspace, name, version='1'):
    """Return the environment registered as `name` at `version` in `workspace`."""
    return Environment.get(workspace=workspace, name=name, version=version)
Exemplo n.º 14
0
model.fit(X_train, Y_train)
filename = 'dummy.pkl'
# Use a context manager so the pickle file handle is closed deterministically
# (the previous open() call was never closed).
with open(filename, 'wb') as model_file:
    pickle.dump(model, model_file)

# register the dummy model
ws = Workspace.from_config()
registration_params = {
    'model_path': "dummy.pkl",
    'model_name': "dummy-model",
    'description': "mock test deployment",
    'workspace': ws
}
model = register_model(**registration_params)

# BUG FIX: Environment.get returns the fetched environment; the previous code
# called it on a throwaway Environment('my_env') instance and discarded the
# result, so the registered 'ls-ds-ml-env' environment was never actually used.
myenv = Environment.get(workspace=ws, name='ls-ds-ml-env')
conda_dep = CondaDependencies()
conda_dep.add_pip_package("azureml-defaults==1.3.0")
conda_dep.add_pip_package("joblib")
# NOTE(review): "json" is part of the Python standard library, not a pip
# package — this dependency likely never did anything useful; confirm and drop.
conda_dep.add_pip_package("json")
myenv.python.conda_dependencies = conda_dep
inference_config = InferenceConfig(entry_script="src/dummy_score.py",
                                   environment=myenv)
aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)

service_name = 'dummy-service'
try:
    service = Model.deploy(ws,
                           service_name, [model],
                           inference_config,
                           deployment_config=aci_config)
# COMMAND ----------

from azureml.core.webservice import AciWebservice, Webservice
from azureml.exceptions import WebserviceException
from azureml.core.model import InferenceConfig
from azureml.core.environment import Environment
from azureml.core.conda_dependencies import CondaDependencies

# Remove any existing service under the same name so the deploy below does not
# collide; a missing service (WebserviceException) is expected and ignored.
try:
    Webservice(ws, webservice_name).delete()
except WebserviceException:
    pass

# Reuse the curated PySpark environment for scoring.
env = Environment.get(ws, name='AzureML-PySpark-MmlSpark-0.15')

inference_config = InferenceConfig(entry_script="score_sparkml.py",
                                   environment=env)

# Small ACI instance with key-based auth enabled on the endpoint.
deployment_config = AciWebservice.deploy_configuration(cpu_cores=1,
                                                       memory_gb=1,
                                                       auth_enabled=True)

#image_config = ContainerImage.image_configuration(execution_script = "score_sparkml.py",
#                                    runtime = "spark-py",
#                                    conda_file = "mydeployenv.yml")

myservice = Model.deploy(ws, webservice_name, [mymodel], inference_config,
                         deployment_config)
Exemplo n.º 16
0
# Retrieve the workspace and create the experiment handle.
ws = utils.retrieve_workspace()
experiment = Experiment(workspace=ws, name='myexp')

# Resolve the compute target; abort the run if it cannot be found.
try:
    compute_name = os.environ.get("AML_COMPUTE_CLUSTER_NAME", "cpucluster")
    compute_target = ws.compute_targets[compute_name]
except ComputeTargetException as e:
    # NOTE(review): ws.compute_targets is dict-like and raises KeyError on a
    # missing name rather than ComputeTargetException — confirm which error
    # this handler is really meant to catch.
    print('Error while retrieving compute', e)
    sys.exit(-1)

# Prefer the environment registered in the workspace; fall back to a local
# environment definition folder if the lookup fails.
env = None
try:
    env = Environment.get(workspace=ws,
                          name=os.environ.get('AML_ENVIRONMENT', 'myenv'))
except Exception as e:
    print('Environment not found in workspace')
    print('Trying to retrieve from local config')

if env is None:
    try:
        dir_path = Path(__file__).resolve().parent.parent
        env_path = dir_path / '< folder to use >'
        env = Environment.load_from_directory(path=env_path)
    except Exception as e:
        print('Environment folder not found')
        print('Shutting everything down !')
        sys.exit(-1)

# BUG FIX: ScriptRunConfig's `script` must be relative to source_directory; the
# previous leading slash ('/train.py') made it an absolute path, so the run
# could not locate the training script.
src = ScriptRunConfig(source_directory='./src', script='train.py')
Exemplo n.º 17
0
                                      uniform)

# Names of the experiment and pre-existing workspace resources to reuse.
experiment_name = 'tm-gensim-20news'
compute_target_name = 'learnml-big'
env_name = 'gensim-environment'

print("Azure ML SDK Version: ", azureml.core.VERSION)

ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, sep='\t')

# Prepare experiment

exp = Experiment(workspace=ws, name=experiment_name)
compute_target = ws.compute_targets[compute_target_name]
env = Environment.get(workspace=ws, name=env_name)
ds = Dataset.get_by_name(workspace=ws, name='20news-train', version=2)

# Command-line arguments forwarded to train.py (LDA hyperparameters).
args = [
    '--input-data', ds.as_named_input('train_data'),
    #'--num-topics', 10,
    '--chunksize', 2000,
    '--passes', 20,
    '--iterations', 400
]

src = ScriptRunConfig(source_directory="./topicmodel",
                      script='train.py',
                      arguments=args,
                      compute_target=compute_target,
                      environment=env)
                   subscription_id=subscription_id,
                   resource_group=resource_group)

# Small ACI instance with key auth for the Titanic scoring endpoint.
aciconfig = AciWebservice.deploy_configuration(
    cpu_cores=1,
    memory_gb=1,
    tags={
        "data": "Titanic",
        "method": "sklearn"
    },
    description='Predict Titanic with sklearn',
    auth_enabled=True)

model = Model(ws, 'decision_tree_model')

# Reuse the environment registered earlier for training.
env = Environment.get(workspace=ws, name="sklearn-env")
inference_config = InferenceConfig(entry_script="src/score.py",
                                   environment=env)

# overwrite=True replaces any existing service with the same name.
service = Model.deploy(workspace=ws,
                       name='sklearn-titanic',
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aciconfig,
                       overwrite=True)

service.wait_for_deployment(show_output=True)

######### Lets test the web service

df = pd.read_csv(