Example #1
    # Record dataset shape and class balance alongside the CV results
    cv_results['n_features'] = X.shape[1]
    cv_results['y_0'] = y.tolist().count(0)
    cv_results['y_1'] = y.tolist().count(1)

    # Print and log the cross-validated PR-AUC for this run
    print(cv_results["mean_test_pr_auc"].to_string(index=False))
    run.log(name="mean_test_pr_auc",
            value=cv_results["mean_test_pr_auc"].to_string(index=False))

    # Ensure the output directory exists
    os.makedirs(args.train_model, exist_ok=True)

    timestamp_id = datetime.datetime.now()
    timestamp = timestamp_id.strftime("%m-%d-%Y_%H%M")

    model_name = "{}__{}.json".format(args.repo_owner, args.repo_name)
    output_path = os.path.join(args.train_model, model_name)

    with open(output_path, 'w') as outfile:
        cv_results.to_json(outfile, orient='table', index=False)
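    # The orient='table' layout round-trips: the frame can be restored later
    # with pd.read_json(output_path, orient='table'), assuming pandas is
    # imported as pd in this module.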

    # Get the blob storage associated with the workspace
    pipeline_datastore = Datastore(ws, "datastore_pipeline")

    # Upload the training results JSON for this repo to the pipeline blob folder
    pipeline_datastore.upload_files(
        [os.path.join(args.train_model, model_name)],
        target_path="train_model/{}/{}/{}".format(args.repo_owner,
                                                  args.repo_name, timestamp),
        overwrite=True)

print("Model is trained!")
Example #2

    }
    word_vectors = {
        "en_vectors_web_lg":
        "https://github.com/explosion/spacy-models/releases/download/en_vectors_web_lg-2.1.0/en_vectors_web_lg-2.1.0.tar.gz"
    }

    toDownload = [dataset, word_vectors]
    download_files(toDownload, data_temp_folder)
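    # download_files is a project helper rather than an Azure ML API; a
    # minimal sketch under that assumption, using only the standard library:
    #
    #   import urllib.request
    #
    #   def download_files(dict_list, target_folder):
    #       os.makedirs(target_folder, exist_ok=True)
    #       for entry in dict_list:
    #           for url in entry.values():
    #               urllib.request.urlretrieve(
    #                   url, os.path.join(target_folder, os.path.basename(url)))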

    # Unzip the downloaded training data
    with zipfile.ZipFile(os.path.join(data_temp_folder, "train.csv.zip"),
                         'r') as zip_file:
        zip_file.extractall(data_temp_folder)

    # Push the raw data and the vectors archive to the workspace blob store;
    # the vectors archive is uploaded only once (overwrite=False)
    def_blob_store.upload_files(
        [os.path.join(data_temp_folder, "train.csv")],
        target_path=f"{project_config['project_name']}/data/original/",
        overwrite=True)
    def_blob_store.upload_files(
        [os.path.join(data_temp_folder, "en_vectors_web_lg-2.1.0.tar.gz")],
        target_path=f"{project_config['project_name']}/data/install/",
        overwrite=False)
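    # A sketch of how the uploaded CSV could be consumed downstream as a
    # tabular dataset (assumes azureml.core.Dataset is imported):
    #
    #   train_ds = Dataset.Tabular.from_delimited_files(path=(
    #       def_blob_store,
    #       f"{project_config['project_name']}/data/original/train.csv"))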
    # Step 1: provision (or reuse) the CPU compute cluster
    cluster_name = "cpucluster"

    try:
        compute_target_cpu = ComputeTarget(workspace=ws, name=cluster_name)
    except ComputeTargetException:
        compute_config = AmlCompute.provisioning_configuration(
            vm_size='STANDARD_D3_V2', max_nodes=1, min_nodes=1)
        compute_target_cpu = ComputeTarget.create(ws, cluster_name,
                                                  compute_config)
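    # Provisioning is asynchronous; waiting here lets later steps rely on
    # the cluster being ready (wait_for_completion is part of the
    # ComputeTarget API)
    compute_target_cpu.wait_for_completion(show_output=True)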
Example #3
# Pick the most recently created model out of the registered list
production_model = next(
    filter(
        lambda x: x.created_time == max(model.created_time for model in model_list),
        model_list,
    )
)

# Fetch the latest registered version of that model by name
model = Model(ws, name=production_model.name)

service = Model.deploy(workspace=ws,
                       name=service_name,
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aci_config)

service.wait_for_deployment(show_output=True)
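# A common follow-up check; state and get_logs are part of the Webservice API:
# assert service.state == "Healthy", service.get_logs()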

# Persist the service name and scoring URI for downstream pipeline steps
aci_webservice = {}
aci_webservice["aci_name"] = service.name
aci_webservice["aci_url"] = service.scoring_uri
with open(os.path.join(args.aci_store, "aci_webservice.json"), "w") as outfile:
    json.dump(aci_webservice, outfile)

# Get the blob storage associated with the workspace
pipeline_datastore = Datastore(ws, "datastore_pipeline")

# Upload the webservice metadata to the main blob folder
pipeline_datastore.upload_files(
    [os.path.join(args.aci_store, "aci_webservice.json")],
    target_path="webservice/{}/{}".format(args.repo_owner, args.repo_name),
    overwrite=True)
    
print("Deployed ACI Webservice: {} \nWebservice Uri: {}".format(service.name, service.scoring_uri)