Example #1
def main():
    from tatk.pipelines.text_classification.text_classifier import TextClassifier
    from sklearn.linear_model import LogisticRegression
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import pandas

    init()
    df = pandas.DataFrame(data=[['please add your good text here.']],
                          columns=['tweets'])
    inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}

    #Generate the schema
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='service-schema.json')
    print("Schema generated")
Example #2
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import numpy as np

    # Generating random 28x28 pixels to use as sample input
    sample_input = (np.random.rand(28, 28, 1) * 255)  #.astype('uint8')
    sample_input = sample_input.reshape(1, 28, 28,
                                        1)  # Reshaping to match training data

    # Calling init() and run()
    init()
    inputs = {"input_array": SampleDefinition(DataTypes.NUMPY, sample_input)}
    result_string = run(sample_input)
    print("resultString = " + str(result_string))

    # Generating the schema
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='outputs/schema.json')
    print('Schema generated')
Example #3
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema

    # Test the init and run functions using test data
    init()

    test_doc_text = "🍪"
    category = run(test_doc_text)
    print(category)

    test_doc_text = "tomato pizza"

    # Generate the schema file (schema.json) needed for AML operationalization
    inputs = {"doc_text": SampleDefinition(DataTypes.STANDARD, test_doc_text)}
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='./outputs/schema.json')
    block_blob_service.create_blob_from_path('embeddings', 'schema.json',
                                             './outputs/schema.json')
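The upload at the end assumes a module-level block_blob_service. A sketch of how it might be constructed with the legacy azure-storage SDK (account credentials are placeholders):

from azure.storage.blob import BlockBlobService
block_blob_service = BlockBlobService(account_name='<storage-account>',
                                      account_key='<storage-key>')
# Ensure the target container exists before create_blob_from_path is called
block_blob_service.create_container('embeddings', fail_on_exist=False)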
Example #4
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema

    sentences = ['This is a sentence.', 'This is another one.']

    # Test the output of the functions
    init()
    result = run(sentences)

    inputs = {"sentences": SampleDefinition(DataTypes.STANDARD, sentences)}

    #Generate the schema
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='infersent_service_schema.json')
    print("Schema generated")
Example #5
def main():
  from azureml.api.schema.dataTypes import DataTypes
  from azureml.api.schema.sampleDefinition import SampleDefinition
  from azureml.api.realtime.services import generate_schema
  import pandas

  df = pandas.DataFrame(data=[[190, 60, 38]], columns=['height', 'width', 'shoe_size'])

  # Test the functions' output
  init()
  input1 = pandas.DataFrame([[190, 60, 38]])
  print("Result: " + run(input1))
  
  inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}

  # Generate the service_schema.json
  generate_schema(run_func=run, inputs=inputs, filepath='service_schema.json')
  print("Schema generated")
Example #6
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import numpy as np

    text_entry = 'This is a sentence. This is an awesome sentence!'

    # Test the output of the functions
    init()
    print("Result: " + run(text_entry))

    inputs = {"text_entry": SampleDefinition(DataTypes.STANDARD, text_entry)}

    #Generate the schema
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='./outputs/cnn_service_schema.json')
    print("Schema generated")
Example #7
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import pandas
    import os

    # Turn on data collection debug mode to view output in stdout
    os.environ["AML_MODEL_DC_DEBUG"] = 'true'

    inputs = {
        "input_df": SampleDefinition(DataTypes.PANDAS, yourinputdataframe)
    }
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='service_schema.json')
    print("Schema generated")

if __name__ == "__main__":
    main()
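Note that AML_MODEL_DC_DEBUG only echoes collected rows to stdout; the collection itself must be wired into the scoring script. A sketch using the data-collection API of that era (model name and identifiers are illustrative assumptions):

# Sketch, assuming the legacy azureml.datacollector API
from azureml.datacollector import ModelDataCollector

inputs_dc = ModelDataCollector('model.pkl', identifier='inputs')
prediction_dc = ModelDataCollector('model.pkl', identifier='prediction')

def run(input_df):
    import json
    pred = model.predict(input_df)
    inputs_dc.collect(input_df)      # echoed to stdout when AML_MODEL_DC_DEBUG is 'true'
    prediction_dc.collect(pred)
    return json.dumps(str(pred[0]))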
Example #8
def generate_api_schema():
    import os
    import pandas as pd
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    print("create schema")
    d = {
        'num-of-doors': [4],
        'fuel-type': [1],
        'width': [68.9],
        'height': [55.5],
        'num-of-cylinders': [6],
        'engine-type': [0],
        'horsepower': [106]
    }
    df = pd.DataFrame(data=d)
    input = df.to_json()
    inputs = {"input": SampleDefinition(DataTypes.STANDARD, input)}
    os.makedirs('outputs', exist_ok=True)
    print(
        generate_schema(inputs=inputs,
                        filepath="outputs/schema.json",
                        run_func=run))
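Because the sample above is the string produced by df.to_json() (DataTypes.STANDARD rather than PANDAS), the matching run() has to parse that string back into a frame. A hedged sketch, assuming the model global is set in init():

def run(raw_json):
    # Rebuild the DataFrame from the JSON-string sample defined above
    import json
    import pandas as pd
    input_df = pd.read_json(raw_json)
    pred = model.predict(input_df)
    return json.dumps(str(pred[0]))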
Example #9
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import os
    import pandas

    df = pandas.DataFrame(data=["What a waste of time and money! The story was not realistic at all! Actually it was completely far fetched!"], columns=['Text'])

    # Turn on data collection debug mode to view output in stdout
    os.environ["AML_MODEL_DC_DEBUG"] = 'true'

    # Test the output of the functions
    init()
    input1 = pandas.DataFrame(data=["What a waste of time and money! The story was not realistic at all! Actually it was completely far fetched!"], columns=['Text'])
    print("The input {0} created the following output: {1}".format(input1['Text'], run(input1)))

    inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}

    # Generate the schema
    generate_schema(run_func=run, inputs=inputs, filepath='./outputs/service_schema.json')
    print("Schema generated")
Example #10
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import pandas

    df = pandas.DataFrame(data=[[380, 120, 76]],
                          columns=['indicator1', 'NF1', 'cellprofiling'])

    # Check the output of the function
    init()
    input1 = pandas.DataFrame([[380, 120, 76]])
    print("Result: " + run(input1))

    inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}

    # Generate the service_schema.json
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='output/service_schema.json')
    print("Schema generated")
Beispiel #11
0
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import os
    import pandas

    df = pandas.DataFrame(data=[[1,1,1,1,1,1,1,1,1,1]], columns=['MAX_ACCX', 'MAX_ACCY', 'MAX_ACCZ', 'MAX_GYROX', 'MAX_GYROY', 'MAX_GYROZ', 'ACC_AVG', 'ACC_VAR', 'GYRO_AVG', 'GYRO_VAR'])

    # Turn on data collection debug mode to view output in stdout
    os.environ["AML_MODEL_DC_DEBUG"] = 'true'

    # Test the output of the functions
    init()
    input1 = pandas.DataFrame([[1,1,1,1,1,1,1,1,1,1]])
    print("The input {0} created the following output: {1}".format(input1.values, run(input1)))

    inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}

    # Generate the schema
    generate_schema(run_func=run, inputs=inputs, filepath='./outputs/service_schema.json')
    print("Schema generated")
Example #12
def main():
  from azureml.api.schema.dataTypes import DataTypes
  from azureml.api.schema.sampleDefinition import SampleDefinition
  from azureml.api.realtime.services import generate_schema
  import pandas
  import os
  
  df = pandas.DataFrame(data=[[3.0, 3.6, 1.3, 0.25]], columns=['sepal length', 'sepal width','petal length','petal width'])

  # Turn on data collection debug mode to view output in stdout
  os.environ["AML_MODEL_DC_DEBUG"] = 'true'

  # Test the output of the functions
  init()
  input1 = pandas.DataFrame([[3.0, 3.6, 1.3, 0.25]])
  print("Result: " + run(input1))
  
  inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}
  
  # Generate the schema
  generate_schema(run_func=run, inputs=inputs, filepath='./outputs/service_schema.json')
  print("Schema generated")
Example #13
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import pandas

    df = pandas.DataFrame(
        data=[[3.0, 3.6, 1.3, 0.25]],
        columns=['sepal length', 'sepal width', 'petal length', 'petal width'])

    # Test the output of the functions
    init()
    input1 = pandas.DataFrame([[3.0, 3.6, 1.3, 0.25]])
    print("Result: " + run(input1))

    inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}

    # Generate the schema
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='service_schema.json')
    print("Schema generated")
Example #14
def create_schema():
    import os
    import pandas
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema

    # This is used to define the schema and example
    df = pandas.DataFrame(data=[[10, 5]], columns=["x1", "x2"])

    init()
    # This is used to execute the run to get the schema of the prediction
    input1 = pandas.DataFrame(data=[[10, 5]])
    run(input1)

    print(os.getcwd())

    print(df)
    print(input1)

    inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}
    # The prepare statement writes the scoring file (main.py) and
    # the schema file (service_schema.json) to the output folder.
    #prepare(run_func=run, init_func=init, input_types=inputs, )
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='./outputs/service_schema.json')

    print("Schema generated")
Example #15
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import os
    import numpy as np

    df = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]])
    # Turn on data collection debug mode to view output in stdout
    os.environ["AML_MODEL_DC_DEBUG"] = 'true'

    # Test the output of the functions
    init()
    input1 = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]])
    print("Result: " + run(input1))

    inputs = {"input_df": SampleDefinition(DataTypes.NUMPY, df)}

    # Generate the schema
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='./outputs/sgd_automated_learn_v2.json')
    print("Schema generated")
Example #16
def main():
    import os
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema

    # Turn on data collection debug mode to view output in stdout
    os.environ["AML_MODEL_DC_DEBUG"] = 'false'
    os.environ["AML_MODEL_DC_STORAGE_ENABLED"] = 'false'

    # create the outputs folder
    os.makedirs('./outputs', exist_ok=True)

    # Read in the sample Modbus JSON message
    input_msg = read_msg()

    # Debugging - remove when deploying
    #print (" ");
    #print ("Input Json:")
    #print (input_msg);

    # Test init function
    init()

    # Write out json, sample response msg
    output_msg = run(input_msg)

    # Debugging - remove when deploying
    #print (" ");
    #print ("Output Json:")
    print(output_msg)

    # Sample input string
    input_str = {
        "input_str": SampleDefinition(DataTypes.STANDARD, input_msg)
    }

    # Generate swagger document for web service
    generate_schema(run_func=run,
                    inputs=input_str,
                    filepath='./outputs/service_schema.json')
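read_msg() is not shown in this example. A minimal sketch consistent with how it is used above, returning the raw text of a sample Modbus JSON message (the file name is an illustrative assumption):

def read_msg(path='sample_modbus_msg.json'):
    # Return the sample message as a plain string, matching DataTypes.STANDARD
    with open(path, 'r') as f:
        return f.read()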
Example #17
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import os
    import pandas

    # Anomaly
    df = pandas.DataFrame(data=[[33.66995566, 2.44341267, 21.39450979, 26]], columns=['machine_temperature', \
      'machine_pressure','ambient_temperature','ambient_humidity'])

    # Turn on data collection debug mode to view output in stdout
    os.environ["AML_MODEL_DC_DEBUG"] = 'true'

    # Test the output of the functions
    init()
    # Anomaly
    #input1 = '{ "machine": { "temperature": 33.66995566, "pressure": 2.44341267 }, \
    #      "ambient": { "temperature": 21.39450979, "humidity": 26 },\
    #      "timeCreated": "2017-10-27T18:14:02.4911177Z" }'

    # Normal
    #input1 = '{ "machine": { "temperature": 31.16469009, "pressure": 2.158002669 }, \
    #  "ambient": { "temperature": 21.17794693, "humidity": 25 },\
    #   "timeCreated": "2017-10-27T18:14:02.4911177Z" }'

    input2 = pandas.DataFrame(data=[[31.16469009, 2.158002669, 21.17794693, 25]], columns=['machine_temperature', \
      'machine_pressure','ambient_temperature','ambient_humidity'])

    print("Result: " + str(run(input2)))

    inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='./outputs/service_schema.json')

    print("Schema generated")
Example #18
    return_str = str_webmap_count + str(pred[0])
    return json.dumps(return_str)

def main():
  from azureml.api.schema.dataTypes import DataTypes
  from azureml.api.schema.sampleDefinition import SampleDefinition
  from azureml.api.realtime.services import generate_schema
  import os
  import pandas
  from arcgis_search import search_fire
  
  df = pandas.DataFrame(data=[[3.0, 3.6, 1.3, 0.25]], columns=['sepal length', 'sepal width','petal length','petal width'])

  # Turn on data collection debug mode to view output in stdout
  os.environ["AML_MODEL_DC_DEBUG"] = 'true'

  # Test the output of the functions
  init()
  input1 = 'forest'
  input2 = pandas.DataFrame([[3.7, 2.6, 1.3, 0.25]])
    print("Result: " + run(input1, input2))
  
  inputs = {'input_str':SampleDefinition(DataTypes.STRING, input1),
      "input_df": SampleDefinition(DataTypes.PANDAS, df)}
  
  # Generate the schema
  generate_schema(run_func=run, inputs=inputs, filepath='./outputs/service_schema.json')
  print("Schema generated")

if __name__ == "__main__":
    main()
Example #19
# Implement test code to run in IDE or Azure ML Workbench
if __name__ == '__main__':
    # Import the logger only for Workbench runs
    from azureml.logging import get_azureml_logger
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema
    import numpy as np
    import pandas as pd
    columns = ['TimeSpentOnWeb', 'TimeSpentOnProductPage']
    sample_input = pd.DataFrame(data=[(5.1, 3.5)], columns=columns)

    init()
    #input = "{}"
    result = run(sample_input)
    print("The predicted Product which will be Purchased is -", str(result))

    ##Generating Schema
    inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, sample_input)}
    print(
        generate_schema(inputs=inputs,
                        filepath="service-schema.json",
                        run_func=run))

    logger = get_azureml_logger()
    logger.log("Result", result)

    ## To use your existing model management account
    ## az ml account modelmanagement set -n dsazmlexpmodelmgmt -g datascience
    ## Setup a NEW Model management
    ##az ml account modelmanagement create -l [Azure region, e.g. eastus2] -n [your account name] -g [resource group name] --sku-instances [number of instances, e.g. 1] --sku-name [Pricing tier for example S1]
    ## Next setup your local or cluster mode deployment
    ## az ml env setup -l eastus2 -n dslocal -g datascience
    ## az ml env show -l eastus2 -n dslocal -g datascience
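    ## Once the schema file exists it is passed to service creation. A sketch,
    ## assuming the 2017-era Azure ML CLI (file and service names are illustrative):
    ## az ml service create realtime -f score.py --model-file model.pkl -s service-schema.json -n productapp -r python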
Example #20
def init():
    from sklearn.externals import joblib
    global model
    model = joblib.load(RandomForest_model_file)

def run(input_df):
    import json
    input = input_df.as_matrix()
    try:
        pred = model.predict(input)
        return json.dumps(str(pred[0]))
    except Exception as e:
        return str(e)

####################
#  Main function
####################
if __name__ == '__main__':
    import pandas
    from azureml.logging import get_azureml_logger
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema

    init()
    X_str =  '[{"capital_loss":-0.219095674,"hours_per_week":0.7559573744,"education_num":-0.4507068474,"capital_gain":-0.1480462751,"age":-0.0311032178,"Federal-gov":0.0,"Local-gov":0.0,"Private":1.0,"Self-emp-inc":0.0,"Self-emp-not-inc":0.0,"State-gov":0.0,"Without-pay":0.0,"Divorced":0.0,"Married-AF-spouse":0.0,"Married-civ-spouse":1.0,"Married-spouse-absent":0.0,"Never-married":0.0,"Separated":0.0,"Widowed":0.0,"Adm-clerical":0.0,"Armed-Forces":0.0,"Craft-repair":0.0,"Exec-managerial":0.0,"Farming-fishing":1.0,"Handlers-cleaners":0.0,"Machine-op-inspct":0.0,"Other-service":0.0,"Priv-house-serv":0.0,"Prof-specialty":0.0,"Protective-serv":0.0,"Sales":0.0,"Tech-support":0.0,"Transport-moving":0.0,"Husband":1.0,"Not-in-family":0.0,"Other-relative":0.0,"Own-child":0.0,"Unmarried":0.0,"Wife":0.0,"Amer-Indian-Eskimo":0.0,"Asian-Pac-Islander":0.0,"Black":0.0,"Other":0.0,"White":1.0,"Female":0.0,"Male":1.0,"Canada":0.0,"El-Salvador":0.0,"Germany":0.0,"Mexico":0.0,"Philippines":0.0,"Puerto-Rico":0.0,"United-States":1.0}]'
    X_test = pandas.read_json(X_str)
    # Get predictions
    y_pred = run(X_test)
    
    inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, X_test)}
    # The prepare statement writes the scoring file (main.py) and
    # the schema file (service_schema.json) to the output folder.
    generate_schema(run_func=run, inputs=inputs, filepath = 'service_schema.json')
    logger = get_azureml_logger()
    logger.log("amlrealworld.uciincome.score", "true")
Example #21
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)


# Implement test code to run in IDE or Azure ML Workbench
if __name__ == '__main__':
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema

    # Import the logger only for Workbench runs
    #from azureml.logging import get_azureml_logger
    #logger = get_azureml_logger()

    init()

    pilImg = Image.open("yourimage.jpg")
    base64ImgString = pilImgToBase64(pilImg)
    np_imgstring = np.array([base64ImgString], dtype=np.unicode)
    inputs = {"input_array": SampleDefinition(DataTypes.NUMPY, np_imgstring)}
    resultString = run(np_imgstring)
    print("resultString = " + str(resultString))

    # Generate the schema
    generate_schema(run_func=run,
                    inputs=inputs,
                    filepath='service_schema.json')
    print("Schema generated.")
Example #22
f.close()

#========================= CREATE WEB SERVICE SCHEMA =========================
def run(inputData):
    import json

    # classifier is assumed to be trained earlier in this script
    prediction = classifier.predict(inputData)
    return json.dumps(str(prediction))

from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema

inputs = {"inputData": SampleDefinition(DataTypes.NUMPY, X_test)}
print(generate_schema(run_func=run, inputs=inputs, filepath="./outputs/schema.json"))

'''
#========================= VISUALISING THE RESULTS =========================
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

# Visualising the Training set results
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
Example #23
# Plot ROC curve
localPrediction = prediction.select(' income',
                                    'scored_probabilities').toPandas()
y_true = localPrediction[' income'] == ' >50K'
y_pred = [elem[1] for elem in localPrediction['scored_probabilities']]
plot_roc(y_true, y_pred)

print("******** SAVE THE MODEL ***********")
model.write().overwrite().save("./outputs/AdultCensus.mml")

# save model in wasb if running in HDI.
#model.write().overwrite().save("wasb:///models/AdultCensus.mml")

# create web service schema
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema

# Define the input dataframe
sample = spark.createDataFrame(
    [('10th', 'Married-civ-spouse', 35.0)],
    [' education', ' marital-status', ' hours-per-week'])
inputs = {"input_df": SampleDefinition(DataTypes.SPARK, sample)}

# Create the schema file (service_schema.json) in the output folder.
import score_mmlspark
generate_schema(run_func=score_mmlspark.run,
                inputs=inputs,
                filepath='./outputs/service_schema.json')
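score_mmlspark is imported but not shown. A hedged sketch of what its init()/run() might contain, loading the pipeline saved above and scoring a Spark frame; the exact loader depends on how the model was saved, so treat the module contents as assumptions:

# score_mmlspark.py (sketch)
from pyspark.ml import PipelineModel

def init():
    global model
    model = PipelineModel.load("./outputs/AdultCensus.mml")

def run(input_df):
    # mmlspark scorers emit 'scored_labels'/'scored_probabilities' columns
    scored = model.transform(input_df)
    return scored.select('scored_labels').toPandas().to_json()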
Example #24
def main():
    from azureml.api.schema.dataTypes import DataTypes
    from azureml.api.schema.sampleDefinition import SampleDefinition
    from azureml.api.realtime.services import generate_schema

    print('Entered main function:')
    print(os.getcwd())
    
    amlWBSharedDir = os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'] 
    print(amlWBSharedDir)

    def get_files_in_dir(crt_dir):
        return( [f for f in os.listdir(crt_dir) if os.path.isfile(os.path.join(crt_dir, f))])

    fully_trained_weights_dir=os.path.join(
        amlWBSharedDir,
        os.path.join(*(['chestxray', 'output',  'trained_models_weights'])))
    crt_models = get_files_in_dir(fully_trained_weights_dir)
    print(fully_trained_weights_dir)
    print(crt_models)

    test_images_dir=os.path.join(
        amlWBSharedDir, 
        os.path.join(*(['chestxray', 'data', 'ChestX-ray8', 'test_images'])))
    test_images = get_files_in_dir(test_images_dir)
    print(test_images_dir)
    print(len(test_images))

    # score in local mode (i.e. here in main function)
    model = azure_chestxray_utils.build_DenseNetImageNet201_model()
    model.load_weights(os.path.join(
        fully_trained_weights_dir, densenet_weights_file_name))

    print('Model weights loaded!')

    import cv2
    cv2_image = cv2.imread(os.path.join(test_images_dir,test_images[0]))
    x, serialized_cam_image = get_image_score_and_serialized_cam(cv2_image, model)
    file_bytes = np.asarray(bytearray(serialized_cam_image.read()), dtype=np.uint8)
    recovered_image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

    # x = model.predict(cv2_image[None,:,:,:])
    print(test_images[0])
    print(x)
    print(recovered_image.shape)

    #  score in local mode (i.e. here in main function) using encoded data
    encoded_image = as_string_b64encoded_pickled(cv2_image)
    df_for_api = pd.DataFrame(data=[[encoded_image]], columns=[as_string_b64encoded_pickled_data_column_name])
    del encoded_image 
    del cv2_image
    del serialized_cam_image
    
    input_df = df_for_api[as_string_b64encoded_pickled_data_column_name][0]
    input_cv2_image = unpickled_b64decoded_as_bytes(input_df)
    x, serialized_cam_image = get_image_score_and_serialized_cam(input_cv2_image, model)
    file_bytes = np.asarray(bytearray(serialized_cam_image.read()), dtype=np.uint8)
    recovered_image = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

    # x = model.predict(input_cv2_image[None,:,:,:])
    print('After encoding and decoding:')
    print(x)
    print(recovered_image.shape)

    del model

    # now create the post deployment env, i.e. score using init() and run()
    crt_dir = os.getcwd()
    working_dir = os.path.join(crt_dir, 'tmp_cam_deploy')
    if not os.path.exists(working_dir):
        os.makedirs(working_dir)

    import shutil
    shutil.copyfile(
        os.path.join( fully_trained_weights_dir,densenet_weights_file_name), 
        os.path.join( working_dir,densenet_weights_file_name)) 

    os.chdir(working_dir)

    # Turn on data collection debug mode to view output in stdout
    os.environ["AML_MODEL_DC_DEBUG"] = 'true'

    # Test the output of the functions
    init()
    print("Result: " + run(df_for_api))

    # Generate the schema
    data_for_schema = {"input_df": SampleDefinition(DataTypes.PANDAS, df_for_api)}
    schema_file = os.path.join(fully_trained_weights_dir, 'chest_XRay_cam_service_schema.json')
    generate_schema(run_func=run, inputs=data_for_schema, filepath=schema_file)
    print("Schema saved in " + schema_file)
Example #25
input_df = spark.createDataFrame(
    [["this grant will provide funding for biostatistics and neuroscience"],
     ["this is an example abstract"]],
    schema=sch)

print(run(input_df))
# COMMAND ----------

# MAGIC %md
# MAGIC ### Write the Schema to Blob

# COMMAND ----------

# define the input data frame
inputs = {
    "input_df": SampleDefinition(DataTypes.SPARK,
                                 input_df.select(input_features))
}

# generate_schema writes the schema file to the Databricks cluster's local disk;
# we also take the returned schema and write it to the blob storage account
# mounted in the `train_on_dbr.py` file.
json_schema = generate_schema(run_func=run,
                              inputs=inputs,
                              filepath='service_schema.json')
with open("/dbfs/mnt/misc/service_schema.json", 'w') as f:
    f.write(json.dumps(json_schema))

# COMMAND ----------

# Take a look at the schema
print(json.dumps(json_schema, indent=2))