Example #1
    def call_model_cdsw(self, record):
        """
        Not Implemented - currently performs 42% slower than call_model.
        """

        response = cdsw.call_model(
            model_access_key=self.deployment_details["model_access_key"],
            ipt={"record": record},
        )

        return record["id"], response["response"]["uuid"]
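
The method above belongs to a class (not shown in the snippet) that keeps its model access key in a deployment_details dict. A minimal usage sketch under that assumption; the client name and the record fields are illustrative, not from the source:

# Hypothetical usage; `client` stands in for an instance of the unshown class
# that owns call_model_cdsw and its deployment_details dict.
record_id, prediction_uuid = client.call_model_cdsw({"id": 42, "tenure": 12})
print(record_id, prediction_uuid)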
Example #2
import copy
import json
import time

import cdsw

# Collect the model responses in a list.
response_labels_sample = []

# Make 250 calls to the model with increasing error
percent_counter = 0
percent_max = len(df_sample_clean)

for record in json.loads(df_sample_clean.to_json(orient='records')):
    print("Added {} records".format(percent_counter)) if (percent_counter %
                                                          50 == 0) else None
    percent_counter += 1
    no_churn_record = copy.deepcopy(record)
    no_churn_record.pop('customerID')
    no_churn_record.pop('Churn')
    # **note** this is an easy way to interact with a model in a script
    response = cdsw.call_model(latest_model["accessKey"], no_churn_record)
    response_labels_sample.append({
        "uuid": response["response"]["uuid"],
        "final_label": churn_error(record["Churn"], percent_counter / percent_max),
        "response_label": response["response"]["prediction"]["probability"] >= 0.5,
        "timestamp_ms": int(round(time.time() * 1000))
    })

# The "ground truth" loop adds the updated actual label value and an accuracy measure
# every 100 calls to the model.
for index, vals in enumerate(response_labels_sample):
    print("Update {} records".format(index)) if (index % 50 == 0) else None
Example #3
        ]].to_numpy()[0]

        try:

            # Cast the numeric feature fields to int (indexes 1, 4, 5, 6, 9).
            for idx in (1, 4, 5, 6, 9):
                input_data[idx] = int(input_data[idx])

            # Serialize every feature except the trailing label into a
            # comma-separated string.
            input_data_string = ",".join(str(value) for value in input_data[:-1])
            response = cdsw.call_model(latest_model["accessKey"],
                                       {"feature": input_data_string})

            predicted_result.append(
                response["response"]["prediction"]["prediction"])
            actual_result.append(input_data[-1])
            cdsw.track_delayed_metrics({"actual_result": input_data[-1]},
                                       response["response"]["uuid"])
            print(str(i) + " adding " + input_data_string)
        except Exception as exc:
            print("invalid row:", exc)
        time.sleep(0.2)
    end_time_ms = int(math.floor(time.time() * 1000))
    accuracy = classification_report(actual_result,
                                     predicted_result,
                                     output_dict=True)['accuracy']
    cdsw.track_aggregate_metrics({"accuracy": accuracy},
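                                 # The call above is cut off in the source; the
                                 # remaining arguments below are assumptions
                                 # modeled on the cdsw metrics API (compare the
                                 # timestamp window used in Example #4 below).
                                 start_time_ms,  # assumed: captured before the loop
                                 end_time_ms,
                                 model_deployment_crn=model_deployment_crn)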
Example #4
# This is the input data for which we want to make predictions.
# Ground truth is generally not yet known at prediction time.
score_x = iris.data[:test_size, 2].reshape(-1, 1) # Petal length

# Record the current time so we can retrieve the metrics
# tracked for these calls.
start_timestamp_ms = int(round(time.time() * 1000))

print("output type")
type(output)

uuids = []
predictions = []
for i in range(len(score_x)):
    output = cdsw.call_model(model_access_key, {"petal_length": score_x[i][0]}, api_key)
    # Record the UUID of each prediction for later correlation with ground truth.
    uuids.append(output["response"]["uuid"])
    predictions.append(output["response"]["prediction"])

    
# Inspect the last response returned by the loop.
print("output type:", type(output))
print(output)
    
# Record the current time.
end_timestamp_ms = int(round(time.time() * 1000))

# We can now use the read_metrics function to read the metrics we just
# generated into the current session by querying over that time window.
data = cdsw.read_metrics(model_deployment_crn=model_deployment_crn,
                         start_timestamp_ms=start_timestamp_ms,
                         end_timestamp_ms=end_timestamp_ms)
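
read_metrics returns the tracked calls that fall inside the queried window; a common next step is to flatten them for analysis. A small sketch, assuming the individual records sit under a "metrics" key in the response (that key name is an assumption here):

import pandas as pd

# Assumed response shape: {"metrics": [{...tracked call...}, ...]}
tracked_calls = data["metrics"]
metrics_df = pd.json_normalize(tracked_calls)
print(metrics_df.head())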
Example #5
import copy
import json
import time

import cdsw

# Run a simulation that makes 1000 calls to the model with increasing error.
response_labels_sample = []  # collect the tracked responses
percent_counter = 0
percent_max = len(df)
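
Neither churn_error (Example #2) nor label_error is defined in these snippets; both take the true label plus a fraction that grows over the run, which is what produces the "increasing error". A hypothetical sketch of such a helper; only the name and signature come from the call sites, the body is an assumption:

import random

def label_error(true_label, error_rate):
    # Hypothetical helper: return an incorrect label with probability
    # `error_rate`, so the tracked accuracy degrades as the simulation runs.
    if random.random() < error_rate:
        return not true_label
    return true_label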

for record in json.loads(df.astype("str").to_json(orient="records")):
    print("Added {} records".format(percent_counter)) if (
        percent_counter % 50 == 0
    ) else None
    percent_counter += 1
    no_approve_record = copy.deepcopy(record)

    # NOTE: the copied record is immediately replaced with a fixed test
    # payload, so every call sends the same five features.
    no_approve_record = {'acc_now_delinq': '1.0', 'acc_open_past_24mths': '2.0', 'annual_inc': '3.0', 'avg_cur_bal': '4.0', 'funded_amnt': '5.0'}
    
    # **note** this is an easy way to interact with a model in a script
    response = cdsw.call_model(Model_AccessKey, no_approve_record)
    response_labels_sample.append(
        {
            "uuid": response["response"]["uuid"],
            "final_label": label_error(record["label"], percent_counter / percent_max),
            "response_label": response["response"]["prediction"],
            "timestamp_ms": int(round(time.time() * 1000)),
        }
    )

# A sample of the tracked response payload (truncated in the original snippet):
#{
#    "model_deployment_crn": "crn:cdp:ml:us-west-1:8a1e15cd-04c2-48aa-8f35-b4a8c11997d3:workspace:f5c6e319-47e8-4d61-83bf-2617127acc36/d54e8925-a9e1-4d1f-b7f1-b95961833eb6",
#    "prediction": {
#        "input_data": "{'acc_now_delinq': '1.0', 'acc_open_past_24mths': '2.0', 'annual_inc': '3.0', 'avg_cur_bal': '4.0', 'funded_amnt': '5.0'}",
#        "prediction": "0.0"
#    },