def __init__(self, api_token, endpoint):
    self.api_token = api_token
    self.endpoint = endpoint
    dr.Client(endpoint=self.endpoint, token=self.api_token)
    self.my_projects = None
    self.project = None
    self.target = None
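A minimal sketch of a wrapper class this constructor could belong to; the class name and the extra method are assumptions, not from the source:

import datarobot as dr

class DataRobotSession:  # hypothetical wrapper name
    def __init__(self, api_token, endpoint):
        self.api_token = api_token
        self.endpoint = endpoint
        dr.Client(endpoint=self.endpoint, token=self.api_token)  # registers a global client
        self.my_projects = None
        self.project = None
        self.target = None

    def list_projects(self):
        # dr.Project.list() works because dr.Client(...) above configured the module-level client
        self.my_projects = dr.Project.list()
        return self.my_projects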
Example #2
def upload(database, table, project_id, model_id, datasource_id):
    # SetUp
    DR_API_TOKEN = os.environ['DR_API_TOKEN']
    TD_USERNAME = os.environ['TD_USERNAME']
    TD_PASSWORD = os.environ['TD_PASSWORD']
    TD_API_KEY = os.environ['TD_API_KEY']
    TD_API_SERVER = os.environ['TD_API_SERVER']
    MAX_WAIT = 60 * 60  # Maximum number of seconds to wait for prediction job to finish

    dr.Client(endpoint='https://app.datarobot.com/api/v2', token=DR_API_TOKEN)
    project = dr.Project.get(project_id)
    model = dr.Model.get(project_id, model_id)
    dataset = project.upload_dataset_from_data_source(datasource_id,
                                                      TD_USERNAME, TD_PASSWORD)

    # Predict
    pred_job = model.request_predictions(dataset.id)
    pred_df = pred_job.get_result_when_complete(max_wait=MAX_WAIT)

    # Upload
    from pytd import pandas_td as td
    con = td.connect(apikey=TD_API_KEY, endpoint=TD_API_SERVER)
    td.to_td(pred_df,
             '{}.{}'.format(database, table),
             con=con,
             if_exists='replace',
             index=False)
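A hypothetical invocation, assuming the DR_* and TD_* environment variables are exported; every ID below is a placeholder:

upload(database='my_database',
       table='dr_predictions',
       project_id='YOUR_PROJECT_ID',
       model_id='YOUR_MODEL_ID',
       datasource_id='YOUR_DATASOURCE_ID')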
Example #3
def _push_inference(model_config, code_dir, token=None, endpoint=None):
    dr_client.Client(token=token, endpoint=endpoint)
    if "inferenceModel" not in model_config:
        raise DrumCommonException(
            "You must include the inferenceModel top level key for custom infernece models"
        )
    if "targetName" not in model_config["inferenceModel"]:
        raise DrumCommonException(
            "For inference models, you must include targetName under the inferenceModel key"
        )
    if "modelID" in model_config:
        model_id = model_config["modelID"]
    else:
        model_id = dr_client.CustomInferenceModel.create(
            name=model_config["name"],
            target_type=_convert_target_type(model_config["targetType"]),
            target_name=model_config["inferenceModel"]["targetName"],
            description=model_config.get("description", "Pushed from DRUM"),
            positive_class_label=model_config["inferenceModel"].get("positiveClassLabel"),
            negative_class_label=model_config["inferenceModel"].get("negativeClassLabel"),
            prediction_threshold=model_config["inferenceModel"].get("predictionThreshold"),
        ).id
    dr_client.CustomModelVersion.create_clean(
        custom_model_id=model_id,
        base_environment_id=model_config["environmentID"],
        folder_path=code_dir,
        is_major_update=model_config.get("majorVersion", True),
    )
    _print_model_started_dialogue(model_id)
Example #4
def main(env_dir, model_dir, max_wait, update_env):
    client = dr.Client(os.environ["DATAROBOT_API_TOKEN"],
                       os.environ["DATAROBOT_ENDPOINT"])
    if env_dir is not None:
        if update_env:
            logger.info("updating environment")
            push_environment(env_dir, max_wait)
    if model_dir is not None:
        push_model(model_dir, env_dir)
Example #5
File: cli.py Project: recruit-tech/drctrl
    def preprocess(*args, **kwargs):
        ctx = args[0]
        ctx.obj['credential'] = load_credential(kwargs['credential'])
        credential = ctx.obj['credential']

        # initialize
        dr.Client(token=credential['token'], endpoint=credential['endpoint'])

        func(*args, **kwargs)
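Because dr.Client() registers a module-level client, the decorated command body can call the API without threading a handle through. A minimal sketch of a command relying on this; the function is illustrative, not from drctrl:

def list_projects(ctx, **kwargs):
    # uses the global client registered in preprocess() above
    for project in dr.Project.list():
        print(project.id, project.project_name)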
Example #6
def generate_model(name, data):

    api_key = 'YOUR_API_TOKEN'  # redacted; never hard-code a real token
    dr.Client(token=api_key, endpoint='https://app.datarobot.com/api/v2')

    project = dr.Project.start(project_name=name,
                               sourcedata=data,
                               target='target')

    return project.id
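The returned ID can be used to re-fetch the project later. A short follow-up sketch; Project.start runs Autopilot by default, so waiting for it is the usual next step (names below are placeholders):

project_id = generate_model('my_project', training_data)
project = dr.Project.get(project_id)
project.wait_for_autopilot()  # blocks until Autopilot finishes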
Example #7
def pipeline():
    print(os.getcwd())
    print("Reading data")

    df = get_data()

    print("Project creation process started")
    dr.Client(token=DATAROBOT_API_TOKEN, endpoint=DATAROBOT_ENDPOINT)
    projects_list = build_onevsall(df=df, multiclass_column_name=multiclass_column_name)

    return projects_list
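A hedged sketch of consuming the result, assuming build_onevsall returns a list of datarobot Project objects:

projects = pipeline()
for project in projects:
    project.wait_for_autopilot()  # block until each one-vs-all project finishes modeling
    print(project.id, project.project_name)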
Example #8
def _push_inference(model_config, code_dir, token=None, endpoint=None):
    dr_client.Client(token=token, endpoint=endpoint)
    if ModelMetadataKeys.MODEL_ID in model_config:
        model_id = model_config[ModelMetadataKeys.MODEL_ID]
    else:
        create_params = dict(
            name=model_config[ModelMetadataKeys.NAME],
            target_type=_convert_target_type(model_config[ModelMetadataKeys.TARGET_TYPE]),
            target_name=model_config[ModelMetadataKeys.INFERENCE_MODEL]["targetName"],
            description=model_config.get(ModelMetadataKeys.DESCRIPTION, "Pushed from DRUM"),
        )
        if model_config[ModelMetadataKeys.TARGET_TYPE] == TargetType.BINARY.value:
            create_params.update(
                dict(
                    positive_class_label=model_config[ModelMetadataKeys.INFERENCE_MODEL].get("positiveClassLabel"),
                    negative_class_label=model_config[ModelMetadataKeys.INFERENCE_MODEL].get("negativeClassLabel"),
                    prediction_threshold=model_config[ModelMetadataKeys.INFERENCE_MODEL].get("predictionThreshold"),
                ))
        elif model_config[ModelMetadataKeys.TARGET_TYPE] == TargetType.MULTICLASS.value:
            class_labels = model_config[ModelMetadataKeys.INFERENCE_MODEL].get("classLabels")
            class_labels_file = model_config[ModelMetadataKeys.INFERENCE_MODEL].get("classLabelsFile")
            if not ((class_labels is None) ^ (class_labels_file is None)):
                raise DrumCommonException(
                    "Multiclass inference models must specify either classLabels or classLabelsFile"
                )
            if class_labels_file:
                with open(class_labels_file) as f:
                    class_labels = f.read().split(os.linesep)
            create_params.update(dict(class_labels=class_labels))
        model_id = dr_client.CustomInferenceModel.create(**create_params).id
    dr_client.CustomModelVersion.create_clean(
        custom_model_id=model_id,
        base_environment_id=model_config[ModelMetadataKeys.ENVIRONMENT_ID],
        folder_path=code_dir,
        is_major_update=model_config.get(ModelMetadataKeys.MAJOR_VERSION, True),
    )
    _print_model_started_dialogue(model_id)
Example #9
import datarobot as dr
from datarobot.models.modeljob import wait_for_async_model_creation
import datetime
import pandas as pd
import time

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

# Start the DataRobot python client
dr.Client()

# Set some variables
# path = "C:\\Users\\Brett\\Downloads\\datarobot_examples\\examples\\time_series\\"
path = '/Users/brett.olmstead/Downloads/examples/time_series/'
filename = 'DR_Demo_Sales_Multiseries_training.xlsx'
now = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M')
project_name = 'DR_Demo_Retail_Multiseries_PROJECT_1_{}'.format(now)

# Create a project
print("Starting Project. Uploading file: " + str(filename))
proj = dr.Project.create(sourcedata=path + filename,
                         project_name=project_name,
                         max_wait=3600)

print('Project ID: {}'.format(proj.id))

# What projects are there?
# my_projects = dr.Project.list()
# proj = my_projects[0]
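For a multiseries file like this, the usual next step is a time series partitioning spec before setting the target. A sketch assuming 'Date', 'Store', and 'Sales' are the timestamp, series ID, and target columns; the column names are assumptions about the demo file:

spec = dr.DatetimePartitioningSpecification(
    datetime_partition_column='Date',    # assumed timestamp column
    use_time_series=True,
    multiseries_id_columns=['Store'])    # assumed series ID column
proj.set_target(target='Sales', partitioning_method=spec, max_wait=3600)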
Example #10
# 04 - Deploy a model
# Corresponding article: https://community.datarobot.com/t5/guided-ai-learning/deploy-a-model/ta-p/2780

# Environment setup
import os
import datarobot as dr

API_KEY = os.environ["API_KEY"]
YOUR_DR_URL = os.environ["YOUR_DR_URL"]
FILE_PATH = os.environ["FILE_PATH"]
ENDPOINT = YOUR_DR_URL + "/api/v2"

# Instantiate DataRobot instance
dr.Client(token=API_KEY, endpoint=ENDPOINT)

# TODO: Replace with your values
MODEL_ID = "YOUR_MODEL_ID"  # Get from step 03
PREDICTION_SERVER_ID = "YOUR_PREDICTION_SERVER_ID"  # Get from step 04

deployment = dr.Deployment.create_from_learning_model(
    model_id=MODEL_ID,
    label='New Deployment',
    description='A new deployment',
    default_prediction_server_id=PREDICTION_SERVER_ID)

print("Deployment created.")
print(deployment)
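Once created, the deployment can serve batch predictions. A minimal sketch using the client's batch prediction API, reusing FILE_PATH from above; the output path is a placeholder:

job = dr.BatchPredictionJob.score(
    deployment.id,
    intake_settings={'type': 'localFile', 'file': FILE_PATH},
    output_settings={'type': 'localFile', 'path': 'predictions.csv'},
)
job.wait_for_completion()  # block until the scored file is written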
Example #11
def pytest_sessionstart(session):
    dr.Client(endpoint=ENDPOINT_URL, token=os.environ["DATAROBOT_API_TOKEN"])
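This hook typically lives in conftest.py, so every test in the session starts with the global client configured. A tiny illustrative test; the assertion is an assumption about the test account:

def test_client_is_configured():
    # dr.Project.list() only succeeds once dr.Client(...) has been called
    assert isinstance(dr.Project.list(), list)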
Example #12
dte_x.to_excel('dte_x.xlsx', index=False)
dte_z.to_excel('dte_z.xlsx', index=False)
dtr_xhat.to_excel('dtr_xhat.xlsx', index=False)
dte_xhat.to_excel('dte_xhat.xlsx', index=False)
dtr_xz.to_excel('dtr_xz.xlsx', index=False)
dte_xz.to_excel('dte_xz.xlsx', index=False)

# ========= #
# Datarobot #
# ========= #

API_TOKEN = 'YOUR_API_TOKEN'  # redacted; never hard-code a real token
END_POINT = 'https://app.datarobot.com/api/v2'

# Instantiate DataRobot Client
dr.Client(token=API_TOKEN, endpoint=END_POINT)


def get_projects_by_name(name):
    return list(filter(lambda x: name in x.project_name, dr.Project.list()))


# ======= #
# Model x #
# ======= #

# create project
TRAIN_SET = '/Users/alex/Desktop/roar/test/dtr_x.xlsx'
TEST_SET = '/Users/alex/Desktop/roar/test/dte_x.xlsx'
# TRAIN_SET = '/Users/yihewang/Desktop/test/dtr_x.xlsx'
# TEST_SET = '/Users/yihewang/Desktop/test/dte_x.xlsx'
Example #13
def _push_training(model_config, code_dir, endpoint=None, token=None):
    try:
        from datarobot._experimental import CustomTrainingBlueprint, CustomTrainingModel
    except ImportError:
        raise DrumCommonException(
            "You tried to run custom training models using a version of the \n"
            "datarobot client which doesn't have this beta functionality yet. \n"
            "Please pip install datarobot>=2.22.0b0 to access this functionality. \n"
            "This requires adding the internal datarobot artifactory index \n"
            "as your pip index. "
        )
    dr_client.Client(token=token, endpoint=endpoint)
    if "modelID" in model_config:
        model_id = model_config["modelID"]
    else:
        model_id = CustomTrainingModel.create(
            name=model_config["name"],
            target_type=_convert_target_type(model_config["targetType"]),
            description=model_config.get("description", "Pushed from DRUM"),
        ).id
        print(
            "You just created a new custom model. Please add this model ID to your metadata file "
            "by adding the line 'modelID:{}'".format(model_id)
        )

    try:
        dr_client.CustomModelVersion.create_clean(
            model_id,
            base_environment_id=model_config["environmentID"],
            folder_path=code_dir,
            is_major_update=model_config.get("majorVersion", True),
        )
    except dr_client.errors.ClientError as e:
        print("Error adding model with ID {} and dir {}: {}".format(model_id, code_dir, str(e)))
        raise SystemExit(1)

    blueprint = CustomTrainingBlueprint.create(
        environment_id=model_config["environmentID"],
        custom_model_id=model_id,
    )

    print("A blueprint was created with the ID {}".format(blueprint.id))

    _print_model_started_dialogue(model_id)

    if "trainOnProject" in model_config.get("trainingModel", ""):
        try:
            project = dr_client.Project(model_config["trainingModel"]["trainOnProject"])
            model_job_id = project.train(blueprint)
            lid = dr_client.ModelJob.get(project_id=project.id, model_job_id=model_job_id).model_id
        except dr_client.errors.ClientError as e:
            print("There was an error training your model: {}".format(e))
            raise SystemExit()
        print("\nIn addition...")
        print("Model training has started! Follow along at this link: ")
        print(
            MODEL_LOGS_LINK_FORMAT.format(
                url=re.sub(r"/api/v2/?", "", dr_client.client._global_client.endpoint),
                model_id=lid,
                project_id=model_config["trainingModel"]["trainOnProject"],
            )
        )
Example #14
# Create a project, run autopilot, and export the blueprint for the best model.
#
# This script assumes a yaml config file exists: 'datarobot_config.yaml'
# containing the DataRobot endpoint and your token:
# endpoint: https://app.datarobot.com/api/v2/
# token: not-my-real-token

import pandas as pd
import datarobot as dr
import pickle

dr.Client(config_path='datarobot_config.yaml')

################################################################################
# project creation

# from a local file
data_path = 'logan-US-2013.csv'
df = pd.read_csv(data_path)
df.head()

# create a project from a dataset, this can also be a filepath directly or a url
project = dr.Project.create(df, project_name='delays2013')

################################################################################
# options

# see available metrics (optional)
project.get_metrics('was_delayed')['available_metrics']

# custom feature lists (optional)
Example #15
import yaml
import datarobot as dr
import os
import argparse
import logging

parser = argparse.ArgumentParser(description="MLOps test model")
parser.add_argument("--model-dir", default=None)
parser.add_argument("--logging-level", default="INFO")
parser.add_argument("--max-wait", default=3600, type=int)

client = dr.Client(os.environ["DATAROBOT_API_TOKEN"], os.environ["DATAROBOT_ENDPOINT"])

logging.basicConfig(
    format="{} - %(levelname)s - %(asctime)s - %(message)s".format(__name__)
)
logger = logging.getLogger("custom model test")

def main(model_dir, max_wait):
    logger.info("loading model config")
    try:
        with open(os.path.join(model_dir, "model-config.yaml"), "r") as f:
            model_config = yaml.load(f, Loader=yaml.FullLoader)
    except OSError:
        raise Exception("no model config yaml found")
    test_dataset_id = model_config.get("testDatasetID")
    if test_dataset_id is None:
        path_to_dataset = model_config.get("testData")
        logger.info("loading dataset to ai catalog")
        test_dataset = dr.Dataset.create_from_file(file_path = path_to_dataset)
        test_dataset_id = test_dataset.id
Example #16
def _push_training(model_config, code_dir, endpoint=None, token=None):
    try:
        from datarobot._experimental import CustomTrainingBlueprint, CustomTrainingModel
    except ImportError:
        raise DrumCommonException(
            "You tried to run custom training models using a version of the \n"
            "datarobot client which doesn't have this beta functionality yet. \n"
            "Please pip install datarobot>=2.22.0b0 to access this functionality. \n"
            "This requires adding the internal datarobot artifactory index \n"
            "as your pip index. ")
    dr_client.Client(token=token, endpoint=endpoint)
    if ModelMetadataKeys.MODEL_ID in model_config:
        model_id = model_config[ModelMetadataKeys.MODEL_ID]
    else:
        model_id = CustomTrainingModel.create(
            name=model_config[ModelMetadataKeys.NAME],
            target_type=_convert_target_type(model_config[ModelMetadataKeys.TARGET_TYPE]),
            description=model_config.get("description", "Pushed from DRUM"),
        ).id
        print(
            "You just created a new custom model. Please add this model ID to your metadata file "
            "by adding the line 'modelID:{}'".format(model_id))

    try:
        model_version = dr_client.CustomModelVersion.create_clean(
            model_id,
            base_environment_id=model_config[ModelMetadataKeys.ENVIRONMENT_ID],
            folder_path=code_dir,
            is_major_update=model_config.get(ModelMetadataKeys.MAJOR_VERSION, True),
        )
    except dr_client.errors.ClientError as e:
        print("Error adding model with ID {} and dir {}: {}".format(model_id, code_dir, str(e)))
        raise SystemExit(1)

    # TODO: Update this once the datarobot client is updated
    payload = dict(custom_model_version_id=model_version.id)
    response = dr_client.client.get_client().post("customTrainingBlueprints/",
                                                  data=payload)
    user_blueprint_id = response.json()["userBlueprintId"]

    print("A user blueprint was created with the ID {}".format(
        user_blueprint_id))

    _print_model_started_dialogue(model_id)

    if "trainOnProject" in model_config.get("trainingModel", ""):
        try:
            pid = model_config["trainingModel"]["trainOnProject"]
            current_task = "fetching the specified project {}".format(pid)
            project = dr_client.Project(pid)

            # TODO: Update this once the datarobot client is updated
            payload = dict(user_blueprint_id=user_blueprint_id)
            current_task = "adding your model to the menu"
            response = dr_client.client.get_client().post(
                "projects/{}/blueprints/fromUserBlueprint/".format(pid),
                data=payload)
            blueprint_id = response.json()["id"]

            current_task = "actually training of blueprint {}".format(
                blueprint_id)
            model_job_id = project.train(blueprint_id)
            lid = dr_client.ModelJob.get(project_id=pid,
                                         model_job_id=model_job_id).model_id
        except dr_client.errors.ClientError as e:
            print("There was an error training your model while {}: {}".format(
                current_task, e))
            raise SystemExit(1)
        print("\nIn addition...")
        print("Model training has started! Follow along at this link: ")
        print(
            MODEL_LOGS_LINK_FORMAT.format(
                url=re.sub(r"/api/v2/?", "",
                           dr_client.client._global_client.endpoint),
                model_id=lid,
                project_id=model_config["trainingModel"]["trainOnProject"],
            ))
Example #17
def deploy_custom_model(model_dir, token, endpoint, max_wait):

    client = dr.Client(token=token, endpoint=f"{endpoint}/api/v2")
    logger.info(f"connected {client.endpoint}")
    logger.info("load model config for custom model deployment")
    try:
        with open(os.path.join(model_dir, "model-config.yaml"), "r") as f:
            model_config = yaml.load(f, Loader=yaml.FullLoader)
    except OSError:
        raise Exception("no model config yaml found")

    target_type_dict = {
        "regression": dr.enums.TARGET_TYPE.REGRESSION,
        "binary": dr.enums.TARGET_TYPE.BINARY,
        "multiclass": dr.enums.TARGET_TYPE.MULTICLASS,
        "unstructured": None
    }

    logger.info(pprint.pformat(model_config))
    env_id = model_config["environmentID"]
    version_id = model_config.get("modelVersionID")
    model_id = model_config.get("id")
    name = model_config["name"]
    target_type = model_config["targetType"]
    target_name = model_config["targetName"]
    language = model_config.get("language")
    description = model_config.get("description")
    positive_class_label = model_config.get("positiveClassLabel")
    negative_class_label = model_config.get("negativeClassLabel")
    prediction_threshold = model_config.get("predictionThreshold")
    major_update = model_config.get("majorVersion")
    training_data_path = model_config.get("trainingData")
    training_data_catalog_id = model_config.get("datasets", {}).get("trainingDataCatalogId")

    pred_server = dr.PredictionServer.list()[0]
    deployment = dr.Deployment.create_from_custom_model_version(
        custom_model_version_id=version_id,
        label=name,
        description=description,
        max_wait=max_wait,
        default_prediction_server_id=pred_server.id)
    logger.info("enabling data drift tracking")
    deployment.update_drift_tracking_settings(feature_drift_enabled=True)
    model_config["deploymentID"] = deployment.id
    model_config["deploymentType"] = "custom inference"
    logger.info("update model metadata yaml")
    logger.info(model_config)

    url = "customModels/predictionExplanationsInitialization/"
    data = {
        "customModelId": model_config["id"],
        "customModelVersionId": model_config["modelVersionID"],
    }
    logger.info("initializing prediction explanations for deployment")
    try:
        resp = client.post(url, data=data)
    except Exception as e:
        logger.warning(e)

    with open(os.path.join(model_dir, "model-config.yaml"), "w") as f:
        yaml.dump(model_config, f)
Example #18
"""
Creates DataRobot projects and deployments for specified dataset and use case.

Information about the projects and deployments are stored in a reference file.
"""

import datarobot as dr
import pandas as pd
import drutils as du

# setup
cf = du.load_config('../usecase_config.yaml')
dr.Client(config_path='../drconfig.yaml')

df = pd.read_csv(cf['dataset'])
# optional filtering to reduce deployments
df = df[df[cf['series']].isin(cf['filter'])]

################################################################################
# project setup
spec = du.setup_basic_time_spec(cf)

# check existing deployments
deployments = dr.Deployment.list()
deployment_names = [d.label for d in deployments]

# pull out server information for deployment
prediction_server = dr.PredictionServer.list()[0]

reference = pd.DataFrame()
Example #19
"""
Prerequisite: pip3 install datarobot
"""
import json
import os
from pathlib import Path
from pprint import pprint
import requests

import datarobot as dr

datarobot_api_token = os.getenv("DATAROBOT_API_TOKEN")
datarobot_endpoint = os.getenv("DATAROBOT_ENDPOINT")
dataset_file_path = Path(".").parent.joinpath("data", "auto-mpg.csv")

# Step 1: Upload a dataset
dr.Client(endpoint=datarobot_endpoint, token=datarobot_api_token)
project = dr.Project.start(
    project_name="Auto MPG DR-Client",
    sourcedata=dataset_file_path.as_posix(),
    target="mpg",
)

# Step 2: Train & Deploy an AI
# Train
# Autopilot will take a bit to complete.
# Run the following and then grab a coffee or catch up on email.
project.wait_for_autopilot()
model = dr.ModelRecommendation.get(project.id)
# Deploy
prediction_server = dr.PredictionServer.list()[0]
deployment = dr.Deployment.create_from_learning_model(
    # the call was truncated in the source; a plausible completion using the
    # objects above (the label is an assumption):
    model_id=model.model_id,
    label="Auto MPG DR-Client",
    default_prediction_server_id=prediction_server.id,
)
Example #20
# %% #########################################
# Import libraries and set the config path
import sys, os
import pandas as pd
import numpy as np
import datarobot as dr
from datarobot import Project, Deployment
from datarobot.enums import SERVICE_STAT_METRIC, ACCURACY_METRIC
import pickle
import requests
import json
import time
from datetime import datetime
import matplotlib
print(sys.version)
dr.Client(config_path=os.getcwd() + '/API_Workflow_EndtoEnd/config.yaml')


# Custom function required for tracking data drift
def set_association_id(deployment_id,
                       association_id,
                       allow_missing_values=False):
    """Assigns the association ID for a deployment"""
    url = f'{BASE_URL}/modelDeployments/{deployment_id}/associationIdSettings/'

    data = {
        'allowMissingValues': allow_missing_values,
        'columnName': association_id
    }

    resp = requests.patch(url, json=data, headers=HEADERS)
Example #21
# 
# You can find your API token by logging into the DataRobot Web User Interface and looking in your `Profile`.
# 
# The Python client can be configured in several ways. The example we'll use in this notebook is to point to a `yaml` file that has the information. This is a text file containing two lines like this:
# ```yaml
# endpoint: https://app.datarobot.com/api/v2/
# token: not-my-real-token
# ```
# 
# If you want to run this notebook without changes, please save your configuration in a file located under your home directory called `~/.config/datarobot/drconfig.yaml`.

# In[3]:


# Initialization with arguments
dr.Client(token='YOUR_API_TOKEN', endpoint='https://app.datarobot.com/api/v2/')

# Initialization with a config file in the same directory as this notebook
# dr.Client(config_path='drconfig.yaml')


# ## Create the Project
# Here, we use the `datarobot` package to upload a new file and create a project. The name of the project is optional, but can be helpful when trying to sort among many projects on DataRobot.

# In[4]:


filename = 'data/DR_Demo_Sales_Multiseries_training.xlsx'
now = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M')
project_name = 'DR_Demo_Sales_Multiseries_{}'.format(now)
proj = dr.Project.create(sourcedata=filename,
                         project_name=project_name)  # truncated in the source; completed with the project_name defined above
Example #22
import sys
import datarobot as dr
'''
Download the model jar from DataRobot. Make sure the Scoring Code (CodeGen) feature flag is enabled.
'''

model_id = sys.argv[1]
project_id = sys.argv[2]
token = sys.argv[3]

dr.Client(token=token, endpoint='https://app.datarobot.com/api/v2')
model = dr.Model.get(project=project_id, model_id=model_id)
filename = '{}.jar'.format(model.id)
model.download_scoring_code(filename)
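Matching the sys.argv order above, the script is invoked as shown below; the script name is a placeholder, and the downloaded jar can then score data anywhere a JVM is available:

# python download_codegen_jar.py MODEL_ID PROJECT_ID API_TOKEN
# -> writes ./<MODEL_ID>.jar next to the script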
Example #23
# The enclosing definition was cut off in the source; a plausible signature,
# reconstructed from the call at the bottom of this example:
def predict_API_call(host, project_id, model_id, username, token, data, headers, classif=True):
    response = requests.post('%s/api/v1/%s/%s/predict' % (host, project_id, model_id),
                             auth=(username, token), data=data, headers=headers)
    output = response.json()
    if classif:
        predictions = [record['class_probabilities']['1.0']
                       for record in output['predictions']]
    else:
        predictions = [record['prediction'] for record in output['predictions']]
    return predictions

### Modelling API parameters
token = 'YOUR_API_TOKEN'  # redacted; never hard-code a real token
host = 'https://qlik.orm.datarobot.com'
username = '******'

dr.Client(token=token, endpoint='%s/api/v2' % host)

### Prediction API parameters
prediction_host = 'https://qlik.orm.datarobot.com'
headers = {'Content-Type': 'application/json', 'datarobot-key': 'YOUR_DATAROBOT_KEY'}


### Make prediction for expected loss
output = predict_API_call(host=prediction_host,
                          headers=headers,
                          username=username,
                          token=token,
                          model_id=loss_model_id,
                          project_id=loss_project_id,
                          data=row_json,
                          classif=False)
Example #24
#    Zepl notebook https://app.zepl.com/ODFHKV0LJ/notebooks/e1be0bcb11264260bede11649f0795ec

# Matching for Quasi-Experimentation with DataRobot
#   Jason Miller, CFDS

# Load Libraries
import datarobot as dr
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt

# Connect to DR
## Note: if running this as a .py you should replace MAIN_TOKEN with your
##          .yaml configuration file
# Connect
dr.Client(token=MAIN_TOKEN, endpoint='https://app.datarobot.com/api/v2')

# Import Data Set
## This (wget) part is for Zepl only!
# !wget http://zdata/marketing_promotional_campaign.csv
## This part is for regular Python as well:
data = pd.read_csv('marketing_promotional_campaign.csv')
## drop the target
data.drop('spend', inplace=True, axis=1)
print(data)
## write a CSV of the subset of data
data.to_csv('marketing_promo_no_target.csv')
## send the new data set to AI Catalog
dataset = dr.Dataset.create_from_file(
    file_path='marketing_promo_no_target.csv')
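A hedged follow-up using the catalog item, assuming the goal is to model the promo data without the target; Project.create_from_dataset accepts the AI Catalog dataset ID (the project name is a placeholder):

project = dr.Project.create_from_dataset(dataset.id, project_name='marketing_promo_matching')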
Example #25
HOST = environ.get('DATAROBOT_ENDPOINT')
API_TOKEN = environ.get('DATAROBOT_API_TOKEN')
CHANNEL_CONFIG = environ.get('CHANNEL_CONFIG')
PROJECT_ID = environ.get('PROJECT_ID')
MODEL_ID = environ.get('MODEL_ID')
MLOPS_MODELID = environ.get('MLOPS_MODELID')
DEPLOYMENT_ID = environ.get('DEPLOYMENT_ID')

# feature flags to customize the AutoML model training flow
USE_AUTOPILOT = False
PRIME = False
SCORINGCODE = True
USE_EXISTING = True

drclient = dr.Client(endpoint=HOST, token=API_TOKEN)
if not USE_EXISTING:
    # training set
    TrainingDataSet = pd.read_csv(
        'https://s3.amazonaws.com/datarobot_public_datasets/DR_Demo_LendingClub_Guardrails.csv',
        nrows=1900)
    # 1. create a new project, upload data and determine best model
    if USE_AUTOPILOT:
        newProject = dr.Project.start(sourcedata=TrainingDataSet,
                                      project_name='Python Lending Club ' +
                                      str(date.today()),
                                      target='is_bad',
                                      autopilot_on=True)
        newProject.set_worker_count(8)
        newProject.wait_for_autopilot()
        recommendation_type = dr.enums.RECOMMENDED_MODEL_TYPE.RECOMMENDED_FOR_DEPLOYMENT
Example #26
# Maximum number of seconds to wait for prediction job to finish
MAX_WAIT = 60 * 60

# Number of messages to predict in batches
MAX_MESSAGES = 2

# Tokens / credentials
dr_api_token = open('../dr_api_token').read().strip()
slack_credentials = json.load(open('../slack_credentials.json'))
daniel_id = slack_credentials['dm_id']

# DR models
models = json.load(open('../sentiment_model_ids.json'))

# Connect to DR
dr.Client(endpoint='https://app.datarobot.com/api/v2', token=dr_api_token)

for model_name, model in models.items():
    model['project'] = dr.Project.get(model['project_id'])
    model['model'] = dr.Model.get(model['project_id'], model['best_model_id'])

# Connect to slack
sc = SlackClient(slack_credentials['token'])
sc.rtm_connect()

print('Connected; waiting for messages...')

# Read incoming messages from testing channel
messages = []
while True:
    event = sc.rtm_read()
Example #27
import pandas as pd
import datarobot as dr
import mlb_pull_year as mlb
import requests
import os
import sys
import time
import datetime
import argparse as ap

API_TOKEN = os.getenv('DATAROBOT_API_TOKEN')
ENDPOINT = os.getenv('DATAROBOT_ENDPOINT')
dr.Client(endpoint=ENDPOINT, token=API_TOKEN)

PROJECT_ID = '5bdb7caa7c6f8b71e0428016'  # The baseball project
project = dr.Project.get(PROJECT_ID)

DEPLOYMENT_ID = '5bdf672f7c6f8b2939428077'  # E.g., the project's recommended model: XGBoost @ 80%
USERNAME = os.getenv('DATAROBOT_USERNAME')

TRAINING_DATA = 'pitch_scoring.csv'  # Only used if you train a new baseball project


def create_new_baseball_project():
    '''
    Helper function to create a new baseball project using source training data
    '''
    t1 = time.time()
    # Read source data
    pitches_train = pd.read_csv(TRAINING_DATA, parse_dates=['date'])
    print('Source data shape:', pitches_train.shape)