# ===== Example 1 =====
 def _create_wml_client(self):
     """Build a WML API client bound to this object's deployment space."""
     tracer.debug("Creating WML client")
     client = APIClient(self._wml_credentials)
     # A default space must be selected before any other client call is made.
     client.set.default_space(self._deployment_space)
     tracer.debug("WML client created")
     return client
# ===== Example 2 =====
def predict_price_wml(location, sqft, bath, bhk):
    """Score the deployed house-price model and return the ceiled prediction.

    Args:
        location: location column name used for one-hot encoding.
        sqft: square footage of the property.
        bath: number of bathrooms.
        bhk: number of bedrooms/hall/kitchen.

    Returns:
        int: the predicted price, rounded up with math.ceil.
    """
    # Populates the module-level wml_credentials used below.
    getWmlCredentials()

    client = APIClient(wml_credentials)
    client.set.default_space(space_id)

    with open(app.config["SERVICES"] + 'wmlDeployment.json', 'r') as wml_deployment:
        cred = json.loads(wml_deployment.read())

    # Column layout of the training data: [sqft, bath, bhk, <one-hot locations...>]
    X = pd.read_csv(app.config['DATASET'] + 'intermediate.csv')
    x = np.zeros(len(X.columns), dtype=int)
    x[0] = sqft
    x[1] = bath
    x[2] = bhk
    # One-hot encode the location. The old code indexed np.where(...)[0][0]
    # unconditionally, which raised IndexError for unknown locations and made
    # its `if loc_index >= 0` guard dead code; check the match count instead.
    matches = np.where(X.columns == location)[0]
    if matches.size > 0:
        x[matches[0]] = 1

    did = client.deployments.get_uid(cred)
    job_payload = {
        client.deployments.ScoringMetaNames.INPUT_DATA: [{
            'values': [x.tolist()]
        }]
    }
    scoring_response = client.deployments.score(did, job_payload)
    return math.ceil(scoring_response['predictions'][0]['values'][0][0])
def apicall_vehicle_loan(id):
    """Score the deployed vehicle-loan model for one applicant row.

    Args:
        id: list of feature values ordered exactly as the `fields` list below.

    Returns:
        list: [class_type, pri] where class_type is 'Yes'/'No' and pri is the
        probability of that class formatted as a short string.
    """
    import os
    from ibm_watson_machine_learning import APIClient

    # SECURITY(review): the API key was hard-coded; prefer the environment.
    # The literal fallback preserves old behavior but should be revoked and
    # removed once callers set IBMCLOUD_API_KEY.
    api_key = os.environ.get('IBMCLOUD_API_KEY',
                             'teuonH42NK9fs-h2kbKROGBHhiirxc6tFhyXQ7eIOcgg')
    location = 'us-south'

    wml_credentials = {
        "apikey": api_key,
        "url": 'https://' + location + '.ml.cloud.ibm.com'
    }

    client = APIClient(wml_credentials)

    space_id = 'f6f6501f-c8e1-4c4f-9c65-59ecafdcc386'
    client.set.default_space(space_id)
    deployment_uid = 'b85fc3cb-4824-491b-baa3-88f981ba26ba'

    scoring_payload = {
        "input_data": [{
            "fields": [
                "Loan_ID", "Gender", "Married", "Dependents", "Education",
                "Self_Employed", "ApplicantIncome", "CoapplicantIncome",
                "LoanAmount", "Loan_Amount_Term", "Credit_History",
                "Property_Area"
            ],
            "values": [id]
        }]
    }

    predictions = client.deployments.score(deployment_uid, scoring_payload)

    # Read the structured response instead of string-splitting its JSON dump
    # (the old parsing depended on exact key order and float formatting).
    # values[0] is assumed to be [predicted_class, [prob_class_N, prob_class_Y]],
    # matching the positions the original positional parsing relied on —
    # TODO confirm against the deployed model's output schema.
    prediction, probabilities = predictions['predictions'][0]['values'][0]
    if str(prediction).startswith('N'):
        class_type = 'No'
        pri = '%.2f' % probabilities[0]
    else:
        class_type = 'Yes'
        pri = '%.2f' % probabilities[1]

    return [class_type, pri]
def apicall_personal_loan(id):
    """Score the deployed personal-loan model for one applicant row.

    Args:
        id: list of feature values ordered exactly as the `fields` list below.

    Returns:
        list: [class_type, pri] where class_type is 'Yes'/'No' and pri is the
        probability of that class formatted as a short string.
    """
    import os
    from ibm_watson_machine_learning import APIClient

    # SECURITY(review): the API key was hard-coded; prefer the environment.
    # The literal fallback preserves old behavior but should be revoked and
    # removed once callers set IBMCLOUD_API_KEY.
    api_key = os.environ.get('IBMCLOUD_API_KEY',
                             'teuonH42NK9fs-h2kbKROGBHhiirxc6tFhyXQ7eIOcgg')
    location = 'us-south'

    wml_credentials = {
        "apikey": api_key,
        "url": 'https://' + location + '.ml.cloud.ibm.com'
    }

    client = APIClient(wml_credentials)

    space_id = 'f6f6501f-c8e1-4c4f-9c65-59ecafdcc386'
    client.set.default_space(space_id)
    deployment_uid = '85280b66-0f77-40a5-b420-a2ac53ea18d0'

    scoring_payload = {
        "input_data": [{
            "fields": [
                "age", "job_cat", "marital status_cat", "education_cat",
                "credit default_cat", "housing loan_cat", "Vehicle loan_cat"
            ],
            "values": [id]
        }]
    }

    predictions = client.deployments.score(deployment_uid, scoring_payload)

    # Read the structured response instead of string-splitting its JSON dump
    # (the old parsing depended on exact key order and float formatting).
    # values[0] is assumed to be [predicted_class, [prob_class_0, prob_class_1]],
    # matching the positions the original positional parsing relied on —
    # TODO confirm against the deployed model's output schema.
    prediction, probabilities = predictions['predictions'][0]['values'][0]
    if str(prediction).startswith('0'):
        class_type = 'No'
        pri = '%.2f' % probabilities[0]
    else:
        class_type = 'Yes'
        pri = '%.2f' % probabilities[1]

    return [class_type, pri]
def apicall_housing_loan(id):
    """Score the deployed housing-loan model for one applicant row.

    Args:
        id: list of feature values ordered exactly as the `fields` list below.

    Returns:
        list: [class_type, pri] where class_type is 'Yes'/'No' and pri is the
        probability of that class formatted as a short string.
    """
    import os
    from ibm_watson_machine_learning import APIClient

    # SECURITY(review): the API key was hard-coded; prefer the environment.
    # The literal fallback preserves old behavior but should be revoked and
    # removed once callers set IBMCLOUD_API_KEY.
    api_key = os.environ.get('IBMCLOUD_API_KEY',
                             'teuonH42NK9fs-h2kbKROGBHhiirxc6tFhyXQ7eIOcgg')
    location = 'us-south'

    wml_credentials = {
        "apikey": api_key,
        "url": 'https://' + location + '.ml.cloud.ibm.com'
    }

    client = APIClient(wml_credentials)

    space_id = 'f6f6501f-c8e1-4c4f-9c65-59ecafdcc386'
    client.set.default_space(space_id)
    deployment_uid = '44695a6b-6153-454e-ae58-f26c19fb9b95'

    scoring_payload = {
        "input_data": [{
            "fields":
            ["age", "job", "marital status", "education", "credit default?"],
            "values": [id]
        }]
    }

    predictions = client.deployments.score(deployment_uid, scoring_payload)

    # Read the structured response instead of string-splitting its JSON dump
    # (the old parsing depended on exact key order and float formatting).
    # values[0] is assumed to be [predicted_class, [prob_no, prob_yes]],
    # matching the positions the original positional parsing relied on —
    # TODO confirm against the deployed model's output schema.
    prediction, probabilities = predictions['predictions'][0]['values'][0]
    if str(prediction).startswith('n'):
        class_type = 'No'
        pri = '%.2f' % probabilities[0]
    else:
        class_type = 'Yes'
        pri = '%.2f' % probabilities[1]

    return [class_type, pri]
# ===== Example 6 =====
def connect_wml_service():
    """
    Instantiate a client using credentials
    """
    creds = load_wml_credentials()
    # Only the apikey/url pair is forwarded to the client.
    wml_credentials = {key: creds[key] for key in ("apikey", "url")}
    return APIClient(wml_credentials)
# ===== Example 7 =====
def authentication():
    """Return a WML client authenticated via the IBMCLOUD_API_KEY env var.

    Returns:
        APIClient: client connected to IBM Cloud (us-south region).

    Raises:
        Exception: if IBMCLOUD_API_KEY is not set (or empty).
    """
    # Read the variable once; the old code read it twice via two APIs.
    api_key = os.environ.get("IBMCLOUD_API_KEY")
    if not api_key:
        # Message previously said "API_KEY", which is not the variable
        # actually read — keep it in sync with the lookup above.
        raise Exception("IBMCLOUD_API_KEY environment variable not defined")

    wml_credentials = {
        "url": "https://us-south.ml.cloud.ibm.com",
        "apikey": api_key,
    }
    return APIClient(wml_credentials)  # Connect to IBM cloud
# ===== Example 8 =====
    def set_connection(self, username=None, password=None, url=None):
        '''
        Instantiate WML and WOS python clients.

        Uses the same CP4D credentials for both WML and WOS, meaning both 
        services must be on same CP4D cluster.

        Passed values override ENV vars which override default values.
        '''

        _credentials = {"username": username, "password": password, "url": url}

        # check for env vars if args not passed
        env_keys = dict(
            zip(_credentials.keys(),
                ['CP4D_USERNAME', "CP4D_PASSWORD", "CP4D_URL"]))
        _credentials = {
            k: v if v else os.environ.get(env_keys[k])
            for k, v in _credentials.items()
        }

        # get default values if args not passed and env vars not present
        # NOTE(review): "******" looks like redacted placeholders — real
        # credentials must come from the args or CP4D_* env vars; confirm.
        defaults = {
            "username": "******",
            "password": "******",
            "url": "https://zen-cpd-zen.apps.pwh.ocp.csplab.local"
        }
        _credentials = {
            k: v if v else defaults[k]
            for k, v in _credentials.items()
        }

        # WOS client is constructed first; instance_id/version are added to
        # the SAME dict afterwards so the WML client sees them too (both
        # clients share one dict object).
        self._credentials = _credentials
        self.wos_client = APIClient4ICP(self._credentials)
        self._credentials['instance_id'] = 'wml_local'
        self._credentials['version'] = '3.0.1'
        self.wml_client = APIClient(self._credentials)
def wml_scoring(space_id, deployment_id):
    """Flask view body: score a WML deployment with the request's JSON body.

    Args:
        space_id: WML deployment space to select.
        deployment_id: deployment to score against.

    Returns:
        Flask JSON response containing the first prediction object.
    """
    # Reject requests without a JSON body.
    if not request.json:
        abort(400)
    payload_scoring = {
        "input_data": [
            request.json
        ]
    }

    wml_client = APIClient(WML_CREDENTIALS)
    wml_client.set.default_space(space_id)

    # (removed an unused `records_list` local and a pointless credentials alias)
    scoring_response = wml_client.deployments.score(deployment_id, payload_scoring)
    return jsonify(scoring_response["predictions"][0])
    def __init__(self, wml_vcap: dict) -> None:
        """Authenticate a WML client from VCAP credentials and cache deployment info.

        Args:
            wml_vcap: WML service credentials; must contain "url" and
                "space_id" keys. A copy is handed to APIClient.
        """
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(logging.DEBUG)

        self.logger.info("Client authentication. URL: {}".format(
            wml_vcap["url"]))
        # Copy so the client cannot mutate the caller's credential dict.
        self.client = APIClient(wml_vcap.copy())
        self.client.set.default_space(wml_vcap['space_id'])
        # All deployments in the space; presumably consumed by
        # update_scoring_functions() below — confirm.
        self.deployment_list = self.client.deployments.get_details(
        )['resources']

        # Random per-session id used to correlate scoring requests in logs.
        self.transaction_id = 'transaction-id-' + uuid.uuid4().hex
        self.logger.info("Transaction ID: {}".format(self.transaction_id))

        # Filled in by update_scoring_functions().
        self.area_action_deployment_guid = ""
        self.satisfaction_deployment_guid = ""
        self.area_action_scoring_url = ""
        self.satisfaction_scoring_url = ""

        self.update_scoring_functions()

        # Canned customer-response templates, selected elsewhere by sentiment.
        self.neutral_templates = [
            "We’re sorry that you were unhappy with your experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. In the meantime, we’d like to offer you a <b>{}</b> on your next rental with us.",
            "We're very sorry for the trouble you experienced with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. In the meantime, we’d like to offer you a <b>{}</b> on your next rental with us.",
            "We sincerely apologize for this experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. In the meantime, we’d like to offer you a <b>{}</b> on your next rental with us.",
            "I am very disappointed to hear about your experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. In the meantime, we’d like to offer you a <b>{}</b> on your next rental with us."
        ]

        self.negative_templates = [
            "We’re sorry that you were unhappy with your experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. Our customer agent will contact you shortly.",
            "We're very sorry for the trouble you experienced with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. Our customer agent will contact you shortly.",
            "We sincerely apologize for this experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. Our customer agent will contact you shortly.",
            "I am very disappointed to hear about your experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. Our customer agent will contact you shortly."
        ]

        self.positive_templates = [
            "We are very happy to have provided you with such a positive experience!",
            "We are glad to hear you had such a great experience! ",
            "We appreciate your positive review about your recent experience with us!"
        ]
# ===== Example 11 =====
def get_project_space(credentials):
    """Return the GUID of the deployment space tied to the notebook's project.

    Argument:
    credentials -- the credentials to be used to connect to WML

    Call it only in the notebook where the topology is created, not at
    Streams runtime. Models and deployments are placed in the project's
    space when no other space is given at creation time; the returned GUID
    is what a WMLOnlineScoring object needs.
    """
    from project_lib import Project

    client = APIClient(copy.copy(credentials))
    spaces = client.spaces.get_details()["resources"]
    project_guid = Project.access().get_metadata()["metadata"]["guid"]
    wanted_tag = "dsx-project." + project_guid

    # The project's space carries the tag "dsx-project.<guid>"; like the
    # original scan, the last matching space wins.
    found = None
    for space in spaces:
        if any(tag["value"] == wanted_tag for tag in space["entity"]["tags"]):
            found = space["metadata"]["id"]
    return found
# ===== Example 12 =====
class Pipeline:
    '''Object that represents a WML deployed ML model.

    Orchestrates storing, deploying, scoring and monitoring a model on a
    CP4D cluster through the WML and Watson OpenScale clients.
    '''
    def __init__(self,
                 project_name=None,
                 deployment_space_name=None,
                 model_name=None,
                 software_spec=None,
                 problem_type=None,
                 label_column=None,
                 dataset_name=None,
                 model_path=None,
                 model_type=None,
                 **kwargs):
        # Names identifying where the model lives and what it is called.
        self.project_name = project_name
        self.deployment_space_name = deployment_space_name
        self.model_name = model_name
        self.model_path = model_path
        self.software_spec = software_spec
        # Resolve the ProblemType attribute from its string name, if given.
        self.problem_type = getattr(ProblemType,
                                    problem_type) if problem_type else None
        self.model_type = model_type
        # UIDs resolved later by set_project() / set_namespace().
        self.project_uid = None
        self.deployment_space_uid = None

        # Dataset descriptor; set_data() adds 'data' and 'FEATURE_COLUMNS'.
        self.dataset = {}
        self.dataset['name'] = dataset_name
        self.dataset['label_column'] = label_column

        # Map of all public ProblemType attributes, name -> value.
        self._problem_types = {attr:getattr(ProblemType, attr) \
        for attr in vars(ProblemType) if not attr.startswith('_')}

    def set_connection(self, username=None, password=None, url=None):
        '''
        Instantiate WML and WOS python clients.

        Uses the same CP4D credentials for both WML and WOS, meaning both
        services must be on same CP4D cluster.

        Passed values override ENV vars which override default values.
        '''
        env_keys = {
            "username": 'CP4D_USERNAME',
            "password": "CP4D_PASSWORD",
            "url": "CP4D_URL",
        }
        defaults = {
            "username": "******",
            "password": "******",
            "url": "https://zen-cpd-zen.apps.pwh.ocp.csplab.local"
        }

        creds = {"username": username, "password": password, "url": url}
        # Resolution order per key: explicit argument, then env var, then default.
        for key in creds:
            if not creds[key]:
                creds[key] = os.environ.get(env_keys[key])
            if not creds[key]:
                creds[key] = defaults[key]

        self._credentials = creds
        self.wos_client = APIClient4ICP(self._credentials)
        # The WML client additionally needs these two keys; they are added to
        # the shared credentials dict after the WOS client is constructed.
        self._credentials['instance_id'] = 'wml_local'
        self._credentials['version'] = '3.0.1'
        self.wml_client = APIClient(self._credentials)

    def set_project(self, project_name=None):
        ''' 
        Set default project for wml python client + define client method 
        to extract asset details
        '''
        if project_name: self.project_name = project_name
        assert self.project_name, 'project_name must be passed.'

        # get list (len 1) of CP4D projects matching specified name
        # No project-listing call on the WML client here, so hit the CP4D
        # REST endpoint directly, re-using the client's bearer token.
        token = self.wml_client.wml_token
        headers = {
            "content-type": "application/json",
            "Accept": "application/json",
            "Authorization": "Bearer " + token
        }
        # NOTE(review): verify=False disables TLS certificate checking —
        # acceptable only against self-signed lab clusters.
        project_uid_list = [
            x.get('metadata').get('guid')
            for x in requests.get(self._credentials.get('url') +
                                  '/v2/projects/',
                                  headers=headers,
                                  verify=False).json().get('resources')
            if x.get('entity').get('name') == self.project_name
        ]
        # set project
        # ISSUE: setting default CP$D project seems to unset the default deployment space!
        if len(project_uid_list) < 1:
            raise ValueError((
                f'No project named {self.project_name} exists in'
                ' your CP4D Instance. Please provide the name of an existing project.'
            ))
        self.project_uid = project_uid_list[0]
        self.wml_client.set.default_project(self.project_uid)

        # Bound onto the wml client below via types.MethodType, so `self`
        # inside this helper is the WML client, not this Pipeline.
        def get_asset_details(self, project_uid=None):
            if project_uid:
                self.set.default_project(project_uid)
            if self.default_project_id is None:
                raise ValueError((
                    'There is no default project set. Set a '
                    'default project first or pass a project_uid to this function.'
                ))
            # data_assets.list() only prints a table; capture stdout and
            # parse the text back into a list of dicts.
            temp_stdout = StringIO()
            true_stdout = sys.stdout
            sys.stdout = temp_stdout
            self.data_assets.list()
            #sys.stdout = sys.__stdout__
            sys.stdout = true_stdout
            lines = temp_stdout.getvalue().split('\n')
            # Second printed line holds the column headers.
            keys = [x.split(' ') for x in lines][1]
            keys = [x.lower() for x in keys if len(x) != 0]
            end = len(lines) - 2
            values = [[x for x in x.split(' ') if len(x) != 0] for x in lines
                      if len(x) != 0]
            new_list = []
            for i in range(2, end):
                new_list.append(dict(zip(keys, values[i])))
            return new_list

        # Attach the helper as a bound method of the wml client.
        self.wml_client.get_asset_details = types.MethodType(
            get_asset_details, self.wml_client)

    # self = Pipeline()
    # self.set_connection()
    # path="/Users/[email protected]/Desktop/projects/LowesDeploy/bitbucket_GIT_REPO/val_breast_cancer.csv"
    def set_data(self,
                 dataset_name=None,
                 label_column=None,
                 problem_type=None):
        '''
        Download a dataset stored in the CP4D project's data assets and load
        it into memory. The deployed model will be used to make predictions
        on the downloaded dataset.
        '''
        # Arguments override values captured at construction time.
        if dataset_name: self.dataset['name'] = dataset_name
        if label_column: self.dataset['label_column'] = label_column
        if problem_type: self.problem_type = problem_type

        matching_uids = [
            asset['asset_id']
            for asset in self.wml_client.get_asset_details()
            if asset['name'] == self.dataset['name']
        ]
        if not matching_uids:
            raise ValueError('Specified dataset %s is not available.' %
                             (self.dataset['name']))

        # Use the first data asset carrying the specified name.
        local_path = self.wml_client.data_assets.download(
            matching_uids[0], self.dataset['name'])
        frame = pd.read_csv(local_path)
        os.remove(local_path)
        self.dataset['data'] = frame

        # Every column except the label is treated as a feature.
        self.dataset['FEATURE_COLUMNS'] = frame.columns.drop(
            self.dataset['label_column']).tolist()

        print(frame.head())

    def set_namespace(self, deployment_space_name=None):
        '''
        Create a deployment space with the specified name and make it the
        client's default.
        '''
        if deployment_space_name:
            self.deployment_space_name = deployment_space_name

        # create new deployment space
        meta = {
            self.wml_client.spaces.ConfigurationMetaNames.NAME:
            self.deployment_space_name
        }
        space_details = self.wml_client.spaces.store(meta)
        space_uid = space_details.get('metadata').get('guid')
        # set new space as default space for future actions
        # ISSUE: setting default deployment space seems to unset the default CP4D project!
        self.wml_client.set.default_space(space_uid)
        print("Deployment space created: " + self.deployment_space_name)

    def store_model(self,
                    model_path=None,
                    model_name=None,
                    model_type=None,
                    software_spec=None):
        '''
        Store a python ML model in the WML instance's repository.

        Any argument given here overrides the value set at construction time.

        Params:
            model_path: (str) model must be a .tar.gz file
            model_name: (str) display name for the stored model
            model_type: (str) WML model type identifier
            software_spec: (str) name of a WML software specification
        '''
        if model_name: self.model_name = model_name
        if model_path: self.model_path = model_path
        if model_type: self.model_type = model_type
        if software_spec: self.software_spec = software_spec

        for attr in ('model_name', 'model_path', 'model_type',
                     'software_spec'):
            assert getattr(self, attr), '%s must be passed.' % attr

        spec_uid = self.wml_client.software_specifications.get_id_by_name(
            self.software_spec)

        # wml seems to do some kind of path resolution that caused a problem
        # at some point, so hand it an absolute path.
        self.model_path = os.path.abspath(self.model_path)
        print('model path: ', self.model_path)
        meta_names = self.wml_client.repository.ModelMetaNames
        self.model_details = self.wml_client.repository.store_model(
            self.model_path,
            meta_props={
                meta_names.NAME: self.model_name,
                meta_names.TYPE: self.model_type,
                meta_names.SOFTWARE_SPEC_UID: spec_uid,
            })
        self.model_uid = self.model_details.get('metadata').get('guid')

        print('Stored model:', self.model_details)

    def deploy_model(self):
        '''Create an online deployment for the stored WML model.'''
        config = self.wml_client.deployments.ConfigurationMetaNames
        self.deployment = self.wml_client.deployments.create(
            artifact_uid=self.model_uid,
            meta_props={
                config.NAME: self.model_name,
                config.ONLINE: {},
            })
        self.deployment_uid = self.deployment.get('metadata').get('guid')
        print("Deployment succesful! at " +
              self.deployment['entity']['status']['online_url']['url'])

    def score_deployed_model(self):
        '''Send the loaded dataset's feature columns to the deployed model.

        Returns the (request_payload, response_payload) pair so callers can
        log it (e.g. to WOS payload logging).
        '''
        print('Scoring deployed model...')
        features = self.dataset['FEATURE_COLUMNS']
        rows = self.dataset['data'][features].values.tolist()
        request_payload = {
            'input_data': [{
                'fields': features,
                'values': rows,
            }]
        }
        response_payload = self.wml_client.deployments.score(
            self.deployment_uid, request_payload)
        if response_payload:
            print('Deployed model succesfully scored.')
        return request_payload, response_payload

    def set_subscription(self):
        '''Create subscription to the stored model and log a request/response payload'''

        # set binding to external WML instance cluster
        # self.wos_client.data_mart.bindings.add('WML instance',
        #     WatsonMachineLearningInstance4ICP(wml_credentials = openscale_credentials)
        #     )

        # create subscription to stored model
        print('Creating subscription to WML model...')
        self.subscription = self.wos_client.data_mart.subscriptions.add(
            WatsonMachineLearningAsset(
                self.model_uid,
                problem_type=self.problem_type,
                input_data_type=InputDataType.STRUCTURED,
                label_column=self.dataset['label_column'],
                feature_columns=self.dataset['FEATURE_COLUMNS'],
                #categorical_columns=self.dataset.CATEGORICAL_COLUMNS,
                prediction_column='prediction',
                probability_column='probability'))

        # log payload
        # Score once so there is at least one request/response pair to store.
        request_payload, response_payload = self.score_deployed_model()
        record = PayloadRecord(request=request_payload,
                               response=response_payload)
        #self.subscription.payload_logging.enable() # apparently not necessary
        self.subscription.payload_logging.store(records=[record])
        # give WOS time to ingest Payload data before attempting any monitoring.
        wait = 60
        print(f'Wait {wait} seconds for WOS database to update...')
        time.sleep(wait)
        print('Payload Table:')
        self.subscription.payload_logging.show_table(limit=5)

    def run_quality_monitor(self):
        '''Enable WOS quality monitoring, store feedback data and print the
        resulting quality metrics and confusion matrix.'''
        self.subscription.quality_monitoring.enable(threshold=.8,
                                                    min_records=50)
        wait = 60
        print(f'Wait {wait} seconds for WOS database to update...')
        time.sleep(wait)
        # log feedback
        # Feedback rows must match the column order recorded in the
        # subscription's training-data schema, so read that order back first.
        ordered_features_and_target = [
            col['name'] for col in self.subscription.get_details()['entity']
            ['asset_properties']['training_data_schema']['fields']
        ]
        feedback_data = self.dataset['data'][ordered_features_and_target]

        self.subscription.feedback_logging.store(feedback_data.values.tolist(),
                                                 data_header=True)
        # Run the monitor synchronously, then fetch its final details.
        run_details = self.subscription.quality_monitoring.run(
            background_mode=False)
        run_details = self.subscription.quality_monitoring.get_run_details(
            run_details['id'])
        print('Model Qaulity Validation:')
        print(pd.Series(run_details['output']['metrics']))
        print(
            pd.DataFrame(run_details['output']['confusion_matrix']
                         ['metrics_per_label']).T)

    def _init_cleanup(self,
                      deployment_space_name=None,
                      model_name=None,
                      delete_all=False):
        '''
        If deployment space with specified name already exists (or multiple with 
        same name), delete any deployments and assets existing in that 
        deployment space.
        If WOS subscriptions to models with specified name exist, delete that 
        subscription.

        Params:
            delete_all: (bool) If true, delete all subscriptions and spaces not just 
            those with specified name.
        '''
        # Arguments only fill in values that were not set at construction time.
        if not self.model_name: self.model_name = model_name
        if not self.deployment_space_name:
            self.deployment_space_name = deployment_space_name

        # delete WOS subscriptions to models with specified name
        # note: we are not checking if models were stored in specified namespace
        subscription_details = self.wos_client.data_mart.subscriptions.get_details(
        )['subscriptions']
        for record in subscription_details:
            # keys_exist presumably walks nested dict keys and returns the
            # leaf value (falsy when missing) — TODO confirm helper contract.
            if delete_all or keys_exist(record, 'entity', 'asset',
                                        'name') == self.model_name:
                print(("Deleting existing subscription to model with name "
                       f"{keys_exist(record, 'entity', 'asset', 'name')}."))
                subscription_uid = record['metadata']['guid']
                # disable quality monitor from running hourly
                # assume quality monitoring is automatically disabled if subscription is deleted
                #self.wos_client.data_mart.subscriptions.get(subscription_uid).quality_monitoring.disable()
                self.wos_client.data_mart.subscriptions.delete(
                    subscription_uid)

        # list existing deployment spaces with specified name
        # nb: wml_client.spaces is not mentioned in the CP4D client docs,
        # only in the IBM cloud client docs, yet it is used here. hmmm?
        # Maps a space to its guid when its name matches (or delete_all is
        # set), else None; the filter() below drops the Nones.
        get_uid = lambda x: x.get('metadata').get('guid') if (
            (x.get('metadata').get('name') == self.deployment_space_name
             ) or delete_all) else None
        spaces = list(
            filter(
                lambda x: x is not None,
                map(get_uid,
                    self.wml_client.spaces.get_details().get('resources'))))
        if len(spaces) == 0:
            print(
                f'No deployment spaces with name {self.deployment_space_name} found'
            )
        # delete all assests and deployments in each space, and space itself
        for space in spaces:
            print("Found existing deployment space with name " + \
                self.deployment_space_name + ". Deleting deployments and assets from previous runs")
            # Deployments and models must be removed before the space itself
            # can be deleted; the space must be default to enumerate them.
            self.wml_client.set.default_space(space)
            for deployment in self.wml_client.deployments.get_details().get(
                    'resources'):
                uid = deployment.get('metadata').get('guid')
                self.wml_client.deployments.delete(uid)
                print('Deleting deployment ' + uid)
            for model in self.wml_client.repository.get_details().get(
                    'models').get('resources'):
                uid = model.get('metadata').get('guid')
                self.wml_client.repository.delete(uid)
                print('Deleting model ' + uid)
            # delete deployment space
            self.wml_client.spaces.delete(space)

    def specification(self):
        print("type: %s" % self.model_type)
        print("deployment space: %s" % self.deployment_space_name)
        print("project name: %s" % self.project_name)
        if hasattr(self, 'wml_client') and self.project_uid:
            print('data assets:', self.wml_client.get_asset_details())
    def __init__(self,
                 wml_credentials,
                 space_name: Optional[str] = None,
                 deployed_model_name: Optional[str] = None,
                 deployment_id: Optional[str] = None,
                 default_max_oaas_time_limit_sec: Optional[int] = None,
                 default_max_run_time_sec: Optional[int] = 600,
                 monitor_loop_delay_sec: int = 5):
        """Initialize the interface object.
        If the deployment_uuid is specified (WS Cloud), the space_name and model_name are optional.  TODO: test on IBM Cloud
        If no deployment_uuid (CPD), specify both the model and space name.
        Will find UUID based on space and deployed model id.
        In CPDv3.5, always define the space_name, in combination with either the model_name, or the deployment_id.
        Providing the deployment_id is more efficient. If proving the name, the DeployedDOModel will look for the DeploymentID based on the model name.

        Time limits:
        - Both are optional: if no value, no time-limit is imposed
        - These are default values. Can be overridden in solve method

        Args:
            deployed_model_name (str): name of deployed model (CPD)
            space_name (str): name of deployment space (CPD)
            deployment_id (str): Deployment UUID (WS Cloud)
            default_max_oaas_time_limit_sec (int): default oaas.timeLimit in seconds.
            default_max_run_time_sec (int): default maximum run time in seconds. Includes the queueing time.
            monitor_loop_delay_sec (int): delay in seconds in monitoring/polling loop
        """

        # Inputs
        self.wml_credentials = wml_credentials
        self.space_name = space_name
        self.model_name = deployed_model_name
        self.deployment_id = deployment_id
        self.default_max_oaas_time_limit_sec = default_max_oaas_time_limit_sec  # In seconds! None implies no time timit. Note the original oaas.timeLimit is in milli-seconds!
        self.default_max_run_time_sec = default_max_run_time_sec  #60  # In seconds: Job will be deleted. None implies no time timit.
        self.monitor_loop_delay_sec = monitor_loop_delay_sec  # In seconds
        # self.time_limit = 600  # in milliseconds. timeLimit for DO model cancel
        #         self.inputs = inputs
        #         self.debug = debug
        #         self.debug_file_dir = debug_file_dir
        #         self.log_file_name = log_file_name

        # State:
        self.solve_status = None
        self.objective = None
        self.solve_details = {}
        self.outputs = {}
        self.run_time = 0  # Run-time of job in seconds
        self.job_details = None

        # Setup and connection to deployed model
        # Single WML connection used for all subsequent job operations.
        self.client = APIClient(wml_credentials)

        # space_id = [x['metadata']['id'] for x in self.client.spaces.get_details()['resources'] if
        #             x['entity']['name'] == space_name][0]
        # get_space_id / get_deployment_id are defined elsewhere in this class.
        space_id = self.get_space_id(space_name)
        self.client.set.default_space(
            space_id)  # Also required when using deployment_id

        # Resolve the deployment id from the model name when not given explicitly.
        if self.deployment_id is None:
            #             space_id = [x['metadata']['id'] for x in self.client.spaces.get_details()['resources'] if
            #                         x['entity']['name'] == space_name][0]
            #             self.client.set.default_space(space_id)
            # self.deployment_uid = [x['metadata']['guid'] for x in self.client.deployments.get_details()['resources'] if
            #                        x['entity']['name'] == model_name][0]
            self.deployment_id = self.get_deployment_id(deployed_model_name)
    def __init__(self,
                 wml_credentials: Dict,
                 model_name: str,
                 scenario_name: str,
                 space_name: str,
                 package_paths: List[str] = [],  # NOTE(review): mutable default argument — shared across calls; consider a None sentinel
                 file_paths: List[str] = [],  # NOTE(review): mutable default argument — shared across calls; consider a None sentinel
                 deployment_name: str = 'xxx',
                 deployment_description: str = 'xxx',
                 project=None,
                 tmp_dir: str = None):
        """Deploy the Python model of a DO Experiment scenario to WML.

        Support for custom packages:
        1. For packages in conda/PyPI: through the yaml.
        2. For other custom packages: make sure you have the zip/gz package file (.whl doesn't (yet) work)
        Specify the path(s) to the zip/gz files in package_paths.
        Yaml and multiple package files can be combined

        :param wml_credentials: credentials dict passed to APIClient
        :param model_name (str): name of DO Experiment
        :param scenario_name (str): name of scenario with the Python model
        :param space_name (str): name of deployment space
        :param package_paths (List[str]): list of paths to zip/gz packages that will be included.
        :param file_paths (List[str]): list of paths to files that will be included along side the model. Components can be imported using `from my_file import MyClass`
        :param deployment_name (str): name for the WML deployment
        :param deployment_description (str): description for the WML deployment
        :param project (project_lib.Project): for WS Cloud, not required for CP4D on-prem. See ScenarioManager(). Used to connect to DO Experiment.
        :param tmp_dir (str): path to directory where the intermediate files will be written. Make sure this exists. Can be used for debugging to inspect the files. If None, will use `tempfile` to generate a temporary folder that will be cleaned up automatically.
        """
        # Inputs
        self.wml_credentials = wml_credentials
        self.project = project
        self.model_name = model_name
        self.scenario_name = scenario_name
        #         self.space_name = space_name
        self.deployment_name = deployment_name
        self.deployment_description = deployment_description

        self.package_paths = package_paths
        self.file_paths = file_paths
        self.tmp_dir = tmp_dir

        # Initialize clients
        self.client = APIClient(wml_credentials)
        space_id = self.guid_from_space_name(
            space_name)  # TODO: catch error if space_name cannot be found?
        # A default space must be set before other client calls can be used.
        result = self.client.set.default_space(space_id)
        #         print(f"client space_id = {space_id}, result={result}")
        self.scenario_manager = ScenarioManager(model_name=model_name,
                                                scenario_name=scenario_name,
                                                project=project)

        # State
        self.model_uid = None  # set once the model archive has been stored in WML
        self.deployment_uid = None  # set once the deployment has been created

        # Code templates
        # Boilerplate prepended to the scenario's model code: reads every
        # local .csv file into the `inputs` dict expected by the model.
        self.main_header_py = \
"""
from docplex.util.environment import get_environment
from os.path import splitext
import pandas
from six import iteritems

def get_all_inputs():
    '''Utility method to read a list of files and return a tuple with all
    read data frames.
    Returns:
        a map { datasetname: data frame }
    '''
    result = {}
    env = get_environment()
    for iname in [f for f in os.listdir('.') if splitext(f)[1] == '.csv']:
        with env.get_input_stream(iname) as in_stream:
            df = pandas.read_csv(in_stream)
            datasetname, _ = splitext(iname)
            result[datasetname] = df
    return result

def write_all_outputs(outputs):
    '''Write all dataframes in ``outputs`` as .csv.

    Args:
        outputs: The map of outputs 'outputname' -> 'output df'
    '''
    for (name, df) in iteritems(outputs):
        csv_file = '%s.csv' % name
        print(csv_file)
        with get_environment().get_output_stream(csv_file) as fp:
            if sys.version_info[0] < 3:
                fp.write(df.to_csv(index=False, encoding='utf8'))
            else:
                fp.write(df.to_csv(index=False).encode(encoding='utf8'))
    if len(outputs) == 0:
        print("Warning: no outputs written")

def __iter__(self): return 0
# Load CSV files into inputs dictionnary
inputs = get_all_inputs()
outputs = {}

###########################################################
# Insert model below
###########################################################
"""
        # Boilerplate appended after the model code: writes the `outputs`
        # dict back as .csv attachments.
        self.main_footer_py = \
"""
###########################################################

# Generate output files
write_all_outputs(outputs)
"""
        # NOTE(review): yaml template — presumably defines the custom
        # software specification / conda environment; verify its content.
        self.yaml = \
"""
示例#15
0
def main(argv):
    """Solve a CPLEX model file on Watson Machine Learning.

    Parses ``-f/--ffile`` for the model file (default ``diet.lp``), creates
    the deployment space, stored model and batch deployment on first use,
    then submits a solve job, polls it to completion and writes every
    returned output attachment to a local file.

    Args:
        argv: command-line arguments (typically ``sys.argv[1:]``).
    """
    cplex_file = "diet.lp"
    try:
        opts, args = getopt.getopt(argv, "hf:", ["ffile="])
    except getopt.GetoptError:
        print('cplexrunonwml.py -f <file>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('cplexrunonwml.py -f <file>')
            sys.exit()
        elif opt in ("-f", "--ffile"):
            cplex_file = arg
    print('CPLEX file is', cplex_file)

    # Derive the WML asset names from the model file name (diet -> diet_space).
    basename = cplex_file.split('.')[0]
    model_name = basename + "_model"
    deployment_name = basename + "_deployment"
    space_name = basename + "_space"

    print("Creating WML Client")
    client = APIClient(wml_credentials)

    def guid_from_space_name(client, name):
        # Return the id of the deployment space called `name`, or None.
        space = client.spaces.get_details()
        for item in space['resources']:
            if item['entity']["name"] == name:
                return item['metadata']['id']
        return None

    space_id = guid_from_space_name(client, space_name)

    if space_id is None:
        print("Creating space")
        # NOTE(review): account-specific COS and ML-instance CRNs are
        # hard-coded; they only work against this particular IBM Cloud account.
        cos_resource_crn = 'crn:v1:bluemix:public:cloud-object-storage:global:a/7f92ce1185a3460579ce2c76a03b1a67:69cd8af5-5427-4efd-9010-7ad13ac3e18a::'
        instance_crn = 'crn:v1:bluemix:public:pm-20:us-south:a/7f92ce1185a3460579ce2c76a03b1a67:82c6ef26-4fd2-40c4-95d3-abe3c3ad19fd::'

        metadata = {
            client.spaces.ConfigurationMetaNames.NAME: space_name,
            client.spaces.ConfigurationMetaNames.DESCRIPTION:
            space_name + ' description',
            client.spaces.ConfigurationMetaNames.STORAGE: {
                "type": "bmcos_object_storage",
                "resource_crn": cos_resource_crn
            },
            client.spaces.ConfigurationMetaNames.COMPUTE: {
                "name": "existing_instance_id",
                "crn": instance_crn
            }
        }
        space = client.spaces.store(meta_props=metadata)
        space_id = client.spaces.get_id(space)

    print("space_id:", space_id)

    client.set.default_space(space_id)

    print("Getting deployment")
    deployments = client.deployments.get_details()

    # Reuse an existing deployment with the expected name, if any.
    deployment_uid = None
    for res in deployments['resources']:
        if res['entity']['name'] == deployment_name:
            deployment_uid = res['metadata']['id']
            print("Found deployment", deployment_uid)
            break

    if deployment_uid is None:
        print("Creating model")
        import tarfile

        def reset(tarinfo):
            # Normalize ownership so the archive is reproducible.
            tarinfo.uid = tarinfo.gid = 0
            tarinfo.uname = tarinfo.gname = "root"
            return tarinfo

        tar = tarfile.open("model.tar.gz", "w:gz")
        tar.add(cplex_file, arcname=cplex_file, filter=reset)
        tar.close()

        print("Storing model")
        model_metadata = {
            client.repository.ModelMetaNames.NAME:
            model_name,
            client.repository.ModelMetaNames.DESCRIPTION:
            model_name,
            client.repository.ModelMetaNames.TYPE:
            "do-cplex_12.10",
            client.repository.ModelMetaNames.SOFTWARE_SPEC_UID:
            client.software_specifications.get_uid_by_name("do_12.10")
        }

        model_details = client.repository.store_model(
            model='./model.tar.gz', meta_props=model_metadata)

        model_uid = client.repository.get_model_uid(model_details)

        print(model_uid)

        print("Creating deployment")
        deployment_props = {
            client.deployments.ConfigurationMetaNames.NAME: deployment_name,
            client.deployments.ConfigurationMetaNames.DESCRIPTION:
            deployment_name,
            client.deployments.ConfigurationMetaNames.BATCH: {},
            client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {
                'name': 'S',
                'nodes': 1
            }
        }

        deployment_details = client.deployments.create(
            model_uid, meta_props=deployment_props)

        deployment_uid = client.deployments.get_uid(deployment_details)

        print('deployment_id:', deployment_uid)

    print("Creating job")
    import pandas as pd

    # Ask for JSON results plus the solver log; output ids are regexes.
    solve_payload = {
        client.deployments.DecisionOptimizationMetaNames.SOLVE_PARAMETERS: {
            'oaas.logAttachmentName': 'log.txt',
            'oaas.logTailEnabled': 'true',
            'oaas.includeInputData': 'false',
            'oaas.resultsFormat': 'JSON'
        },
        client.deployments.DecisionOptimizationMetaNames.INPUT_DATA: [{
            "id":
            cplex_file,
            "content":
            getfileasdata(cplex_file)
        }],
        client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA: [{
            "id":
            r".*\.json"
        }, {
            "id":
            r".*\.txt"
        }]
    }

    job_details = client.deployments.create_job(deployment_uid, solve_payload)
    job_uid = client.deployments.get_job_uid(job_details)

    print('job_id', job_uid)

    from time import sleep

    # Poll until the job reaches a terminal state.
    while job_details['entity']['decision_optimization']['status'][
            'state'] not in ['completed', 'failed', 'canceled']:
        print(
            job_details['entity']['decision_optimization']['status']['state'] +
            '...')
        sleep(5)
        job_details = client.deployments.get_job_details(job_uid)

    print(job_details['entity']['decision_optimization']['status']['state'])

    for output_data in job_details['entity']['decision_optimization'][
            'output_data']:
        if output_data['id'].endswith('csv'):
            print('Solution table:' + output_data['id'])
            solution = pd.DataFrame(output_data['values'],
                                    columns=output_data['fields'])
            solution.head()
        else:
            print(output_data['id'])
            if "values" in output_data:
                output = output_data['values'][0][0]
            elif "content" in output_data:
                output = output_data['content']
            else:
                # No inline payload for this attachment: skip it instead of
                # decoding an unbound name (bug in the original version).
                continue
            # Attachments come back base64-encoded.
            output = output.encode("UTF-8")
            output = base64.b64decode(output)
            output = output.decode("UTF-8")
            print(output)
            with open(output_data['id'], 'wt') as file:
                file.write(output)
示例#16
0
def deployWMLModel():
    """Train a Bangalore house-price linear regression and deploy it to WML.

    Reads the raw CSV from ``app.config["DATASET"]``, cleans and
    feature-engineers it, fits a ``LinearRegression``, stores and deploys
    the model in the configured deployment space, persists the deployment
    record to ``wmlDeployment.json`` and returns a JSON status with the
    model id.
    """
    ''' Step 1: Build the Linear Regression Model '''
    #importing the dataset
    df1 = pd.read_csv(app.config["DATASET"]+'Bengaluru_House_Data.csv')
    df2 = df1.drop(['area_type', 'society', 'balcony',
                    'availability'], axis='columns')
    df3 = df2.dropna()
    # 'size' looks like "2 BHK" / "4 Bedroom": keep the leading number.
    df3['bhk'] = df3['size'].apply(lambda x: int(x.split(' ')[0]))

    def convert_sqft_to_num(x):
        # Ranges like "2100-3000" become their midpoint; other
        # non-numeric values become None (dropped implicitly later).
        tokens = x.split('-')
        if len(tokens) == 2:
            return(float(tokens[0])+float(tokens[1]))/2
        try:
            return float(x)
        except:
            return None

    df4 = df3.copy()
    df4['total_sqft'] = df4['total_sqft'].apply(convert_sqft_to_num)

    #now we will start with feature engineering techniques and dimensionality reduction techniques
    df5 = df4.copy()
    # price is in lakhs; price per sqft is used below for outlier removal
    df5['price_per_sqft'] = df5['price']*100000/df5['total_sqft']

    df5.location = df5.location.apply(lambda x: x.strip())
    location_stats = df5.groupby('location')['location'].agg(
        'count').sort_values(ascending=False)
    location_stats_less_than_10 = location_stats[location_stats <= 10]
    # Collapse rare locations into one 'other' bucket to limit dummy columns.
    df5.location = df5.location.apply(
        lambda x: 'other' if x in location_stats_less_than_10 else x)

    # Drop listings with implausibly little area per bedroom (< 300 sqft/bhk).
    df6 = df5[~(df5.total_sqft/df5.bhk < 300)]

    def remove_pps_outliers(df):
        # Keep rows within one std dev of each location's mean price/sqft.
        df_out = pd.DataFrame()
        for key, subdf in df.groupby('location'):
            m = np.mean(subdf.price_per_sqft)
            st = np.std(subdf.price_per_sqft)
            reduced_df = subdf[(subdf.price_per_sqft > (m-st))
                               & (subdf.price_per_sqft <= (m+st))]
            df_out = pd.concat([df_out, reduced_df], ignore_index=True)
        return df_out

    df7 = remove_pps_outliers(df6)

    def remove_bhk_outliers(df):
        # Drop n-BHK rows priced below the mean price/sqft of (n-1)-BHK
        # homes in the same location, when enough samples exist.
        exclude_indices = np.array([])
        for location, location_df in df.groupby('location'):
            bhk_stats = {}
            for bhk, bhk_df in location_df.groupby('bhk'):
                bhk_stats[bhk] = {
                    'mean': np.mean(bhk_df.price_per_sqft),
                    'std': np.std(bhk_df.price_per_sqft),
                    'count': bhk_df.shape[0]
                }
            for bhk, bhk_df in location_df.groupby('bhk'):
                stats = bhk_stats.get(bhk-1)
                if stats and stats['count'] > 5:
                    exclude_indices = np.append(
                        exclude_indices, bhk_df[bhk_df.price_per_sqft < (stats['mean'])].index.values)
        return df.drop(exclude_indices, axis='index')
    df8 = remove_bhk_outliers(df7)

    # Drop homes with 2+ more bathrooms than bedrooms, then helper columns.
    df9 = df8[df8.bath < df8.bhk+2]
    df10 = df9.drop(['size', 'price_per_sqft'], axis='columns')

    # One-hot encode location; drop 'other' to avoid collinearity.
    dummies = pd.get_dummies(df10.location)
    df11 = pd.concat([df10, dummies], axis='columns')

    df11 = df11.drop(['other'], axis='columns')
    df12 = df11.drop('location', axis='columns')

    #will define dependent variable for training
    X = df12.drop('price', axis='columns')
    y = df12.price

    from sklearn.model_selection import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=10)

    from sklearn.linear_model import LinearRegression
    lr_clf = LinearRegression()
    lr_clf.fit(x_train, y_train)
    lr_clf.score(x_test, y_test)
    print("Model Built Successfully")

    ''' Deploy the Model to Watson Machine Learning '''
    # Loads wml_credentials / space_id into module-level globals.
    getWmlCredentials()

    client = APIClient(wml_credentials)

    # A default space must be set before any other client call.
    client.set.default_space(space_id)

    software_spec_uid = client.software_specifications.get_id_by_name(
        "default_py3.7")

    metadata = {
        client.repository.ModelMetaNames.NAME: 'Bangalore House Price Prediction',
        client.repository.ModelMetaNames.TYPE: "default_py3.7",
        client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid
    }

    published_model = client.repository.store_model(
        lr_clf, meta_props=metadata)

    published_model_uid = client.repository.get_model_uid(published_model)
    model_details = client.repository.get_details(published_model_uid)

    # print(json.dumps(model_details, indent=2))

    client.repository.list_models()

    # Smoke-test: reload the stored model and score a few rows.
    loaded_model = client.repository.load(published_model_uid)
    loaded_model.predict(x_test[:10])

    deploy_meta = {
        client.deployments.ConfigurationMetaNames.NAME: 'Deployment of Bangalore House Price Prediction',
        client.deployments.ConfigurationMetaNames.ONLINE: {}
    }
    created_deployment = client.deployments.create(
        published_model_uid, meta_props=deploy_meta)

    # Persist the deployment record so the prediction endpoint can reuse it.
    with open(app.config["SERVICES"]+'wmlDeployment.json', 'w') as fp:
        json.dump(created_deployment, fp,  indent=2)

    print(json.dumps(created_deployment, indent=2))
    print("Model Successfully Deployed..")
    with open(app.config["SERVICES"]+'wmlDeployment.json') as temp:
        cred = json.loads(temp.read())
    model_id = cred["entity"]["asset"]["id"]
    return jsonify({"status": "Deployed, Model ID: "+model_id})
示例#17
0
def my_form_post():
    """Score one customer-churn record from the submitted form via WML.

    Reads the 19 feature values from the POSTed form, sends them to the
    configured WML deployment, derives the churn verdict through the
    prediction helpers and renders ``result.html``.
    """
    # Scoring-schema field names; the matching HTML form keys simply
    # replace the spaces with underscores.
    feature_fields = [
        "state", "account length", "area code", "international plan",
        "voice mail plan", "number vmail messages",
        "total day minutes", "total day calls", "total day charge",
        "total eve minutes", "total eve calls", "total eve charge",
        "total night minutes", "total night calls",
        "total night charge", "total intl minutes", "total intl calls",
        "total intl charge", "customer service calls"
    ]
    form_values = {
        field.replace(" ", "_"): request.form[field.replace(" ", "_")]
        for field in feature_fields
    }

    # wml authentication
    from ibm_watson_machine_learning import APIClient

    wml_credentials = {
        "url":
        URL,  # the default url for wml services in the US is https://us-south.ml.cloud.ibm.com
        "apikey": API_KEY
    }

    client = APIClient(wml_credentials)
    print(client.version)

    # deployment space
    client.set.default_space(SPACE_ID)

    # deployment id
    deployment_id = DEPLOYMENT_ID

    # test deployment with test data
    scoring_data = {
        client.deployments.ScoringMetaNames.INPUT_DATA: [{
            "fields": feature_fields,
            "values": [[
                form_values[field.replace(" ", "_")]
                for field in feature_fields
            ]]
        }]
    }
    print(scoring_data)

    predictions = client.deployments.score(deployment_id, scoring_data)
    print(
        "The Prediction output regarding customer churn will be displayed in this format 1 for True or 0 for False: \n ",
        predictions)

    # import values.py
    from prediction import values
    values.predictionValues(predictions)

    # obtain variables from values.py
    churn_flag = values.predictionValues.prediction_churn_true_or_false
    churn_pct_one = values.predictionValues.prediction_churn_true_or_false_percentage_one
    churn_pct_two = values.predictionValues.prediction_churn_true_or_false_percentage_two

    # import rules.py
    from prediction import rules
    rules.predictionRules(churn_flag, churn_pct_one, churn_pct_two)

    # obtain variables from rules.py
    churn_result = rules.predictionRules.churn_result
    print(churn_result)
    churn_result_percentage = rules.predictionRules.churn_result_percentage
    print(churn_result_percentage)

    return render_template('result.html',
                           predictions=predictions,
                           churn_result=churn_result,
                           churn_result_percentage=churn_result_percentage,
                           **form_values)
示例#18
0
    def put(self, id):
        '''Update a temperature given its identifier'''
        # Delegate to the data-access layer with the request payload.
        updated = TEMP.update(id, api.payload)
        return updated


@ns.route('/healthz')
class Health(Resource):
    '''Returns "OK" when application is ready'''
    @ns.doc('health')
    def get(self):
        '''Return OK'''
        # Liveness/readiness probe: a fixed body with HTTP 200.
        status_body = {'health': 'OK'}
        return status_body, 200


if __name__ == '__main__':
    # Load WML credentials and deployment config from the .env file.
    load_dotenv(find_dotenv())
    api_key = os.environ.get("APIKEY")
    location = os.environ.get("REGION")

    # Log only the non-secret region; never print the API key — it is a
    # credential and must not end up in stdout/logs (fixed from original).
    print(os.environ.get("REGION"))

    wml_credentials = {
        "apikey": api_key,
        "url": 'https://' + location + '.ml.cloud.ibm.com'
    }

    client = APIClient(wml_credentials)
    # A default space must be set before other client calls.
    client.set.default_space(os.environ.get("SPACE_UID"))
    deployment_uid = os.environ.get("DEPLOYMENT_UID")
    # Serve the app with waitress on all interfaces.
    serve(app, host="0.0.0.0", port=5000)
    #app.run(debug=True, host="0.0.0.0")
示例#19
0
def main():
    """Score the held-out test split against a deployed WML model.

    Reads WML credentials and deployment metadata from YAML files, sends
    the test rows to the deployment for scoring, and returns a dict with
    the confusion matrix ("cm") and accuracy ("acc") versus the true labels.
    """
    with open(CRED_PATH) as stream:
        try:
            credentials = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            # NOTE(review): on a parse error `credentials` stays unbound and
            # a NameError follows below — consider re-raising here.
            print(exc)

    with open(META_PATH) as stream:
        try:
            metadata = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            # NOTE(review): same unbound-name concern as above.
            print(exc)

    data = pd.read_csv(DATA_PATH)

    # Last column is the label; everything before it is a feature.
    X = data.iloc[:, :-1]
    y = data[data.columns[-1]]
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.3,
                                                        random_state=0)

    wml_credentials = {
        "url": credentials["url"],
        "apikey": credentials["apikey"]
    }

    client = APIClient(wml_credentials)
    client.spaces.list()

    SPACE_ID = credentials["space_id"]

    if "deployment_uid" in metadata:
        DEPLOYMENT_UID = metadata["deployment_uid"]
        print("\nExtracting DEPLOYMENT UID from metadata file\n")

    else:
        DEPLOYMENT_UID = input("DEPLOYMENT UID: ")

    client.set.default_space(SPACE_ID)

    payload = {
        "input_data": [{
            "fields": X.columns.to_numpy().tolist(),
            "values": X_test.to_numpy().tolist(),
        }]
    }
    result = client.deployments.score(DEPLOYMENT_UID, payload)

    # Assumes each prediction row's first element is the predicted label —
    # TODO confirm against the deployed model's output schema.
    pred_values = np.squeeze(result["predictions"][0]["values"])
    y_pred_values = [i[0] for i in pred_values]

    def comb_eval(y, y_pred):
        # Bundle the confusion matrix and accuracy into one report dict.
        cm = confusion_matrix(y, y_pred)
        acc = accuracy_score(y, y_pred)

        return {"cm": cm, "acc": acc}

    # Renamed from `eval`, which shadowed the builtin.
    evaluation = comb_eval(y_test, y_pred_values)
    print(evaluation)

    return evaluation