def predict_price_wml(location, sqft, bath, bhk):
    # getWmlCredentials() is expected to populate the module-level
    # wml_credentials and space_id used below.
    getWmlCredentials()
    client = APIClient(wml_credentials)
    client.set.default_space(space_id)
    deployments = client.deployments.get_details()
    with open(app.config["SERVICES"] + 'wmlDeployment.json', 'r') as wmlDeployment:
        cred = json.loads(wmlDeployment.read())
    scoring_endpoint = client.deployments.get_scoring_href(cred)
    # Rebuild the one-hot feature vector from the training columns
    X = pd.read_csv(app.config['DATASET'] + 'intermediate.csv')
    loc_index = np.where(X.columns == location)[0][0]
    x = np.zeros(len(X.columns), dtype=int)
    x[0] = sqft
    x[1] = bath
    x[2] = bhk
    if loc_index >= 0:
        x[loc_index] = 1
    y = [x.tolist()]
    z = list(y)  # the original list(list(y)) was a redundant double copy
    did = client.deployments.get_uid(cred)
    job_payload = {
        client.deployments.ScoringMetaNames.INPUT_DATA: [{
            'values': z
        }]
    }
    scoring_response = client.deployments.score(did, job_payload)
    return math.ceil(scoring_response['predictions'][0]['values'][0][0])
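# A hypothetical invocation of predict_price_wml, assuming the Flask app config
# and the intermediate.csv produced during training are in place; the location
# string is a placeholder and must match one of the one-hot columns.
price_lakhs = predict_price_wml('1st Phase JP Nagar', sqft=1000, bath=2, bhk=2)
print(price_lakhs)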
def _create_wml_client(self):
    tracer.debug("Creating WML client")
    wml_client = APIClient(self._wml_credentials)
    # set space before using any client function
    wml_client.set.default_space(self._deployment_space)
    tracer.debug("WML client created")
    return wml_client
def apicall_vehicle_loan(id):
    api_key = 'teuonH42NK9fs-h2kbKROGBHhiirxc6tFhyXQ7eIOcgg'
    location = 'us-south'
    wml_credentials = {
        "apikey": api_key,
        "url": 'https://' + location + '.ml.cloud.ibm.com'
    }
    from ibm_watson_machine_learning import APIClient
    client = APIClient(wml_credentials)
    space_id = 'f6f6501f-c8e1-4c4f-9c65-59ecafdcc386'
    client.set.default_space(space_id)
    deployment_uid = 'b85fc3cb-4824-491b-baa3-88f981ba26ba'
    scoring_payload = {
        "input_data": [{
            "fields": [
                "Loan_ID", "Gender", "Married", "Dependents", "Education",
                "Self_Employed", "ApplicantIncome", "CoapplicantIncome",
                "LoanAmount", "Loan_Amount_Term", "Credit_History",
                "Property_Area"
            ],
            "values": [id]
        }]
    }
    predictions = client.deployments.score(deployment_uid, scoring_payload)
    # Read the label and class probabilities directly from the response,
    # which has the shape
    # {'predictions': [{'fields': [...], 'values': [['N'|'Y', [p_N, p_Y]]]}]}
    # (the same shape the original string-slicing of the JSON dump assumed).
    label = predictions['predictions'][0]['values'][0][0]
    probabilities = predictions['predictions'][0]['values'][0][1]
    if label == 'N':
        class_type = 'No'
        pri = round(probabilities[0], 2)
    else:
        class_type = 'Yes'
        pri = round(probabilities[1], 2)
    return [class_type, pri]
def apicall_personal_loan(id):
    api_key = 'teuonH42NK9fs-h2kbKROGBHhiirxc6tFhyXQ7eIOcgg'
    location = 'us-south'
    wml_credentials = {
        "apikey": api_key,
        "url": 'https://' + location + '.ml.cloud.ibm.com'
    }
    from ibm_watson_machine_learning import APIClient
    client = APIClient(wml_credentials)
    space_id = 'f6f6501f-c8e1-4c4f-9c65-59ecafdcc386'
    client.set.default_space(space_id)
    deployment_uid = '85280b66-0f77-40a5-b420-a2ac53ea18d0'
    scoring_payload = {
        "input_data": [{
            "fields": [
                "age", "job_cat", "marital status_cat", "education_cat",
                "credit default_cat", "housing loan_cat", "Vehicle loan_cat"
            ],
            "values": [id]
        }]
    }
    predictions = client.deployments.score(deployment_uid, scoring_payload)
    # This model returns a 0/1 label plus class probabilities:
    # {'predictions': [{'fields': [...], 'values': [[0|1, [p_0, p_1]]]}]}
    # (the same shape the original string-slicing of the JSON dump assumed).
    label = predictions['predictions'][0]['values'][0][0]
    probabilities = predictions['predictions'][0]['values'][0][1]
    if str(label) == '0':  # label may come back numeric; compare as string
        class_type = 'No'
        pri = round(probabilities[0], 2)
    else:
        class_type = 'Yes'
        pri = round(probabilities[1], 2)
    return [class_type, pri]
def apicall_housing_loan(id):
    api_key = 'teuonH42NK9fs-h2kbKROGBHhiirxc6tFhyXQ7eIOcgg'
    location = 'us-south'
    wml_credentials = {
        "apikey": api_key,
        "url": 'https://' + location + '.ml.cloud.ibm.com'
    }
    from ibm_watson_machine_learning import APIClient
    client = APIClient(wml_credentials)
    space_id = 'f6f6501f-c8e1-4c4f-9c65-59ecafdcc386'
    client.set.default_space(space_id)
    deployment_uid = '44695a6b-6153-454e-ae58-f26c19fb9b95'
    scoring_payload = {
        "input_data": [{
            "fields": ["age", "job", "marital status", "education", "credit default?"],
            "values": [id]
        }]
    }
    predictions = client.deployments.score(deployment_uid, scoring_payload)
    # This model returns a 'no'/'yes' label plus class probabilities:
    # {'predictions': [{'fields': [...], 'values': [['no'|'yes', [p_no, p_yes]]]}]}
    # (the same shape the original string-slicing of the JSON dump assumed).
    label = predictions['predictions'][0]['values'][0][0]
    probabilities = predictions['predictions'][0]['values'][0][1]
    if str(label).startswith('n'):
        class_type = 'No'
        pri = round(probabilities[0], 2)
    else:
        class_type = 'Yes'
        pri = round(probabilities[1], 2)
    return [class_type, pri]
def authentication():
    if os.getenv("IBMCLOUD_API_KEY"):
        wml_credentials = {
            "url": "https://us-south.ml.cloud.ibm.com",
            "apikey": os.environ.get("IBMCLOUD_API_KEY"),
        }
        return APIClient(wml_credentials)  # Connect to IBM Cloud
    raise Exception("IBMCLOUD_API_KEY environment variable not defined")
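# Hypothetical usage of authentication(); assumes IBMCLOUD_API_KEY is already
# exported in the shell, e.g. `export IBMCLOUD_API_KEY=<your key>`.
client = authentication()
print(client.version)  # quick sanity check that the client authenticated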
def connect_wml_service():
    """Instantiate a client using credentials."""
    wmlcreds = load_wml_credentials()
    wml_credentials = {
        "apikey": wmlcreds['apikey'],
        "url": wmlcreds['url']
    }
    return APIClient(wml_credentials)
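# Hypothetical load_wml_credentials helper (not shown in this snippet); a
# minimal sketch assuming the credentials live in a local JSON file with
# "apikey" and "url" keys.
import json

def load_wml_credentials(path='wml_credentials.json'):
    with open(path) as f:
        return json.load(f)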
def wml_scoring(space_id, deployment_id):
    if not request.json:
        abort(400)
    payload_scoring = {
        "input_data": [request.json]
    }
    wml_client = APIClient(WML_CREDENTIALS)
    wml_client.set.default_space(space_id)
    scoring_response = wml_client.deployments.score(deployment_id, payload_scoring)
    return jsonify(scoring_response["predictions"][0])
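# Hypothetical client-side call to the wml_scoring route; the route path,
# field names, and values below are placeholders that must match your Flask
# URL rule and the deployed model's input schema.
import requests

body = {
    "fields": ["feature_1", "feature_2"],  # illustrative schema
    "values": [[1.0, 2.0]],
}
resp = requests.post(
    "http://localhost:5000/score/<space_id>/<deployment_id>",  # placeholder route
    json=body,
)
print(resp.json())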
def __init__(self, wml_vcap: dict) -> None:
    self.logger = logging.getLogger(self.__class__.__name__)
    self.logger.setLevel(logging.DEBUG)
    self.logger.info("Client authentication. URL: {}".format(wml_vcap["url"]))
    self.client = APIClient(wml_vcap.copy())
    self.client.set.default_space(wml_vcap['space_id'])
    self.deployment_list = self.client.deployments.get_details()['resources']
    self.transaction_id = 'transaction-id-' + uuid.uuid4().hex
    self.logger.info("Transaction ID: {}".format(self.transaction_id))
    self.area_action_deployment_guid = ""
    self.satisfaction_deployment_guid = ""
    self.area_action_scoring_url = ""
    self.satisfaction_scoring_url = ""
    self.update_scoring_functions()
    self.neutral_templates = [
        "We’re sorry that you were unhappy with your experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. In the meantime, we’d like to offer you a <b>{}</b> on your next rental with us.",
        "We're very sorry for the trouble you experienced with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. In the meantime, we’d like to offer you a <b>{}</b> on your next rental with us.",
        "We sincerely apologize for this experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. In the meantime, we’d like to offer you a <b>{}</b> on your next rental with us.",
        "I am very disappointed to hear about your experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. In the meantime, we’d like to offer you a <b>{}</b> on your next rental with us."
    ]
    self.negative_templates = [
        "We’re sorry that you were unhappy with your experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. Our customer agent will contact you shortly.",
        "We're very sorry for the trouble you experienced with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. Our customer agent will contact you shortly.",
        "We sincerely apologize for this experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. Our customer agent will contact you shortly.",
        "I am very disappointed to hear about your experience with Cars4U. We will open a case to investigate the issue with <b>{} {}</b>. Our customer agent will contact you shortly."
    ]
    self.positive_templates = [
        "We are very happy to have provided you with such a positive experience!",
        "We are glad to hear you had such a great experience!",
        "We appreciate your positive review about your recent experience with us!"
    ]
def set_connection(self, username=None, password=None, url=None):
    '''
    Instantiate WML and WOS Python clients.

    Uses the same CP4D credentials for both WML and WOS, meaning both
    services must be on the same CP4D cluster. Passed values override ENV
    vars, which override default values.
    '''
    _credentials = {"username": username, "password": password, "url": url}
    # check for env vars if args not passed
    env_keys = dict(zip(_credentials.keys(),
                        ['CP4D_USERNAME', 'CP4D_PASSWORD', 'CP4D_URL']))
    _credentials = {
        k: v if v else os.environ.get(env_keys[k])
        for k, v in _credentials.items()
    }
    # fall back to default values if args not passed and env vars not present
    defaults = {
        "username": "******",
        "password": "******",
        "url": "https://zen-cpd-zen.apps.pwh.ocp.csplab.local"
    }
    _credentials = {
        k: v if v else defaults[k] for k, v in _credentials.items()
    }
    self._credentials = _credentials
    self.wos_client = APIClient4ICP(self._credentials)
    self._credentials['instance_id'] = 'wml_local'
    self._credentials['version'] = '3.0.1'
    self.wml_client = APIClient(self._credentials)
def get_project_space(credentials):
    """Return the notebook project's space GUID.

    Argument:
    credentials -- the credentials used to connect to WML

    Call it only in the notebook where the topology is created, not at
    Streams runtime. Models and deployments are placed in the project's
    space if no other space is given at their creation time. The space GUID
    is needed to instantiate a WMLOnlineScoring object.
    """
    from project_lib import Project
    wml_client = APIClient(copy.copy(credentials))
    spaces = wml_client.spaces.get_details()["resources"]
    project = Project.access()
    project_guid = project.get_metadata()["metadata"]["guid"]
    # find the space associated with the project
    project_space = None
    for space in spaces:
        for tag in space["entity"]["tags"]:
            if tag["value"] == "dsx-project." + project_guid:
                project_space = space["metadata"]["id"]
    return project_space
def __init__(self, wml_credentials,
             space_name: Optional[str] = None,
             deployed_model_name: Optional[str] = None,
             deployment_id: Optional[str] = None,
             default_max_oaas_time_limit_sec: Optional[int] = None,
             default_max_run_time_sec: Optional[int] = 600,
             monitor_loop_delay_sec: int = 5):
    """Initialize the interface object.

    If the deployment_id is specified (WS Cloud), the space_name and
    deployed_model_name are optional. TODO: test on IBM Cloud
    If no deployment_id (CPD), specify both the model and space name;
    the UUID is then found from the space and the deployed model name.

    In CPDv3.5, always define the space_name, in combination with either
    the deployed_model_name or the deployment_id. Providing the
    deployment_id is more efficient; if providing the name, the
    DeployedDOModel will look up the deployment ID based on the model name.

    Time limits:
    - Both are optional: if no value, no time limit is imposed.
    - These are default values that can be overridden in the solve method.

    Args:
        deployed_model_name (str): name of deployed model (CPD)
        space_name (str): name of deployment space (CPD)
        deployment_id (str): deployment UUID (WS Cloud)
        default_max_oaas_time_limit_sec (int): default oaas.timeLimit in seconds
        default_max_run_time_sec (int): default maximum run time in seconds, including queueing time
        monitor_loop_delay_sec (int): delay in seconds in the monitoring/polling loop
    """
    # Inputs
    self.wml_credentials = wml_credentials
    self.space_name = space_name
    self.model_name = deployed_model_name
    self.deployment_id = deployment_id
    # In seconds; None implies no time limit. Note the underlying oaas.timeLimit is in milliseconds!
    self.default_max_oaas_time_limit_sec = default_max_oaas_time_limit_sec
    # In seconds; the job is deleted after this. None implies no time limit.
    self.default_max_run_time_sec = default_max_run_time_sec
    self.monitor_loop_delay_sec = monitor_loop_delay_sec  # In seconds

    # State:
    self.solve_status = None
    self.objective = None
    self.solve_details = {}
    self.outputs = {}
    self.run_time = 0  # Run time of the job in seconds
    self.job_details = None

    # Setup and connection to the deployed model
    self.client = APIClient(wml_credentials)
    space_id = self.get_space_id(space_name)
    self.client.set.default_space(space_id)  # Also required when using deployment_id
    if self.deployment_id is None:
        self.deployment_id = self.get_deployment_id(deployed_model_name)
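# A minimal sketch of constructing DeployedDOModel on CPD v3.5, assuming a
# deployment space named "my_space" containing a deployed DO model named
# "my_do_model" (credentials and both names are placeholders).
wml_credentials = {
    "apikey": "<api-key>",                       # placeholder
    "url": "https://us-south.ml.cloud.ibm.com"   # placeholder
}
dom = DeployedDOModel(
    wml_credentials,
    space_name="my_space",
    deployed_model_name="my_do_model",
    default_max_run_time_sec=300,  # override the 600 s default
)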
def deployWMLModel():
    '''
    Step 1: Build the Linear Regression Model
    '''
    # import the dataset
    df1 = pd.read_csv(app.config["DATASET"] + 'Bengaluru_House_Data.csv')
    df2 = df1.drop(['area_type', 'society', 'balcony', 'availability'], axis='columns')
    df3 = df2.dropna()
    df3['bhk'] = df3['size'].apply(lambda x: int(x.split(' ')[0]))

    def convert_sqft_to_num(x):
        # a range like '2100-3000' becomes its midpoint
        tokens = x.split('-')
        if len(tokens) == 2:
            return (float(tokens[0]) + float(tokens[1])) / 2
        try:
            return float(x)
        except ValueError:
            return None

    df4 = df3.copy()
    df4['total_sqft'] = df4['total_sqft'].apply(convert_sqft_to_num)

    # feature engineering and dimensionality reduction
    df5 = df4.copy()
    # price per square foot (price is in lakhs)
    df5['price_per_sqft'] = df5['price'] * 100000 / df5['total_sqft']
    df5.location = df5.location.apply(lambda x: x.strip())
    location_stats = df5.groupby('location')['location'].agg('count').sort_values(ascending=False)
    location_stats_less_than_10 = location_stats[location_stats <= 10]
    df5.location = df5.location.apply(
        lambda x: 'other' if x in location_stats_less_than_10 else x)
    df6 = df5[~(df5.total_sqft / df5.bhk < 300)]

    def remove_pps_outliers(df):
        # keep rows within one std of the per-location mean price_per_sqft
        df_out = pd.DataFrame()
        for key, subdf in df.groupby('location'):
            m = np.mean(subdf.price_per_sqft)
            st = np.std(subdf.price_per_sqft)
            reduced_df = subdf[(subdf.price_per_sqft > (m - st)) &
                               (subdf.price_per_sqft <= (m + st))]
            df_out = pd.concat([df_out, reduced_df], ignore_index=True)
        return df_out

    df7 = remove_pps_outliers(df6)

    def remove_bhk_outliers(df):
        # drop n-BHK flats priced below the mean price_per_sqft of the
        # (n-1)-BHK flats in the same location
        exclude_indices = np.array([])
        for location, location_df in df.groupby('location'):
            bhk_stats = {}
            for bhk, bhk_df in location_df.groupby('bhk'):
                bhk_stats[bhk] = {
                    'mean': np.mean(bhk_df.price_per_sqft),
                    'std': np.std(bhk_df.price_per_sqft),
                    'count': bhk_df.shape[0]
                }
            for bhk, bhk_df in location_df.groupby('bhk'):
                stats = bhk_stats.get(bhk - 1)
                if stats and stats['count'] > 5:
                    exclude_indices = np.append(
                        exclude_indices,
                        bhk_df[bhk_df.price_per_sqft < stats['mean']].index.values)
        return df.drop(exclude_indices, axis='index')

    df8 = remove_bhk_outliers(df7)
    df9 = df8[df8.bath < df8.bhk + 2]
    df10 = df9.drop(['size', 'price_per_sqft'], axis='columns')
    dummies = pd.get_dummies(df10.location)
    df11 = pd.concat([df10, dummies], axis='columns')
    df11 = df11.drop(['other'], axis='columns')
    df12 = df11.drop('location', axis='columns')

    # define the features and the target for training
    X = df12.drop('price', axis='columns')
    y = df12.price
    from sklearn.model_selection import train_test_split
    x_train, x_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=10)
    from sklearn.linear_model import LinearRegression
    lr_clf = LinearRegression()
    lr_clf.fit(x_train, y_train)
    lr_clf.score(x_test, y_test)
    print("Model Built Successfully")

    '''
    Step 2: Deploy the Model to Watson Machine Learning
    '''
    getWmlCredentials()
    client = APIClient(wml_credentials)
    client.set.default_space(space_id)
    software_spec_uid = client.software_specifications.get_id_by_name("default_py3.7")
    metadata = {
        client.repository.ModelMetaNames.NAME: 'Bangalore House Price Prediction',
        # TYPE must be a supported framework type (e.g. scikit-learn_0.23),
        # not the software spec name
        client.repository.ModelMetaNames.TYPE: "scikit-learn_0.23",
        client.repository.ModelMetaNames.SOFTWARE_SPEC_UID: software_spec_uid
    }
    published_model = client.repository.store_model(lr_clf, meta_props=metadata)
    published_model_uid = client.repository.get_model_uid(published_model)
    model_details = client.repository.get_details(published_model_uid)
    # print(json.dumps(model_details, indent=2))
    models_details = client.repository.list_models()
    loaded_model = client.repository.load(published_model_uid)
    test_predictions = loaded_model.predict(x_test[:10])
    deploy_meta = {
        client.deployments.ConfigurationMetaNames.NAME: 'Deployment of Bangalore House Price Prediction',
        client.deployments.ConfigurationMetaNames.ONLINE: {}
    }
    created_deployment = client.deployments.create(published_model_uid, meta_props=deploy_meta)
    with open(app.config["SERVICES"] + 'wmlDeployment.json', 'w') as fp:
        json.dump(created_deployment, fp, indent=2)
    print(json.dumps(created_deployment, indent=2))
    print("Model Successfully Deployed..")
    with open(app.config["SERVICES"] + 'wmlDeployment.json') as temp:
        cred = json.loads(temp.read())
    model_id = cred["entity"]["asset"]["id"]
    return jsonify({"status": "Deployed, Model ID: " + model_id})
def my_form_post():
    state = request.form['state']
    account_length = request.form['account_length']
    area_code = request.form['area_code']
    international_plan = request.form['international_plan']
    voice_mail_plan = request.form['voice_mail_plan']
    number_vmail_messages = request.form['number_vmail_messages']
    total_day_minutes = request.form['total_day_minutes']
    total_day_calls = request.form['total_day_calls']
    total_day_charge = request.form['total_day_charge']
    total_eve_minutes = request.form['total_eve_minutes']
    total_eve_calls = request.form['total_eve_calls']
    total_eve_charge = request.form['total_eve_charge']
    total_night_minutes = request.form['total_night_minutes']
    total_night_calls = request.form['total_night_calls']
    total_night_charge = request.form['total_night_charge']
    total_intl_minutes = request.form['total_intl_minutes']
    total_intl_calls = request.form['total_intl_calls']
    total_intl_charge = request.form['total_intl_charge']
    customer_service_calls = request.form['customer_service_calls']

    # WML authentication
    from ibm_watson_machine_learning import APIClient
    wml_credentials = {
        "url": URL,  # the default URL for WML services in the US is https://us-south.ml.cloud.ibm.com
        "apikey": API_KEY
    }
    client = APIClient(wml_credentials)
    print(client.version)

    # deployment space
    space_id = SPACE_ID
    client.set.default_space(space_id)

    # deployment id
    deployment_id = DEPLOYMENT_ID

    # score the deployment with the submitted form data
    scoring_data = {
        client.deployments.ScoringMetaNames.INPUT_DATA: [{
            "fields": [
                "state", "account length", "area code", "international plan",
                "voice mail plan", "number vmail messages", "total day minutes",
                "total day calls", "total day charge", "total eve minutes",
                "total eve calls", "total eve charge", "total night minutes",
                "total night calls", "total night charge", "total intl minutes",
                "total intl calls", "total intl charge", "customer service calls"
            ],
            "values": [[
                state, account_length, area_code, international_plan,
                voice_mail_plan, number_vmail_messages, total_day_minutes,
                total_day_calls, total_day_charge, total_eve_minutes,
                total_eve_calls, total_eve_charge, total_night_minutes,
                total_night_calls, total_night_charge, total_intl_minutes,
                total_intl_calls, total_intl_charge, customer_service_calls
            ]]
        }]
    }
    print(scoring_data)
    predictions = client.deployments.score(deployment_id, scoring_data)
    print("The prediction output regarding customer churn is displayed as 1 for True or 0 for False:\n",
          predictions)

    # derive display values from the raw prediction
    from prediction import values
    values.predictionValues(predictions)
    prediction_churn_true_or_false = values.predictionValues.prediction_churn_true_or_false
    prediction_churn_true_or_false_percentage_one = values.predictionValues.prediction_churn_true_or_false_percentage_one
    prediction_churn_true_or_false_percentage_two = values.predictionValues.prediction_churn_true_or_false_percentage_two

    # apply display rules
    from prediction import rules
    rules.predictionRules(prediction_churn_true_or_false,
                          prediction_churn_true_or_false_percentage_one,
                          prediction_churn_true_or_false_percentage_two)
    churn_result = rules.predictionRules.churn_result
    print(churn_result)
    churn_result_percentage = rules.predictionRules.churn_result_percentage
    print(churn_result_percentage)

    return render_template('result.html',
                           state=state,
                           account_length=account_length,
                           area_code=area_code,
                           international_plan=international_plan,
                           voice_mail_plan=voice_mail_plan,
                           number_vmail_messages=number_vmail_messages,
                           total_day_minutes=total_day_minutes,
                           total_day_calls=total_day_calls,
                           total_day_charge=total_day_charge,
                           total_eve_minutes=total_eve_minutes,
                           total_eve_calls=total_eve_calls,
                           total_eve_charge=total_eve_charge,
                           total_night_minutes=total_night_minutes,
                           total_night_calls=total_night_calls,
                           total_night_charge=total_night_charge,
                           total_intl_minutes=total_intl_minutes,
                           total_intl_calls=total_intl_calls,
                           total_intl_charge=total_intl_charge,
                           customer_service_calls=customer_service_calls,
                           predictions=predictions,
                           churn_result=churn_result,
                           churn_result_percentage=churn_result_percentage)
def __init__(self, wml_credentials: Dict, model_name: str, scenario_name: str, space_name: str,
             package_paths: List[str] = [],
             file_paths: List[str] = [],
             deployment_name: str = 'xxx',
             deployment_description: str = 'xxx',
             project=None,
             tmp_dir: str = None):
    """
    Support for custom packages:
    1. For packages in conda/PyPI: through the yaml.
    2. For other custom packages: make sure you have the zip/gz package file
       (.whl doesn't (yet) work). Specify the path(s) to the zip/gz files in
       package_paths. Yaml and multiple package files can be combined.

    :param wml_credentials
    :param model_name (str): name of DO Experiment
    :param scenario_name (str): name of scenario with the Python model
    :param space_name (str): name of deployment space
    :param package_paths (List[str]): paths to zip/gz packages that will be included
    :param file_paths (List[str]): paths to files that will be included alongside the model.
        Components can be imported using `from my_file import MyClass`
    :param project (project_lib.Project): for WS Cloud, not required for CP4D on-prem.
        See ScenarioManager(). Used to connect to the DO Experiment.
    :param tmp_dir (str): path to the directory where intermediate files will be written.
        Make sure this exists. Can be used for debugging to inspect the files.
        If None, `tempfile` is used to generate a temporary folder that is cleaned up automatically.
    """
    self.wml_credentials = wml_credentials
    self.project = project
    self.model_name = model_name
    self.scenario_name = scenario_name
    # self.space_name = space_name
    self.deployment_name = deployment_name
    self.deployment_description = deployment_description
    self.package_paths = package_paths
    self.file_paths = file_paths
    self.tmp_dir = tmp_dir

    # Initialize clients
    self.client = APIClient(wml_credentials)
    space_id = self.guid_from_space_name(space_name)  # TODO: catch error if space_name cannot be found?
    result = self.client.set.default_space(space_id)
    # print(f"client space_id = {space_id}, result={result}")
    self.scenario_manager = ScenarioManager(model_name=model_name,
                                            scenario_name=scenario_name,
                                            project=project)

    # State
    self.model_uid = None
    self.deployment_uid = None

    # Code templates (note: `import os` and `import sys` added; the template
    # body uses os.listdir and sys.version_info)
    self.main_header_py = \
"""
from docplex.util.environment import get_environment
from os.path import splitext
import os
import sys
import pandas
from six import iteritems

def get_all_inputs():
    '''Utility method to read a list of files and return a tuple with all
    read data frames.
    Returns:
        a map { datasetname: data frame }
    '''
    result = {}
    env = get_environment()
    for iname in [f for f in os.listdir('.') if splitext(f)[1] == '.csv']:
        with env.get_input_stream(iname) as in_stream:
            df = pandas.read_csv(in_stream)
            datasetname, _ = splitext(iname)
            result[datasetname] = df
    return result

def write_all_outputs(outputs):
    '''Write all dataframes in ``outputs`` as .csv.

    Args:
        outputs: The map of outputs 'outputname' -> 'output df'
    '''
    for (name, df) in iteritems(outputs):
        csv_file = '%s.csv' % name
        print(csv_file)
        with get_environment().get_output_stream(csv_file) as fp:
            if sys.version_info[0] < 3:
                fp.write(df.to_csv(index=False, encoding='utf8'))
            else:
                fp.write(df.to_csv(index=False).encode(encoding='utf8'))
    if len(outputs) == 0:
        print("Warning: no outputs written")

def __iter__(self):
    return 0

# Load CSV files into the inputs dictionary
inputs = get_all_inputs()
outputs = {}

###########################################################
# Insert model below
###########################################################
"""
    self.main_footer_py = \
"""
###########################################################

# Generate output files
write_all_outputs(outputs)
"""
    self.yaml = \
"""
def main(argv):
    # NOTE: this script also relies on module-level imports (getopt, sys,
    # base64, APIClient) and a wml_credentials dict defined elsewhere.
    cplex_file = "diet.lp"
    try:
        opts, args = getopt.getopt(argv, "hf:", ["ffile="])
    except getopt.GetoptError:
        print('cplexrunonwml.py -f <file>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('cplexrunonwml.py -f <file>')
            sys.exit()
        elif opt in ("-f", "--ffile"):
            cplex_file = arg
    print('CPLEX file is', cplex_file)

    basename = cplex_file.split('.')[0]
    model_name = basename + "_model"
    deployment_name = basename + "_deployment"
    space_name = basename + "_space"

    print("Creating WML Client")
    client = APIClient(wml_credentials)

    def guid_from_space_name(client, name):
        space = client.spaces.get_details()
        for item in space['resources']:
            if item['entity']["name"] == name:
                return item['metadata']['id']
        return None

    space_id = guid_from_space_name(client, space_name)
    if space_id is None:
        print("Creating space")
        cos_resource_crn = 'crn:v1:bluemix:public:cloud-object-storage:global:a/7f92ce1185a3460579ce2c76a03b1a67:69cd8af5-5427-4efd-9010-7ad13ac3e18a::'
        instance_crn = 'crn:v1:bluemix:public:pm-20:us-south:a/7f92ce1185a3460579ce2c76a03b1a67:82c6ef26-4fd2-40c4-95d3-abe3c3ad19fd::'
        metadata = {
            client.spaces.ConfigurationMetaNames.NAME: space_name,
            client.spaces.ConfigurationMetaNames.DESCRIPTION: space_name + ' description',
            client.spaces.ConfigurationMetaNames.STORAGE: {
                "type": "bmcos_object_storage",
                "resource_crn": cos_resource_crn
            },
            client.spaces.ConfigurationMetaNames.COMPUTE: {
                "name": "existing_instance_id",
                "crn": instance_crn
            }
        }
        space = client.spaces.store(meta_props=metadata)
        space_id = client.spaces.get_id(space)
    print("space_id:", space_id)
    client.set.default_space(space_id)

    print("Getting deployment")
    deployments = client.deployments.get_details()
    deployment_uid = None
    for res in deployments['resources']:
        if res['entity']['name'] == deployment_name:
            deployment_uid = res['metadata']['id']
            print("Found deployment", deployment_uid)
            break
    if deployment_uid is None:
        print("Creating model")
        import tarfile

        def reset(tarinfo):
            tarinfo.uid = tarinfo.gid = 0
            tarinfo.uname = tarinfo.gname = "root"
            return tarinfo

        tar = tarfile.open("model.tar.gz", "w:gz")
        tar.add(cplex_file, arcname=cplex_file, filter=reset)
        tar.close()

        print("Storing model")
        model_metadata = {
            client.repository.ModelMetaNames.NAME: model_name,
            client.repository.ModelMetaNames.DESCRIPTION: model_name,
            client.repository.ModelMetaNames.TYPE: "do-cplex_12.10",
            client.repository.ModelMetaNames.SOFTWARE_SPEC_UID:
                client.software_specifications.get_uid_by_name("do_12.10")
        }
        model_details = client.repository.store_model(model='./model.tar.gz',
                                                      meta_props=model_metadata)
        model_uid = client.repository.get_model_uid(model_details)
        print(model_uid)

        print("Creating deployment")
        deployment_props = {
            client.deployments.ConfigurationMetaNames.NAME: deployment_name,
            client.deployments.ConfigurationMetaNames.DESCRIPTION: deployment_name,
            client.deployments.ConfigurationMetaNames.BATCH: {},
            client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {'name': 'S', 'nodes': 1}
        }
        deployment_details = client.deployments.create(model_uid, meta_props=deployment_props)
        deployment_uid = client.deployments.get_uid(deployment_details)
        print('deployment_id:', deployment_uid)

    print("Creating job")
    import pandas as pd
    solve_payload = {
        client.deployments.DecisionOptimizationMetaNames.SOLVE_PARAMETERS: {
            'oaas.logAttachmentName': 'log.txt',
            'oaas.logTailEnabled': 'true',
            'oaas.includeInputData': 'false',
            'oaas.resultsFormat': 'JSON'
        },
        client.deployments.DecisionOptimizationMetaNames.INPUT_DATA: [{
            "id": cplex_file,
            "content": getfileasdata(cplex_file)
        }],
        client.deployments.DecisionOptimizationMetaNames.OUTPUT_DATA: [
            {"id": r".*\.json"},
            {"id": r".*\.txt"}
        ]
    }
    job_details = client.deployments.create_job(deployment_uid, solve_payload)
    job_uid = client.deployments.get_job_uid(job_details)
    print('job_id', job_uid)

    # poll until the job reaches a terminal state
    from time import sleep
    while job_details['entity']['decision_optimization']['status']['state'] \
            not in ['completed', 'failed', 'canceled']:
        print(job_details['entity']['decision_optimization']['status']['state'] + '...')
        sleep(5)
        job_details = client.deployments.get_job_details(job_uid)
    print(job_details['entity']['decision_optimization']['status']['state'])

    for output_data in job_details['entity']['decision_optimization']['output_data']:
        if output_data['id'].endswith('csv'):
            print('Solution table:' + output_data['id'])
            solution = pd.DataFrame(output_data['values'],
                                    columns=output_data['fields'])
            solution.head()
        else:
            print(output_data['id'])
            if "values" in output_data:
                output = output_data['values'][0][0]
            elif "content" in output_data:
                output = output_data['content']
            # inline output content comes back base64-encoded
            output = output.encode("UTF-8")
            output = base64.b64decode(output)
            output = output.decode("UTF-8")
            print(output)
            with open(output_data['id'], 'wt') as file:
                file.write(output)
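# getfileasdata is referenced above but not defined in this snippet; since the
# job outputs are base64-decoded on the way back, a plausible sketch
# (an assumption, not the original helper) base64-encodes the model file for
# the inline "content" field.
import base64

def getfileasdata(filename):
    with open(filename, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')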
def put(self, id):
    '''Update a temperature given its identifier'''
    return TEMP.update(id, api.payload)


@ns.route('/healthz')
class Health(Resource):
    '''Returns "OK" when the application is ready'''

    @ns.doc('health')
    def get(self):
        '''Return OK'''
        return {'health': 'OK'}, 200


if __name__ == '__main__':
    load_dotenv(find_dotenv())
    api_key = os.environ.get("APIKEY")
    location = os.environ.get("REGION")
    print(os.environ.get("APIKEY"), os.environ.get("REGION"))
    wml_credentials = {
        "apikey": api_key,
        "url": 'https://' + location + '.ml.cloud.ibm.com'
    }
    client = APIClient(wml_credentials)
    client.set.default_space(os.environ.get("SPACE_UID"))
    deployment_uid = os.environ.get("DEPLOYMENT_UID")
    serve(app, host="0.0.0.0", port=5000)
    # app.run(debug=True, host="0.0.0.0")
def main():
    with open(CRED_PATH) as stream:
        try:
            credentials = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
    with open(META_PATH) as stream:
        try:
            metadata = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)

    data = pd.read_csv(DATA_PATH)
    X = data.iloc[:, :-1]
    y = data[data.columns[-1]]
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=0.3,
                                                        random_state=0)

    wml_credentials = {
        "url": credentials["url"],
        "apikey": credentials["apikey"]
    }
    client = APIClient(wml_credentials)
    client.spaces.list()

    SPACE_ID = credentials["space_id"]
    if "deployment_uid" in metadata.keys():
        DEPLOYMENT_UID = metadata["deployment_uid"]
        print("\nExtracting DEPLOYMENT UID from metadata file\n")
    else:
        DEPLOYMENT_UID = input("DEPLOYMENT UID: ")
    client.set.default_space(SPACE_ID)

    payload = {
        "input_data": [{
            "fields": X.columns.to_numpy().tolist(),
            "values": X_test.to_numpy().tolist(),
        }]
    }
    result = client.deployments.score(DEPLOYMENT_UID, payload)
    pred_values = np.squeeze(result["predictions"][0]["values"])
    y_pred_values = [i[0] for i in pred_values]

    def comb_eval(y, y_pred):
        # confusion matrix and accuracy in one dict
        cm = confusion_matrix(y, y_pred)
        acc = accuracy_score(y, y_pred)
        return {"cm": cm, "acc": acc}

    evaluation = comb_eval(y_test, y_pred_values)  # renamed from `eval` to avoid shadowing the built-in
    print(evaluation)
    return evaluation