class NLC(object):
    """Thin wrapper around one Natural Language Classifier instance.

    Built on the `Watson developer cloud Python SDK
    <https://github.com/watson-developer-cloud/python-sdk>`.
    """

    def __init__(self, url, username, password, classifier_id, corpus):
        self.nlc = NaturalLanguageClassifier(url=url, username=username, password=password)
        self.classifier_id = classifier_id
        self.corpus = corpus

    def __repr__(self):
        return "NLC: %s" % self.classifier_id

    def ask(self, question):
        # Classify, then map the top-ranked class back to its answer text.
        result = self.nlc.classify(self.classifier_id, question)
        top = result["classes"][0]
        return self.corpus.loc[top["class_name"]][ANSWER], top["confidence"]

    def query(self, question):
        # NOTE(review): returns the *second*-ranked class (index 1), as in the
        # original implementation — confirm this is intentional.
        result = self.nlc.classify(self.classifier_id, question)
        return result["classes"][1]["class_name"]
def _classify_model2_text(text):
    """Classify *text* with CLASSIFIER_ID_MODEL2; return [top_class, confidence]."""
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=WATSON_USERNAME, password=WATSON_PASSWORD)
    classification = natural_language_classifier.classify(CLASSIFIER_ID_MODEL2, text)
    top_classification = classification['top_class']
    # Fix: default the confidence so a top class missing from 'classes' can no
    # longer leave the variable unbound (NameError).
    top_classification_confidence = 0.00
    for entry in classification['classes']:
        if entry['class_name'] == top_classification:
            top_classification_confidence = entry['confidence']
    return [top_classification, top_classification_confidence]


def getClassificationModel2(email):
    """Classify *email* with NLC model 2 after group-specific preprocessing.

    :param email: object exposing a ``text_body`` attribute
    :return: [class_name, confidence]; ['not recognized', 0.00] for unknown groups
    """
    preprocessing_group = identify_email_group(email)
    # Both groups share the same classify step; only preprocessing differs.
    if preprocessing_group == 'email_group_1':
        return _classify_model2_text(preprocessGroup1(email.text_body)[:1000])
    elif preprocessing_group == 'email_group_2':
        return _classify_model2_text(preprocessGroup2(email.text_body)[:1000])
    else:
        return ['not recognized', 0.00]
def classifier_status(url, username, password, classifier_ids):
    """Print the status and status description of each classifier id."""
    service = NaturalLanguageClassifier(url=url, username=username, password=password)
    for cid in classifier_ids:
        info = service.status(cid)
        print("%s: %s" % (info["status"], info["status_description"]))
def train_nlc(url, username, password, truth, name):
    """Train a classifier named *name* from the *truth* frame; return its id."""
    logger.info("Train model %s with %d instances" % (name, len(truth)))
    with tempfile.TemporaryFile() as training_file:
        # NLC cannot handle newlines inside the question text.
        truth[QUESTION] = truth[QUESTION].str.replace("\n", " ")
        to_csv(training_file, truth[[QUESTION, ANSWER_ID]], header=False, index=False)
        training_file.seek(0)
        service = NaturalLanguageClassifier(url=url, username=username, password=password)
        response = service.create(training_data=training_file, name=name)
        logger.info(pretty_print_json(response))
        return response["classifier_id"]
class Watson_api():
    """Train and evaluate tweet texts against a Watson NLC classifier."""

    def __init__(self):
        self.fname = ""
        self.modelSearchList = ModelSearchList()
        self.text_data = []
        self.target_label = []
        self.watson_crediantial = watson_key()
        # Alternative classifiers kept for reference:
        #self.watson_classifier = self.watson_crediantial.classifier_twitter_classfier
        #self.watson_classifier = self.watson_crediantial.classifier_twitter_hash_classfier
        #self.watson_classifier = self.watson_crediantial.classifier_twitter_unblance_keyword_classfier
        #self.watson_classifier = self.watson_crediantial.twitter_priority_classfier
        self.watson_classifier = self.watson_crediantial.twitter_category_classfier
        self.natural_language_classifier = NaturalLanguageClassifier(
            username=self.watson_crediantial.username,
            password=self.watson_crediantial.password)
        #print(json.dumps(self.natural_language_classifier.list(), indent=2))

    def parse_args(self):
        # Single positional argument: the evaluation data file.
        parser = ArgumentParser(description='Encoder-decoder neural machine trainslation')
        parser.add_argument('data', help='[in] data')
        return parser.parse_args()

    def train(self):
        # Create a classifier from the bundled weather training CSV.
        with open('../resources/weather_data_train.csv', 'rb') as training_data:
            print(json.dumps(self.natural_language_classifier.create(
                training_data=training_data, name='weather2'), indent=2))

    def __read_data(self):
        # Each line is "<text>,<label>"; labels map through the search dictionary.
        for line in open(self.fname, "r"):
            fields = line.split(",")
            self.text_data.append(fields[0].strip())
            self.target_label.append(
                self.modelSearchList.search_category_dictionary[fields[1].strip()])

    def predict(self, args):
        # Replace the classifier id held in self.watson_classifier with your own.
        status = self.natural_language_classifier.status(self.watson_classifier)
        self.fname = args.data
        self.__read_data()
        #print (json.dumps(status, indent=2, ensure_ascii=False))
        predict_id = []
        for text in self.text_data:
            result = self.natural_language_classifier.classify(self.watson_classifier, text)
            top_name = result["classes"][0]["class_name"].replace("\"", "")
            predict_id.append(self.modelSearchList.search_category_dictionary[top_name])
        print(self.target_label)
        print(predict_id)
        macro_f1 = f1_score(self.target_label, predict_id, average='macro')
        print("----F measure-----")
        print(macro_f1)
def nlc_router_train(url, username, password, oracle_out, path, all_correct):
    """
    NLC Training on the oracle experiment output to determine which system
    (NLC or Solr) should answer a particular question.

    1. Split the oracle experiment output into NLC_ROUTER_FOLDS equal training
       and testing records (k-fold cross validation). All training and testing
       files are stored at *path*.
    2. Train NLC on all training sets and persist the list of classifier ids
       as a JSON file in *path*.

    :param url: URL of NLC instance
    :param username: NLC username
    :param password: NLC password
    :param oracle_out: frame created by the oracle experiment
    :param path: directory path to save intermediate results
    :param all_correct: train with only correct, in-purview QA pairs
    :return: list of classifier ids created by NLC training
    """
    ensure_directory_exists(path)
    sys_name = oracle_out[SYSTEM][0]
    # NLC cannot handle newlines inside the question text.
    oracle_out[QUESTION] = oracle_out[QUESTION].str.replace("\n", " ")
    kfold_split(oracle_out, path, NLC_ROUTER_FOLDS, True)
    classifier_list = []
    name_to_id = []  # fix: previously named 'list', shadowing the builtin
    for fold in range(0, NLC_ROUTER_FOLDS):
        train = pandas.read_csv(os.path.join(path, "Train{0}.csv".format(str(fold))))
        if all_correct:
            logger.info("Training only on CORRECT examples.")
            # Ignore records from training which are not correct
            train = train[train[CORRECT]]
            train = train[train[IN_PURVIEW]]
        train = train[[QUESTION, ANSWERING_SYSTEM]]
        logger.info("Training set size = {0}".format(str(len(train))))
        with tempfile.TemporaryFile() as training_file:
            to_csv(training_file, train[[QUESTION, ANSWERING_SYSTEM]], header=False, index=False)
            training_file.seek(0)
            nlc = NaturalLanguageClassifier(url=url, username=username, password=password)
            response = nlc.create(training_data=training_file,
                                  name="{0}_fold_{1}".format(str(sys_name), str(fold)))
            classifier_list.append(response["classifier_id"].encode("utf-8"))
            name_to_id.append({response["name"].encode("utf-8"):
                               response["classifier_id"].encode("utf-8")})
            # Fix: the bare pretty_print_json(...) call whose result was
            # discarded has been removed; logging it once is enough.
            logger.info(pretty_print_json(response))
    with open(os.path.join(path, 'classifier.json'), 'wb') as f:
        json.dump(name_to_id, f)
    return classifier_list
class NLClassifier(object): def __init__(self, username, password, classifier): # Setup Watson SDK self.natural_language_classifier = NLC(username=username,password=password) # Classifier information self.classifier = {} self.classifier['name'] = classifier['name'] self.classifier['training_file'] = classifier['training_file'] c = self.natural_language_classifier.list_classifiers() if any(d['name'] == self.classifier['name'] for d in c['classifiers'] ): self.classifier['id'] = [ d['classifier_id'] for d in c['classifiers'] if d['name'] == self.classifier['name'] ][0] print 'Found classifier id %s ' % self.classifier['id'] self.classifier['status'] = self.natural_language_classifier.status(self.classifier['id'])['status'] else: print 'No classifier found, creating new from training set' self.classifier['id'] = self.create_classifier() print 'New classifier id: %s ' % self.classifier['id'] ### Method to train the Watson Natural Language Classifier # The training set is delivered as a CSV file as specified in the Developer Guide # https://www.ibm.com/watson/developercloud/doc/nl-classifier/data_format.shtml def create_classifier(self): training_data = open(self.classifier['training_file'], 'rb') training_result = self.natural_language_classifier.create( training_data=training_data, name=self.classifier['name'] ) if training_result['status'] == "Training": self.classifier['status'] = "Training" return training_result['classifier_id'] else: print training_result return "Error" def classify(self,text): # Typically in a production system Watson NLC will be fully trained and verified by a data scientist before the system is ever # exposed in production. However because this is a demo application where Watson NLC is trained at application deployment time, # we will need to have a check to verify that the training is completed. 
if self.classifier['status'] == "Training": r = self.natural_language_classifier.status(self.classifier['id']) if r['status'] == "Training": return {"error": "Classifier still in training. Please try again in a few minutes."} elif r['status'] == "Available": self.classifier['status'] = 'Available' else: return {"error": "Unknown status for classifier", "message": r['status']} return self.natural_language_classifier.classify(self.classifier['id'], text)
def deleteClassifier(classifier_id, username, password):
    """Delete the given classifier from the NLC service; return the response."""
    service = NaturalLanguageClassifierV1(username=username, password=password)
    return service.delete_classifier(classifier_id)
def nlc():
    """Classify scraped review texts and dump the results to a JSON file.

    Returns 1 on the no-data path; None after a successful run or a logged
    error (original behaviour preserved).
    """
    try:
        logging.info('entered into nlc function')
        service = NaturalLanguageClassifierV1(username='******', password='******')
        results = []
        classifiers = service.list()  # side effect: verifies connectivity
        #print(json.dumps(classifiers, indent=2))
        status = service.status('359f41x201-nlc-65743')
        #print(json.dumps(status, indent=2))
        df = pd.read_csv(
            paths['output_dir'] + 'review_text_%s.txt' % retrieved_time.replace(':', '-'),
            sep='|')
        if (status['status'] == 'Available' and len(df.review_text) > 0):
            # NOTE(review): .decode() implies Python-2 byte strings here — confirm.
            for idx in range(0, len(df.review_text), 1):
                text = df.review_text[idx]
                results.append(service.classify('359f41x201-nlc-65743',
                                                text.decode("ISO-8859-1")))
            with open('yelp_{}_{}.json'.format('Resto2', 'HOU'), 'w') as f:
                json.dump(results, f, indent=5)
        else:
            logging.info('No Data Available')
            return 1
    except Exception as e:
        logging.info('error in nlc function %s' % str(e))
def cherk():
    """Classify the 'tweet' query parameter and print the ranked classes."""
    tweet = request.args.get("tweet")
    service = NaturalLanguageClassifierV1(username='******', password='******')
    result = service.classify('b8f3cex446-nlc-797', tweet)
    print(json.dumps(result, indent=2))
def nlc(self, nlc_input): # try: logging.info('entered into nlc function') natural_language_classifier = NaturalLanguageClassifierV1( username=spark_conf.classifier_input['username'], password=spark_conf.classifier_input['password']) classes = [] classifiers = natural_language_classifier.list() #print(simplejson.dumps(classifiers, indent=2)) status = natural_language_classifier.status('359f41x201-nlc-65743') #print(simplejson.dumps(status, indent=2)) #df = pd.read_csv(spark_conf.classifier_input['input_file']+'reviews_%s.txt'% spark_conf.retrieved_time.replace(':','-'),sep='|') df = nlc_input.toPandas() # df1 = df.'review_text'.unique() # print df df2 = df.copy(deep=True) if (status['status'] == 'Available' and len(df2.review_text) > 0): for i in range(0, len(df2.review_text), 1): line = df2.review_text[i] # print line#ISO-8859-1 #classes.append(natural_language_classifier.classify('359f41x201-nlc-65743',line.encode("ISO-8859-1"))) classes.append( natural_language_classifier.classify( '359f41x201-nlc-65743', line)) with open( spark_conf.file_path['data_update_path'] + 'review_{}.json'.format('classifier'), 'w') as f: simplejson.dump(classes, f, indent=5) return 1 else: logging.info("NO DATA AVAILABLE") return 0
def get_illness(request):
    """Classify *request* and return the full ranked class list."""
    # NOTE(review): classifier id and credentials are empty strings — configure.
    service = NaturalLanguageClassifierV1(username='', password='')
    classes = service.classify('', request)
    print(type(classes))
    return classes['classes']
def getTrainingStatus(classifier_id, username, password):
    """Return the service's metadata/status record for one classifier."""
    service = NaturalLanguageClassifierV1(username=username, password=password)
    return service.get_classifier(classifier_id)
def post_nlc():
    """Classify the posted 'data' text and return a matching restaurant as JSON.

    Keeps the original last-match-wins behaviour when several restaurants list
    the top class. Fix: previously raised NameError (HTTP 500) when no
    restaurant matched at all.
    """
    input_text = request.form.get('data')
    # Query Watson NLC for the text's top class.
    natural_language_classifier = NaturalLanguageClassifierV1(username='', password='')
    if natural_language_classifier:
        classes = natural_language_classifier.classify('c7fa4ax22-nlc-10554', input_text)
        json_file_path = './restaurants.json'
        with open(json_file_path) as json_file:
            data = json.load(json_file)
        matched = None
        for restaurant in data['restaurants']:
            if classes['top_class'] in restaurant['class']:
                matched = restaurant  # last match wins, as before
        if matched is None:
            return jsonify({'input_text': input_text,
                            'error': 'no matching restaurant'})
        response = {
            'restaurant_name': matched['name'],
            'restaurant_url': matched['url'],
            'image_url': matched['image'],
            'input_text': input_text,
            'location': matched['location'],
            'budget': matched['budget'],
        }
        return jsonify(response)
def initNLC(self): credentials = self.getPixieAppEntity() if credentials is None: return "<div>You must provide credentials to your Watson NLC Service</div>" self.natural_language_classifier = NaturalLanguageClassifierV1( username=credentials['username'], password=credentials['password'])
def __initialize(self, credential_file_path): if not credential_file_path: credential_file_path = os.path.expanduser(DEFAULT_CREDENTIAL_PATH) with open(credential_file_path, 'r') as credential_file: credential = json.load(credential_file) self.__nlc = NaturalLanguageClassifier(url=credential['url'], username=credential['username'], password=credential['password'])
def __init__(self, url, username, password, classifier_id): self.creds['url'] = url self.creds['username'] = username self.creds['password'] = password self.api_ids['classifier_id'] = classifier_id self.nlc = NaturalLanguageClassifierV1(username=self.creds['username'], password=self.creds['password'])
def get_illness(request):
    """Classify *request*; return (top class name, its confidence)."""
    # NOTE(review): classifier id and credentials are empty strings — configure.
    service = NaturalLanguageClassifierV1(username='', password='')
    top = service.classify('', request)['classes'][0]
    return top['class_name'], top['confidence']
def nlcQuery(query):
    """Return the top NLC class name for *query*, or {} on service error."""
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=cred.NLCUSER, password=cred.NLCPASS)
    #status = natural_language_classifier.status(cred.NLCCLUSTER)
    try:
        classes = natural_language_classifier.classify(cred.NLCCLUSTER, query)
        results = classes['top_class']
    except Exception:
        # Fix: was a bare except, which also swallowed SystemExit and
        # KeyboardInterrupt.
        results = {}
    return results
def nlc_0(text):
    """Return Watson NLC's top class for *text*."""
    # Account credentials for NLC access.
    # NOTE(review): credentials are commented out — this constructor call
    # likely fails until a new account is configured; confirm.
    natural_language_classifier = NaturalLanguageClassifierV1(
        #username='******',
        #password='******'  # a new account must be created
        )
    # Watson id: ff1c2bx159-nlc-4926
    print('----- Watson NLC からの応答待ち -----')
    response = natural_language_classifier.classify('cedd09x164-nlc-4477', text)
    return response["top_class"]
class NLC(object):
    """Watson NLC wrapper that only reports confident classifications."""

    def __init__(self):
        self.classifier = NaturalLanguageClassifier(
            username='******', password='******')

    def classify(self, request):
        # Suppress answers below the confidence threshold.
        top = self.classifier.classify('e554c3x251-nlc-16708', request)['classes'][0]
        return top['class_name'] if top['confidence'] > .87 else None
def __init__(self): self.fname = "" self.modelSearchList = ModelSearchList() self.text_data = [] self.target_label = [] self.watson_crediantial = watson_key() #self.watson_classifier = self.watson_crediantial.classifier_twitter_classfier #self.watson_classifier = self.watson_crediantial.classifier_twitter_hash_classfier #self.watson_classifier = self.watson_crediantial.classifier_twitter_unblance_keyword_classfier #self.watson_classifier = self.watson_crediantial.twitter_priority_classfier self.watson_classifier = self.watson_crediantial.twitter_category_classfier self.natural_language_classifier = NaturalLanguageClassifier(username=self.watson_crediantial.username, password=self.watson_crediantial.password)
def Analyze():
    """Flask endpoint: classify the posted 'text' field and return JSON."""
    # NOTE(review): hardcoded API key — move to configuration/secret storage.
    api_key = "LiI3o53WHaOU02ATKIwKhSQdirvntK1lZUPA6rhdEwCZ"
    workspace_ID = "6deb62x509-nlc-477"
    service = NaturalLanguageClassifierV1(iam_apikey=api_key)
    comment_text = request.form['text']
    result = ""
    if comment_text != "":
        # .result unwraps the SDK's DetailedResponse envelope.
        result = service.classify(workspace_ID, comment_text).result
    return jsonify(result)
def __init__(self): self.fname = "" self.modelSearchList = ModelSearchList() self.text_data = [] self.target_label = [] self.watson_crediantial = watson_key() #self.watson_classifier = self.watson_crediantial.classifier_twitter_classfier #self.watson_classifier = self.watson_crediantial.classifier_twitter_hash_classfier #self.watson_classifier = self.watson_crediantial.classifier_twitter_unblance_keyword_classfier #self.watson_classifier = self.watson_crediantial.twitter_priority_classfier self.watson_classifier = self.watson_crediantial.twitter_category_classfier self.natural_language_classifier = NaturalLanguageClassifier( username=self.watson_crediantial.username, password=self.watson_crediantial.password)
def getClassificationModelD2(email):
    """Classify a preprocessed email body with CLASSIFIER_ID_MODELD2.

    :param email: object exposing a ``text_body`` attribute
    :return: [top_class, confidence]
    """
    text = preprocessGroup1(email.text_body)[:1000]  # NLC input length limit
    #print(text)
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=WATSON_USERNAME, password=WATSON_PASSWORD)
    classification = natural_language_classifier.classify(CLASSIFIER_ID_MODELD2, text)
    top_classification = classification['top_class']
    # Fix: default the confidence so a top class missing from 'classes' can no
    # longer leave the variable unbound (NameError).
    top_classification_confidence = 0.0
    for entry in classification['classes']:
        if entry['class_name'] == top_classification:
            top_classification_confidence = entry['confidence']
    return [top_classification, top_classification_confidence]
def classify(classifier_name, sentence):
    """Classify *sentence* with the classifier named *classifier_name*.

    :return: the class name with the highest confidence
    """
    classifiers = classifier.list_classifiers_name_id()
    # API call
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=nlc_usr, password=nlc_psw)
    classes = natural_language_classifier.classify(classifiers[classifier_name], sentence)
    # Fix: the response is already a parsed dict — the json.dumps/json.loads
    # round-trip added nothing.
    answer_class = classes["classes"][0]["class_name"]  # highest-confidence class
    # print("Actual Class: ",actual_class," ","Response Class: ",answer_class,"\n")
    return answer_class
def apiNLCTest(comment_text):
    """Classify *comment_text*; return [[class_name, confidence], ...]."""
    # NOTE(review): hardcoded API key — move to configuration/secret storage.
    api_key = "LiI3o53WHaOU02ATKIwKhSQdirvntK1lZUPA6rhdEwCZ"
    workspace_ID = "6deb62x509-nlc-477"
    service = NaturalLanguageClassifierV1(iam_apikey=api_key)  # classifier instance
    payload = service.classify(workspace_ID, comment_text).result
    # Empty list when the response carries no 'classes' key.
    return [[entry['class_name'], entry['confidence']]
            for entry in payload.get("classes", [])]
class NLC(object):
    """Object wrapper around the Watson NLC REST client."""

    def __init__(self, credential_file_path=None):
        self.__nlc = None
        self.__initialize(credential_file_path)

    def __initialize(self, credential_file_path):
        # Load url/username/password from the JSON credential file.
        if not credential_file_path:
            credential_file_path = os.path.expanduser(DEFAULT_CREDENTIAL_PATH)
        with open(credential_file_path, 'r') as credential_file:
            credential = json.load(credential_file)
        self.__nlc = NaturalLanguageClassifier(url=credential['url'],
                                               username=credential['username'],
                                               password=credential['password'])

    def create(self, traning_data, name=None, language='en'):
        """
        :param traning_data: a csv file (file-like object) or file path with the training data
        :param name: the optional descriptive name for the classifier
        :param language: the language of the input data
        :return: a CreateResult with the classifier_id of the newly created classifier, still in training
        """
        create_result = None
        if isinstance(traning_data, str):
            # traning_data is a file path
            with open(traning_data, newline=None, mode='r', encoding='utf-8') as csv_file:
                if is_valid_recode_num(csv_file):
                    create_result = self.__nlc.create(csv_file, name=name, language=language)
        elif isinstance(traning_data, IOBase) or hasattr(traning_data, 'read'):
            # traning_data is a file-like object. Fix: `isinstance(x, file)`
            # raised NameError on Python 3, where the builtin `file` is gone;
            # the 'read' duck-check also covers Python 2 file objects.
            create_result = self.__nlc.create(traning_data, name=name, language=language)
        return CreateResult(create_result)

    def classifiers(self):
        """Return a Classifiers wrapper over every classifier on the service."""
        classifiers_raw = self.__nlc.list()
        classifiers_ = [Classifier(c) for c in classifiers_raw['classifiers']]
        return Classifiers(classifiers_)

    def status(self, classifier_id):
        """Return the training Status of one classifier."""
        return Status(self.__nlc.status(classifier_id))

    def classify(self, classifier_id, text):
        """Classify *text* with the given classifier."""
        return ClassifyResult(self.__nlc.classify(classifier_id, text))

    def remove(self, classifier_id):
        """
        :param classifier_id: unique identifier for the classifier
        :return: empty dict object
        :raise: watson_developer_cloud.watson_developer_cloud_service.WatsonException: Not found
        """
        return self.__nlc.remove(classifier_id)

    def remove_all(self):
        """Delete every classifier; return the list of service responses."""
        classifiers_ = self.classifiers()
        return [self.remove(c.classifier_id) for c in classifiers_]
def main():
    """Parse CLI args, authenticate to Watson NLC, and upload training data."""
    parser = argparse.ArgumentParser(
        description="Util method for Natural Language Processor")
    parser.add_argument('--trainingData', help='path to data file of training data')
    args = parser.parse_args()
    # authorize and get a token
    with open(authFile, 'r') as service_file:
        service_data = json.loads(service_file.read())
    credentials = service_data['credentials']
    global nlc
    nlc = NaturalLanguageClassifierV1(username=credentials['username'],
                                      password=credentials['password'])
    sendTrainingData(args.trainingData)
def classify(classifier_name, sentence):
    """Classify *sentence* with the named classifier; return the top class name.

    Also prints the wall-clock duration of the API call.
    """
    classifiers = list_classifiers_name_id()
    # API call
    natural_language_classifier = NaturalLanguageClassifierV1(
        username=nlc_usr, password=nlc_psw)
    # Fix: time.clock() was deprecated since 3.3 and removed in Python 3.8;
    # prefer perf_counter, falling back to clock on very old interpreters.
    timer = getattr(time, 'perf_counter', None) or time.clock
    start = timer()
    classes = natural_language_classifier.classify(classifiers[classifier_name], sentence)
    elapsed = timer() - start
    print('API call time : ', str(elapsed))
    # Fix: the response is already a parsed dict — no json round-trip needed.
    answer_class = classes["classes"][0]["class_name"]  # highest-confidence class
    return answer_class
def watson(text):
    """Determine which school subject the input string is about.

    :param text: the string to classify
    :return: the top class name, or None when the top class is 'keskustelu'
        (plain conversation) — i.e. no subject was recognised
    """

    def getTopClassConfidence(top_class, response):
        # Confidence the service reported for the top-ranked class.
        for candidate in response['classes']:
            if candidate['class_name'] == top_class:
                return candidate['confidence']

    natural_language_classifier = NaturalLanguageClassifierV1(
        username=watson_nlc_username, password=watson_nlc_password)
    response = natural_language_classifier.classify(watson_nlc_id, text)
    top_class = response['top_class']
    print(getTopClassConfidence(top_class, response))
    return None if top_class == 'keskustelu' else top_class
def __init__(self, username, password, classifier): # Setup Watson SDK self.natural_language_classifier = NLC(username=username,password=password) # Classifier information self.classifier = {} self.classifier['name'] = classifier['name'] self.classifier['training_file'] = classifier['training_file'] c = self.natural_language_classifier.list_classifiers() if any(d['name'] == self.classifier['name'] for d in c['classifiers'] ): self.classifier['id'] = [ d['classifier_id'] for d in c['classifiers'] if d['name'] == self.classifier['name'] ][0] print 'Found classifier id %s ' % self.classifier['id'] self.classifier['status'] = self.natural_language_classifier.status(self.classifier['id'])['status'] else: print 'No classifier found, creating new from training set' self.classifier['id'] = self.create_classifier() print 'New classifier id: %s ' % self.classifier['id']
def classifier_status(url, username, password, classifier_ids):
    """Print name, id, state, and description for each classifier."""
    service = NaturalLanguageClassifier(url=url, username=username, password=password)
    for cid in classifier_ids:
        info = service.status(cid)
        print(" Instance name: %s with classifier id %s is %s; Description: %s"
              % (info["name"], info["classifier_id"], info["status"],
                 info["status_description"]))
import json
from os.path import join, dirname

from watson_developer_cloud import NaturalLanguageClassifierV1 as NaturalLanguageClassifier

# Connect and list the classifiers registered to this account.
natural_language_classifier = NaturalLanguageClassifier(username='******', password='******')
print(json.dumps(natural_language_classifier.list(), indent=2))

# create a classifier
# with open('../resources/weather_data_train.csv', 'rb') as training_data:
#     print(json.dumps(natural_language_classifier.create(training_data=training_data, name='weather2'), indent=2))

# Replace 47C164-nlc-243 with your classifier id.
status = natural_language_classifier.status('47C164-nlc-243')
print(json.dumps(status, indent=2))

classes = natural_language_classifier.classify('47C164-nlc-243',
                                               'How hot will it be tomorrow?')
print(json.dumps(classes, indent=2))
#! /usr/bin/python
# Classify a sample sentence with the previously trained classifier whose id
# is stored in classifier_info.json.
from watson_developer_cloud import NaturalLanguageClassifierV1 as NLC
import json

with open('credential.json') as f_cred:
    cred = json.load(f_cred)
with open('classifier_info.json') as f_cls:
    cls_id = json.load(f_cls)['classifier_id']

nlc = NLC(username=cred['username'], password=cred['password'])
classes = nlc.classify(cls_id, '今天天氣如何')
print(json.dumps(classes, indent=2))
# NOTE(review): fragment of a delete-classifier CLI — the getopt try/except and
# option setup begin above this excerpt; the first three lines below are the
# tail of its usage-error handler.
    print(str(err))
    print(usage())
    sys.exit(2)

# Walk the parsed options: -h for help, -c for the classifier id, -d for debug.
for opt, arg in opts:
    if opt == '-h':
        usage()
        sys.exit()
    elif opt in ("-c", "---classifier_id"):
        classifier_id = arg
    elif opt == '-d':
        DEBUG = True

# The classifier id is mandatory.
if not classifier_id:
    print('Required argument missing.')
    usage()
    sys.exit(2)

try:
    # create classifiers with the training data
    natural_language_classifier = NaturalLanguageClassifier(url=nlcConstants.getUrl(), username=nlcConstants.getUsername(), password=nlcConstants.getPassword())
    # Delete the classifier
    sys.stdout.write('Deleting the classifier %s ...\n' % classifier_id)
    res = natural_language_classifier.remove(classifier_id)
    sys.stdout.write('Response: \n%s\n' % json.dumps(res, indent=2))
except Exception as e:
    sys.stdout.write(str(e))
    exit(1)
import json

from watson_developer_cloud import NaturalLanguageClassifierV1 as NaturalLanguageClassifier

# Connect and print every classifier registered to this account.
natural_language_classifier = NaturalLanguageClassifier(
    username='******', password='******')
classifiers = natural_language_classifier.list()
print(json.dumps(classifiers, indent=2))
def classifier_list(url, username, password):
    """Return the raw list of classifiers registered with the service."""
    service = NaturalLanguageClassifier(url=url, username=username, password=password)
    return service.list()["classifiers"]
def __init__(self, url, username, password, classifier_id, corpus): self.nlc = NaturalLanguageClassifier(url=url, username=username, password=password) self.classifier_id = classifier_id self.corpus = corpus
# NOTE(review): fragment of an interactive classify CLI — the getopt loop and
# '-h' branch begin above this excerpt.
        sys.exit()
    elif opt in ("-c", "---classifier_id"):
        classifier_id = arg
    elif opt == '-d':
        DEBUG = True

# The classifier id is mandatory.
if not classifier_id:
    print('Required argument missing.')
    usage()
    sys.exit(2)

try:
    sys.stdout.write('Watson Natural Language Classifier app. (Ctrl-z for terminating.)\n')
    # create classifiers with the training data
    natural_language_classifier = NaturalLanguageClassifier(url=nlcConstants.getUrl(), username=nlcConstants.getUsername(), password=nlcConstants.getPassword())
    # set input encoding - change the encoding name according your platform
    sys.stdin = codecs.getreader('shift_jis')(sys.stdin)
    # loop for classify texts
    sys.stdout.write('Your input> ')
    for line in iter(sys.stdin.readline, ''):
        line = line.rstrip()
        if line != "":
            # send input message - encoding will be done in the SDK
            res = natural_language_classifier.classify(classifier_id, line)
            if DEBUG:
                sys.stdout.write('Response: \n%s\n' % json.dumps(res, indent=2))
            sys.stdout.write('Your input was %s\n' % res['text'])
def remove_classifiers(url, username, password, classifier_ids):
    """Delete every classifier in *classifier_ids* from the NLC service."""
    service = NaturalLanguageClassifier(url=url, username=username, password=password)
    for cid in classifier_ids:
        service.remove(cid)
#! /usr/bin/python
# Poll the training status of the saved classifier and refresh its metadata
# file with the latest response.
from watson_developer_cloud import NaturalLanguageClassifierV1 as NLC
import json

with open('credential.json') as f_cred:
    cred = json.load(f_cred)
with open('classifier_info.json') as f_cls:
    cls_id = json.load(f_cls)['classifier_id']

nlc = NLC(username=cred['username'], password=cred['password'])
status = nlc.status(cls_id)

with open('classifier_info.json', 'w') as f_cls:
    json.dump(status, f_cls, indent=2)
print(json.dumps(status, indent=2))
import json
from os.path import join, dirname

from watson_developer_cloud import NaturalLanguageClassifierV1 as NaturalLanguageClassifier

# Connect and show the classifiers registered to this account.
natural_language_classifier = NaturalLanguageClassifier(username='******', password='******')
classifiers = natural_language_classifier.list()
print(json.dumps(classifiers, indent=2))

# create a classifier
# with open('../resources/weather_data_train.csv', 'rb') as training_data:
#     print(json.dumps(natural_language_classifier.create(training_data=training_data, name='weather2'), indent=2))

# Replace 47C164-nlc-243 with your classifier id.
status = natural_language_classifier.status('47C164-nlc-243')
print(json.dumps(status, indent=2))

classes = natural_language_classifier.classify('47C164-nlc-243',
                                               'How hot will it be tomorrow?')
print(json.dumps(classes, indent=2))
# NOTE(review): fragment of a training CLI — the getopt try/except begins above
# this excerpt; the first line below is the tail of its usage-error handler.
    sys.exit(2)

# Walk the parsed options: -t training file, -n name, -l language, -d debug.
for opt, arg in opts:
    if opt == '-h':
        usage()
        sys.exit()
    elif opt in ("-t", "---trainingdata"):
        trainingdata_filepath = arg
    elif opt in ("-n", "---name"):
        name = arg
    elif opt in ("-l", "---language"):
        language = arg
    elif opt == '-d':
        DEBUG = True

# All three of training file, name, and language are mandatory.
if not trainingdata_filepath or not name or not language:
    print('Required argument missing.')
    usage()
    sys.exit(2)

try:
    # create classifiers with the training data
    natural_language_classifier = NaturalLanguageClassifier(url=nlcConstants.getUrl(), username=nlcConstants.getUsername(), password=nlcConstants.getPassword())
    with open(trainingdata_filepath, 'rb') as training_data:
        res = natural_language_classifier.create(training_data, name, language)
        sys.stdout.write('Response: \n%s\n' % json.dumps(res, indent=2))
except Exception as e:
    sys.stdout.write(str(e))
    exit(1)
#! /usr/bin/python
# Train a new classifier from the weather CSV and save its metadata to
# classifier_info.json.
from watson_developer_cloud import NaturalLanguageClassifierV1 as NLC
import json

with open('credential.json') as f_cred:
    cred = json.load(f_cred)

nlc = NLC(username=cred['username'], password=cred['password'])
with open('weather_data_train.csv') as f_train:
    clsfier = nlc.create(training_data=f_train,
                         name='python classfier',
                         language='en')

with open('classifier_info.json', 'w') as f_cls:
    json.dump(clsfier, f_cls, indent=2)
print(json.dumps(clsfier, indent=2))