Example #1
0
def home():
    #The tweet to classify
    tweet = str(request.args['text']).lower()
    #The path to file containing the model
    model = str(request.args['model'])
    #Should the tweet be preprocessed
    preprocess = str(request.args['preprocess']).lower()
    #Lazily load the model
    if model not in models:
        print "Model not in memory: ", model
        print "Loading model"
        models[model] = cPickle.load(open(model, "rb"))
        if (load_word_vecs):
            print "Adding wordvecs"
            models[model].add_global_word_vecs(wordvecs)
        print "Done"

    if preprocess == "True":
        tweet = clean_str(tweet)

    [y_pred, prob_pred] = models[model].classify([{'text': tweet}])
    labels = models[model].labels

    label_to_prob = {}
    for i in range(len(labels)):
        label_to_prob[labels[i]] = prob_pred[0][i]
    return json.dumps(label_to_prob)
Example #2
0
def home():
    """Classify one tweet from the request and return label->probability JSON.

    Request args:
        text       -- the tweet to classify
        model      -- path to the pickled model file
        preprocess -- "true"/"false": whether to run clean_str first

    Returns an empty JSON object if the 'text' argument is malformed.
    """
    #The tweet to classify
    try:
        tweet = request.args['text'].lower()
    except Exception as e:
        print "Error processing request. Improper format of request.args['text'] might be causing an issue. Returning empty array"
        # BUG FIX: use .get() here -- re-indexing request.args['text'] inside
        # the handler re-raises the very KeyError we are trying to report.
        print "request.args['text'] = ", request.args.get('text')
        return json.dumps({})
    #The path to file containing the model
    model = str(request.args['model'])
    #Should the tweet be preprocessed
    preprocess = str(request.args['preprocess']).lower()
    #Lazily load the model on first request
    if model not in models:
        print "Model not in memory: ", model
        print "Loading model"
        # NOTE(review): unpickling a caller-supplied path is unsafe on
        # untrusted input. Context manager avoids leaking the file handle.
        with open(model, "rb") as f:
            models[model] = pickle.load(f)
        if load_word_vecs:
            print "Adding wordvecs"
            models[model].add_global_word_vecs(wordvecs)
        print "Done"

    # BUG FIX: preprocess was lower-cased above, so comparing against
    # "True" (capital T) could never match; compare to "true".
    if preprocess == "true":
        tweet = clean_str(tweet)

    [y_pred, prob_pred] = models[model].classify([{'text': tweet}])
    labels = models[model].labels

    label_to_prob = {}
    for label, prob in zip(labels, prob_pred[0]):
        # numpy scalars are not JSON-serializable; unwrap to Python floats.
        if isinstance(prob, (numpy.float32, numpy.float64)):
            label_to_prob[label] = prob.item()
        else:
            label_to_prob[label] = prob
    return json.dumps(label_to_prob)
Example #3
0
def predictList(tweetList):
    probList = []
    if model not in models:
        print "Model not in memory: ", model
        print "Loading model"
        models[model]=pickle.load(open(model,"rb"))
        if(load_word_vecs):
            print "Adding wordvecs"
            models[model].add_global_word_vecs(wordvecs)
        print "Done"

    if preprocess == True:
        for tweet in tweetList:
            tweet = clean_str(tweet.lower())
    for tweet in tweetList:
        [y_pred,prob_pred] = models[model].classify([{'text':tweet}])
        labels = models[model].labels
        label_to_prob={}
        for i in range(len(labels)):
            if(isinstance(prob_pred[0][i], numpy.float32) or isinstance(prob_pred[0][i], numpy.float64)):
                label_to_prob[labels[i]]=prob_pred[0][i].item()
            else:
                label_to_prob[labels[i]] = prob_pred[0][i]
        probList.append(label_to_prob)

    return probList
def predictList(tweetList):
    probList = []
    if model not in models:
        print "Model not in memory: ", model
        print "Loading model"
        models[model] = pickle.load(open(model, "rb"))
        if (load_word_vecs):
            print "Adding wordvecs"
            models[model].add_global_word_vecs(wordvecs)
        print "Done"

    if preprocess == True:
        for tweet in tweetList:
            tweet = clean_str(tweet.lower())
    for tweet in tweetList:
        [y_pred, prob_pred] = models[model].classify([{'text': tweet}])
        labels = models[model].labels
        label_to_prob = {}
        for i in range(len(labels)):
            if (isinstance(prob_pred[0][i], numpy.float32)
                    or isinstance(prob_pred[0][i], numpy.float64)):
                label_to_prob[labels[i]] = prob_pred[0][i].item()
            else:
                label_to_prob[labels[i]] = prob_pred[0][i]
        probList.append(label_to_prob)

    return probList
Example #5
0
def home():
    #The tweet to classify
    tweet=str(request.args['text']).lower()
    #The path to file containing the model
    model=str(request.args['model'])
    #Should the tweet be preprocessed
    preprocess=str(request.args['preprocess']).lower()
    #Lazily load the model
    if model not in models:
        print "Model not in memory: ",model
        print "Loading model"
        models[model]=cPickle.load(open(model,"rb"))
        if(load_word_vecs):
            print "Adding wordvecs"
            models[model].add_global_word_vecs(wordvecs)
        print "Done"

    if preprocess == "True":
        tweet = clean_str(tweet)

    [y_pred,prob_pred] = models[model].classify([{'text':tweet}])
    labels = models[model].labels

    label_to_prob={}
    for i in range(len(labels)):
        label_to_prob[labels[i]]=prob_pred[0][i]
    return json.dumps(label_to_prob)
Example #6
0
def agent_hit_home():
    """Run four agent-classifier threads over a title/content pair and
    return, per agent, a hit flag ('true'/'false') plus that agent's
    label probabilities, as JSON keyed by thread name.

    Request args: title, content, aid. Any failure (including a missing
    arg, which makes .lower() raise on None) is logged and an empty JSON
    object is returned.

    FIX: the original body mixed tabs and spaces, which is fragile under
    `python -tt` and a hard syntax error in Python 3; indentation is
    normalized to 4 spaces with the logic left unchanged.
    """
    try:
        title = request.args.get('title').lower()
        content = request.args.get('content').lower()
        aid = request.args.get('aid').lower()
        converted_text = convert_text(title, content)
        converted_text = clean_str(converted_text)
        app.logger.info('text received')
        app.logger.info(title)
        app.logger.info(content)
        app.logger.info(aid)

        # One worker thread per agent model; the thread names double as
        # the keys of the response dict.
        lc_thread = agentThread(1, '1', lc_model, title, content, aid, converted_text)
        no_thread = agentThread(2, '2', no_model, title, content, aid, converted_text)
        ac_thread = agentThread(3, '3770', ac_model, title, content, aid, converted_text)
        pa_thread = agentThread(4, '29', pa_model, title, content, aid, converted_text)

        app.logger.info('threads creates')
        lc_thread.start()
        no_thread.start()
        ac_thread.start()
        pa_thread.start()
        app.logger.info('threads started')

        threads = []
        threads.append(lc_thread)
        threads.append(no_thread)
        threads.append(ac_thread)
        threads.append(pa_thread)

        # Wait for all four classifications to finish.
        for t in threads:
            t.join()

        agents = {}
        agents['aid'] = aid

        for t in threads:
            # t.is_agent is a string flag set by the worker thread.
            if t.is_agent == '1':
                agents[t.name] = 'true'
            else:
                agents[t.name] = 'false'
            agents[t.name + '_probs'] = t.label_to_prob

        temp_str = str(agents)
        app.logger.info(aid + '\t' + temp_str)
        return json.dumps(agents)

    except Exception as e:
        logging.exception("read error")
        app.logger.error("Error processing request. Improper format of request.args['text'] might be causing an issue. Returning empty array")
        return json.dumps({})
Example #7
0
def predictTweet(string):
    tweet = string.lower()
    if model not in models:
        print "Model not in memory: ", model
        print "Loading model"
        models[model] = pickle.load(open(model, "rb"))
        if load_word_vecs:
            print "Adding wordvecs"
            models[model].add_global_word_vecs(wordvecs)
        print "Done"

    if preprocess == True:
        tweet = clean_str(tweet)

    [y_pred, prob_pred] = models[model].classify([{"text": tweet}])
    labels = models[model].labels
    label_to_prob = {}
    for i in range(len(labels)):
        if isinstance(prob_pred[0][i], numpy.float32) or isinstance(prob_pred[0][i], numpy.float64):
            label_to_prob[labels[i]] = prob_pred[0][i].item()
        else:
            label_to_prob[labels[i]] = prob_pred[0][i]
    return label_to_prob
Example #8
0
def home():
    """Classify one tweet from the request and return label->probability JSON.

    Request args:
        text       -- the tweet to classify
        model      -- path to the pickled model file
        preprocess -- "true"/"false": whether to run clean_str first

    Returns an empty JSON object if the 'text' argument is malformed.
    """
    #The tweet to classify
    try:
        tweet = request.args['text'].lower()
    except Exception as e:
        print "Error processing request. Improper format of request.args['text'] might be causing an issue. Returning empty array"
        # BUG FIX: use .get() here -- re-indexing request.args['text'] inside
        # the handler re-raises the very KeyError we are trying to report.
        print "request.args['text'] = ", request.args.get('text')
        return json.dumps({})
    #The path to file containing the model
    model = str(request.args['model'])
    #Should the tweet be preprocessed
    preprocess = str(request.args['preprocess']).lower()
    #Lazily load the model on first request
    if model not in models:
        print "Model not in memory: ", model
        print "Loading model"
        # NOTE(review): unpickling a caller-supplied path is unsafe on
        # untrusted input. Context manager avoids leaking the file handle.
        with open(model, "rb") as f:
            models[model] = pickle.load(f)
        if load_word_vecs:
            print "Adding wordvecs"
            models[model].add_global_word_vecs(wordvecs)
        print "Done"

    # BUG FIX: preprocess was lower-cased above, so comparing against
    # "True" (capital T) could never match; compare to "true".
    if preprocess == "true":
        tweet = clean_str(tweet)

    [y_pred, prob_pred] = models[model].classify([{'text': tweet}])
    labels = models[model].labels

    label_to_prob = {}
    for label, prob in zip(labels, prob_pred[0]):
        # numpy scalars are not JSON-serializable; unwrap to Python floats.
        if isinstance(prob, (numpy.float32, numpy.float64)):
            label_to_prob[label] = prob.item()
        else:
            label_to_prob[label] = prob
    return json.dumps(label_to_prob)