Example #1
def get_interest(request):
    P_URI = 'https://api.twitter.com/1.1/friends/ids.json'
    interests = {}

    if request.GET:
        username = request.GET.get('username')

        payload = {'screen_name': username, 'count': MEGA_COUNT}
        auth = OAuth1(API_ID, API_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
        r = requests.get(P_URI, params=payload, auth=auth)

        # IDs of the accounts this user follows
        users = r.json()['ids']

        url = "https://api.twitter.com/1.1/users/show.json"

        for ids in users:
            payload = {'user_id': ids}
            r = requests.get(url, params=payload, auth=auth)
            xyz = r.json()
            name = xyz['screen_name']
            topic = uclassify(xyz['description'])

            # group followed accounts by the topic of their bio
            if topic in interests:
                interests[topic] = interests[topic] + "," + name
            else:
                interests[topic] = name

    return HttpResponse(json.dumps(interests))
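
Every view in this collection leans on module-level setup that the snippets do not show; note also that the views call uclassify(...) as a text-to-topic helper, unlike the uclassify class used in later examples. A minimal sketch of the assumed setup follows; MEGA_COUNT, TIMELINE_URI, and all credential values are placeholders inferred from usage.

# Assumed module-level setup for these Django views (a sketch; every
# credential value is a placeholder).
import collections
import json
import operator
import re

import requests
from requests_oauthlib import OAuth1   # signs requests for the Twitter 1.1 API
from django.http import HttpResponse

MEGA_COUNT = 200                       # page size passed to the Twitter API
TIMELINE_URI = 'https://api.twitter.com/1.1/statuses/user_timeline.json'

API_ID = 'consumer-key'                # Twitter app + user credentials
API_SECRET = 'consumer-secret'
OAUTH_TOKEN = 'access-token'
OAUTH_TOKEN_SECRET = 'access-token-secret'

def uclassify(text):
    """Maps text to a topic label via the uClassify API; the actual
    implementation is not shown in these examples."""
    ...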
Example #2
def topics_hear_about(request):
    NEW_URI = "https://api.twitter.com/1.1/statuses/home_timeline.json"
    if request.GET:
        username = request.GET.get('username')

        payload = {'screen_name': username, 'count': MEGA_COUNT}

        auth = OAuth1(KEY2[0], KEY2[1], KEY2[2], KEY2[3])

        r = requests.get(NEW_URI, params=payload, auth=auth)

        abc = r.json()

        freq = collections.defaultdict(int)

        for i in abc:
            # strip non-ASCII characters before classifying
            text = re.sub(r'[^\x00-\x7F]+', ' ', i['text'])
            freq[uclassify(text)] += 1

        freq = sorted(freq.items(), key=operator.itemgetter(1), reverse=True)

        return HttpResponse(json.dumps(freq))

    return HttpResponse("Oops")
Example #3
def classify(keywords):

    # One-time training setup, kept for reference:
    # file = open('/home/pavan/Desktop/py/english/greetings.yml')
    # lines = file.readlines()
    # lineList = []
    # for line in lines:
    #     temp = line.rstrip("\n")
    #     lineList.append(temp.lstrip("- "))

    # fileLegal = open("/home/pavan/Desktop/py/law_words", "r")
    # linesLegal = fileLegal.readlines()
    # lineLegalList = []
    # for line in linesLegal:
    #     lineLegalList.append(line.rstrip("\n"))

    keyword_classifier = uclassify()
    keyword_classifier.setWriteApiKey("DDmXKJDNomia")
    keyword_classifier.setReadApiKey("xFA156CyIuFl")

    # keyword_classifier.create("GeneralorLegal2")
    # keyword_classifier.addClass(["General", "Legal"], "GeneralorLegal2")
    # keyword_classifier.train(lineList, "General", "GeneralorLegal2")
    # keyword_classifier.train(lineLegalList, "Legal", "GeneralorLegal2")

    legal_or_general = keyword_classifier.classify([keywords],
                                                   "GeneralorLegal2")[0][2]
    # probabilities come back as strings, so compare them as floats
    if float(legal_or_general[0][1]) >= float(legal_or_general[1][1]):
        return "general"
    else:
        return "legal"
Example #4
def get_interest(request):
    P_URI = 'https://api.twitter.com/1.1/friends/ids.json'
    interests = {}

    if request.GET:
        username = request.GET.get('username')

        payload = {'screen_name': username, 'count': MEGA_COUNT}
        auth = OAuth1(KEY5[0], KEY5[1], KEY5[2], KEY5[3])
        r = requests.get(P_URI, params=payload, auth=auth)

        # IDs of the accounts this user follows
        users = r.json()['ids']

        url = "https://api.twitter.com/1.1/users/show.json"

        for ids in users:
            payload = {'user_id': ids}
            r = requests.get(url, params=payload, auth=auth)
            xyz = r.json()
            name = xyz['screen_name']
            topic = uclassify(xyz['description'])

            if topic in interests:
                interests[topic] = interests[topic] + "," + name
            else:
                interests[topic] = name

    return HttpResponse(json.dumps(interests))
Example #5
def get_graph(request):

    URILL = 'https://api.twitter.com/1.1/friends/ids.json'

    if request.GET:

        username = request.GET.get('username')
        payload = {'screen_name': username, 'count': MEGA_COUNT}
        auth = OAuth1(API_ID, API_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

        r = requests.get(URILL, params=payload, auth=auth)

        abc = r.json()

        users = abc['ids']

        url = "https://api.twitter.com/1.1/users/show.json"

        interests = {}
        data = {}

        for ids in users:
            payload = {'user_id': ids}
            r = requests.get(url, params=payload, auth=auth)

            xyz = r.json()

            name = xyz['screen_name']
            topic = uclassify(xyz['description'])

            # join names with '@' so they can be split apart again below
            if topic in interests:
                interests[topic] = interests[topic] + "@" + name
            else:
                interests[topic] = name

        # each topic node hangs off the queried user ...
        for topic in interests.keys():
            data[topic] = username

        # ... and each followed account hangs off its topic
        for topic in interests.keys():
            for name in interests[topic].split('@'):
                data[name] = topic

        return HttpResponse(json.dumps(data))

    return HttpResponse("Oops")
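
The result is a flat child-to-parent map that a front end can render as a tree rooted at the queried user. A hypothetical response for ?username=alice (all names invented):

# Hypothetical shape of the JSON this view returns for ?username=alice;
# every key maps to its parent node, i.e. a flat edge list for a tree.
data = {
    "Sports": "alice",         # topic -> queried user
    "Technology": "alice",
    "espn": "Sports",          # followed account -> its topic
    "jack": "Technology",
}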
Example #6
def get_graph(request):

    URILL = 'https://api.twitter.com/1.1/friends/ids.json'

    if request.GET:

        username = request.GET.get('username')
        payload = {'screen_name': username, 'count': 20}
        auth = OAuth1(KEY6[0], KEY6[1], KEY6[2], KEY6[3])

        r = requests.get(URILL, params=payload, auth=auth)

        abc = r.json()

        users = abc['ids']

        url = "https://api.twitter.com/1.1/users/show.json"

        interests = {}
        data = {}

        for ids in users:
            payload = {'user_id': ids}
            r = requests.get(url, params=payload, auth=auth)

            xyz = r.json()

            name = xyz['screen_name']
            topic = uclassify(xyz['description'])

            # join names with '@' so they can be split apart again below
            if topic in interests:
                interests[topic] = interests[topic] + "@" + name
            else:
                interests[topic] = name

        # each topic node hangs off the queried user ...
        for topic in interests.keys():
            data[topic] = username

        # ... and each followed account hangs off its topic
        for topic in interests.keys():
            for name in interests[topic].split('@'):
                data[name] = topic

        return HttpResponse(json.dumps(data))

    return HttpResponse("Oops")
Example #7
def DoClassify():
    """
    Remember to start a local server first, as this is an entirely offline approach.
    Trains and evaluates using uClassify.
    """
    with open("rawSentiment", 'rb') as f:
        sentiment = pickle.load(f)
        del sentiment[0]  # drop the header row
    with open("rawReviews", 'rb') as f:
        review = pickle.load(f)
        del review[0]
    # strip non-ASCII so the API payload stays clean
    review = np.array([x.encode('ascii', 'ignore').decode('ascii')
                       for x in review])
    sentiment = np.array(sentiment)
    a = uclassify()
    positive = []
    negative = []
    neutral = []
    # (see Example #13 for the cross-validated variant of this routine)
    a.removeClassifier("fold")
    a.create("fold")
    a.addClass(["Negative", "Neutral", "Positive"], "fold")
    for r, s in zip(review, sentiment):
        try:
            if s == "1":
                positive.append(r)
            elif s == "-1":
                negative.append(r)
            elif s == "0":
                neutral.append(r)
            # train in batches of 50 to keep individual requests small
            if len(positive) > 50:
                a.train(positive, "Positive", "fold")
                positive = []
            if len(negative) > 50:
                a.train(negative, "Negative", "fold")
                negative = []
            if len(neutral) > 50:
                a.train(neutral, "Neutral", "fold")
                neutral = []
        except UnicodeEncodeError as ex:
            print("Cannot train; ", ex)
    # flush any partial batches so no training data is silently dropped
    if positive:
        a.train(positive, "Positive", "fold")
    if negative:
        a.train(negative, "Negative", "fold")
    if neutral:
        a.train(neutral, "Neutral", "fold")
Example #8
    def form_valid(self, form):
        feedback = form.save(commit=False)
        feedback.Student = User.objects.get(username=self.request.user)
        a = uclassify()
        a.setWriteApiKey('-----GET YOUR OWN WRITE KEY-----')
        a.setReadApiKey('-----GET YOUR OWN READ KEY-----')
        # a.create("GorB")  # one-time: creates the classifier "GorB"
        # a.addClass(["g", "b"], "GorB")  # one-time: adds the "good"/"bad" classes
        a.train(["Boring Bad", "Late Slow", "Too Fast",
                 "Dull BC MC BKL MKL Ganda Wost Worse Sleep Din't Not Can't"],
                "b", "GorB")
        a.train(["Nice Well Good Happy Great Amazing Wonderful Awsome "
                 "MindBlowing Crazy"], "g", "GorB")
        d = a.classify([feedback.comments], "GorB")

        # probability of the "g" (good) class; the (class, probability) pair
        # layout is assumed from how other examples here unpack classify()
        ls1 = float(d[0][2][0][1])
        feedback.ifactor = int((ls1 + 0.1) * 10)

        feedback.save()
        return redirect('/accounts/profile')
Example #9
def topics(request):
    # topics I like to talk about
    if request.GET:
        username = request.GET.get('username')
        payload = {'screen_name': username, 'count': MEGA_COUNT}
        auth = OAuth1(API_ID, API_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

        r = requests.get(TIMELINE_URI, params=payload, auth=auth)

        abc = r.json()

        freq = collections.defaultdict(int)

        for i in abc:
            # strip non-ASCII characters before classifying
            text = re.sub(r'[^\x00-\x7F]+', ' ', i['text'])
            freq[uclassify(text)] += 1

        freq = sorted(freq.items(), key=operator.itemgetter(1), reverse=True)

        return HttpResponse(json.dumps(freq))

    return HttpResponse("Oops")
Example #10
def topics(request):
    # topics I like to talk about
    if request.GET:
        username = request.GET.get('username')
        payload = {'screen_name': username, 'count': MEGA_COUNT}
        auth = OAuth1(KEY3[0], KEY3[1], KEY3[2], KEY3[3])

        r = requests.get(TIMELINE_URI, params=payload, auth=auth)

        abc = r.json()

        freq = collections.defaultdict(int)

        for i in abc:
            # strip non-ASCII characters before classifying
            text = re.sub(r'[^\x00-\x7F]+', ' ', i['text'])
            freq[uclassify(text)] += 1

        freq = sorted(freq.items(), key=operator.itemgetter(1), reverse=True)

        return HttpResponse(json.dumps(freq))

    return HttpResponse("Oops")
Example #11
def topics_hear_about(request):
    NEW_URI = "https://api.twitter.com/1.1/statuses/home_timeline.json"
    if request.GET:
        username = request.GET.get('username')

        payload = {'screen_name': username, 'count': MEGA_COUNT}

        auth = OAuth1(API_ID, API_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

        r = requests.get(NEW_URI, params=payload, auth=auth)

        abc = r.json()

        freq = collections.defaultdict(int)

        for i in abc:
            # strip non-ASCII characters before classifying
            text = re.sub(r'[^\x00-\x7F]+', ' ', i['text'])
            freq[uclassify(text)] += 1

        freq = sorted(freq.items(), key=operator.itemgetter(1), reverse=True)

        return HttpResponse(json.dumps(freq))

    return HttpResponse("Oops")
Example #12
    def __init__(self, sentimentV1_data=None):
        self.sentimentV1_data = sentimentV1_data
        self.classifier_name = id_generator(10)  # random name avoids collisions
        self.classifier = uclassify()
        self.classifier.setWriteApiKey("6jYmrGb25nVC")
        self.classifier.setReadApiKey("lNin5wW4Mod5")
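
The id_generator helper is not shown; a minimal sketch of what it presumably does (a random alphanumeric string; the alphabet and default size are assumptions):

# Minimal sketch of the assumed id_generator helper.
import random
import string

def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))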
Example #13
def DoClassify():
    """
    Remember to start a local server first, as this is an entirely offline approach.
    Trains and evaluates using uClassify with 3-fold cross-validation.
    """
    with open("rawSentiment", 'rb') as f:
        sentiment = pickle.load(f)
        del sentiment[0]  # drop the header row
    with open("rawReviews", 'rb') as f:
        review = pickle.load(f)
        del review[0]
    # strip non-ASCII so the API payload stays clean
    review = np.array([x.encode('ascii', 'ignore').decode('ascii')
                       for x in review])
    sentiment = np.array(sentiment)
    a = uclassify()
    positive = []
    negative = []
    neutral = []
    a.removeClassifier("fold%d" % 0)
    a.removeClassifier("fold%d" % 1)
    a.removeClassifier("fold%d" % 2)
    # sklearn < 0.18 API; newer versions use
    # sklearn.model_selection.KFold(n_splits=3).split(review)
    k_fold = cross_validation.KFold(len(review), 3)
    for k, (train, test) in enumerate(k_fold):
        t = 0  # correct predictions
        f = 0  # incorrect predictions
        print(k)
        a.create("fold%d" % k)
        a.addClass(["Negative", "Neutral", "Positive"], "fold%d" % k)
        for r, s in zip(review[train], sentiment[train]):
            try:
                if s == "1":
                    positive.append(r)
                elif s == "-1":
                    negative.append(r)
                elif s == "0":
                    neutral.append(r)
                # train in batches of 50 to keep individual requests small
                if len(positive) > 50:
                    a.train(positive, "Positive", "fold%d" % k)
                    positive = []
                if len(negative) > 50:
                    a.train(negative, "Negative", "fold%d" % k)
                    negative = []
                if len(neutral) > 50:
                    a.train(neutral, "Neutral", "fold%d" % k)
                    neutral = []
            except UnicodeEncodeError as ex:
                print("Cannot train; ", ex)
        # any leftover should not be excluded
        if len(positive) > 0:
            a.train(positive, "Positive", "fold%d" % k)
            positive = []
        if len(negative) > 0:
            a.train(negative, "Negative", "fold%d" % k)
            negative = []
        if len(neutral) > 0:
            a.train(neutral, "Neutral", "fold%d" % k)
            neutral = []
        it = 0
        print("testing")
        while it + 50 < len(test):
            d = a.classify(review[test[it:it + 50]], "fold%d" % k)
            for dd, ss in zip(d, sentiment[test[it:it + 50]]):
                # compare the predicted class (argmax over the returned
                # probabilities) against the ground-truth label
                if Sentiment(np.array([float(x[1]) for x in dd[2]]).argmax()).value - 1 == int(ss):
                    t += 1
                else:
                    f += 1
            it += 50
        # score the final partial batch as well
        d = a.classify(review[test[it:len(test)]], "fold%d" % k)
        for dd, ss in zip(d, sentiment[test[it:len(test)]]):
            if Sentiment(np.array([float(x[1]) for x in dd[2]]).argmax()).value - 1 == int(ss):
                t += 1
            else:
                f += 1
        print(t, f)
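
The scoring relies on a Sentiment enum that is not shown: the argmax index over the [Negative, Neutral, Positive] probabilities maps to a label in {-1, 0, 1} via value - 1. A minimal sketch of the assumed enum:

# Minimal sketch of the assumed Sentiment enum: argmax index i over the
# [Negative, Neutral, Positive] order maps to ground-truth label i - 1.
from enum import Enum

class Sentiment(Enum):
    Negative = 0
    Neutral = 1
    Positive = 2

assert Sentiment(2).value - 1 == 1    # Positive matches ground truth "1"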
Example #14
def uclassify_api(tweets_text, classifier):
    # query the uClassify read API with one block of tweet text
    uapi = uclassify()
    uapi.setReadApiKey(UCLASSIFY_API_READ_KEY)
    return uapi.classify([tweets_text], classifier, username="******")
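
A hypothetical call, assuming the masked username owns a classifier named "Sentiment" and that UCLASSIFY_API_READ_KEY is defined at module level:

# Hypothetical usage; the classifier name "Sentiment" is an assumption.
result = uclassify_api("loved the keynote, hated the queue", "Sentiment")
print(result)   # per-text (class, probability) pairs, as in the other examples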
Example #15
className = "LegoSentiment"

def analyse(data,i):
    locDat = []
    for s in data[i:i+blocksize]:
        locDat.append(s.encode("utf-8"))
    result = a.classifyKeywords(locDat,"Sentiment","uClassify")
    return  result

def train(data,a):
    a.create(className)
    a.addClass([-1,0,1],className)
    




if __name__ == "__main__":

    a = uclassify()
    a.setWriteApiKey(WRITE_API_KEY)
    a.setReadApiKey(READ_API_KEY)
    data = readCustomDat("../Data/truth2.tsv")
    current = 3*blocksize
    result = analyse(data[:, 1], current)
    save(result, "../Data/sentRes.txt")
Example #16
#!/usr/bin/env python

import requests
import facebook
import json
from uclassify import uclassify
from pyechonest import config, song
import random
import urllib, urllib2
import time

a = uclassify("own key here")
'''get your own '''
a.setReadApiKey("own key here")
a.setWriteApiKey("own key here")

# need to update the token every so often for now, about every hour
oauth_access_token = "own key here"
graph = facebook.GraphAPI(oauth_access_token)
newf = graph.get_connections("TuftsConfessions", "feed")

# schools to troll
schools = [
    # MIT
    "CornellEdufess",   # Cornell
    "521835501190847",  # CMU
    "NYUSecrets",       # NYU
]

# array of message dictionaries
messg = []