Example #1
def test():
    set_api_key("write your api key here")
    similarity("Sachin is the greatest batsman",
               "Tendulkar is the finest cricketer")
    sentiment("Come on, lets play together")
    ner("Narendra Modi is the prime minister of India")
    keywords(
        "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
    )
    emotion("Did you hear the latest Porcupine Tree song ? It's rocking !")
    intent(
        "Finance ministry calls banks to discuss new facility to drain cash")
    abuse("you f**king a$$hole")
    batch_intent([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_abuse([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_ner([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_sentiment([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_phrase_extractor([
        "drugs are fun", "don\'t do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
Example #2
def find_similar_reports(who, location, what, other_reports):
    # Each report is expected to be a (who, location, what) sequence.
    reports = []
    for report in other_reports:
        if who == report[0]:
            score = similarity(what, report[2])['normalized_score']
            print("SIMILARITY: {}".format(score))
            if location == report[1] or score >= 4.55:
                reports.append(report)
    return reports
Example #3
def test():
    similarity("Sachin is the greatest batsman",
               "Tendulkar is the finest cricketer")
    sentiment("Come on, lets play together")
    taxonomy("Narendra Modi is the prime minister of India")
    ner("Narendra Modi is the prime minister of India")
    keywords(
        "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
    )
    emotion("Did you hear the latest Porcupine Tree song ? It's rocking !")
    intent(
        "Finance ministry calls banks to discuss new facility to drain cash")
    abuse("you f**king a$$hole")
Example #4
def nlp(req):  # NLP work
    datetime = feedbacks.objects.latest('id').DateTime
    category = req.POST.get("category")
    text = req.POST.get("text")[10:]
    text = text[:-2]
    bw = req.POST.get("bw")
    fid = feedbacks.objects.latest('id').id
    counter = 0
    feedback = analyzedFeedbacks.objects.all()
    for feedbac in feedback:
        if classify(text) > 70:  # A score above 70 means the text is garbage
            return
        # Find similar feedbacks
        if feedbac.category.lower() == category.lower() and feedbac.bw.lower() == bw.lower():
            paralleldots.set_api_key(
                "pCQlFdWiBwhGO8RERIGpwHDeAHQmWUjP3i9LLOrK0oc")  # ParallelDots API key
            result = paralleldots.similarity(
                feedbac.text.lower(), text.lower())
            # print(result['similarity_score'])
            # A similarity_score of 0.65 or higher is treated as the same feedback; adjust the threshold as needed.
            if result['similarity_score'] >= 0.65:
                counter = counter + 1
                postToRelated(fid, feedbac.fid)  # Record the relation in the related table
                return
    # The feedback is neither garbage nor a duplicate, so add it to the analyzedFeedbacks table
    m = analyzedFeedbacks(
        DateTime=datetime, category=category, text=text, bw=bw, fid=fid, related=counter)
    m.save()
Example #5
def nlp_function(data):
    paralleldots.set_api_key("pwYgvFI30sVIFqTDdbmLM68vbjYwnZ1shoCe8GXGQwk")
    text1 = data
    text2 = "this is rajeev"
    response = paralleldots.similarity(text1, text2)
    print(response)
    return response
Example #6
def compare_text(text_a, text_b):
    print("{} {}".format("text_a: ", text_a))
    print("{} {}".format("text_b: ", text_b))
    paralleldots.set_api_key('VTpYXtJtNEOrPA2uqvLknLpAANMrbgEYOzyDxE7DmYg')
    score = paralleldots.similarity(text_a, text_b)
    print(score)
    similarity_score = score['similarity_score']
    print(similarity_score)
    return similarity_score
Example #7
def similarity(target, compare):
    api_key = "djTeOg4gRQRnwl25dgswQoj1joPGmlila2puvacHu9w"
    paralleldots.set_api_key(api_key)
    sim = paralleldots.similarity(target, compare)
    json_sim = json.dumps(sim)
    #print(json_sim)
    d = literal_eval(json_sim)
    #print(d)
    data = d["actual_score"]
    #print(data)
    return data
Example #8
    def findCharter(self):
        text1 = self.getTextBody(self.dic)

        pq = []
        samecategoryname = []
        samecategorypath = []
        maxScore = 0
        bestCharters = []
        for filename in os.listdir(self.charterDir):
            if filename[0] == '~':
                continue
            if filename.endswith(".docx"):
                filepath = self.charterDir + filename
                dic = self.getDic(filepath)
                text2 = self.getTextBody(dic)
                print(text2)
                scoreDic = similarity(text1, text2)
                print(scoreDic)
                score = scoreDic["actual_score"]
                print(type(score))
                print(score, " ", filename)
                currname = ""
                keyCategoriesDict = self.getDic(filepath)

                for key, value in keyCategoriesDict.items():
                    if str(key) == "Project Title":
                        print(str(value))
                        currname = str(value)
                    if str(key) == "Project Type":
                        if str(value) == self.type and currname != self.title and filepath != self.filePath:
                            samecategorypath.append(filename)
                            samecategoryname.append(currname)
                        break

                if currname != self.title and filepath != self.filePath:
                    heapq.heappush(pq, (1 - score, filename))

                print("--------")
                if score > maxScore and currname != self.title:
                    maxScore = score
                    bestCharter = filename
        for i in range(min(3, len(pq))):
            f2 = heapq.heappop(pq)
            bestCharters.append(f2[1])
        return bestCharters, samecategorypath
Example #9
def predict(claim, source='All'):
    lite_client = retinasdk.LiteClient("2bc45a70-3a85-11e8-9172-3ff24e827f76")

    def get_news_titles(claim, keywords):
        kw = keywords
        import itertools
        claim_words = claim.split()
        for i in range(len(keywords)):
            keywords[i] = keywords[i] + " "
        keys_flat = list(itertools.chain(*keywords))
        keywords = ''.join(keys_flat)
        new_claim = ""
        for word in claim_words:
            if word in keywords:
                new_claim = new_claim + " +" + word
            else:
                new_claim = new_claim + " " + word
        news_titles = []
        news_api = search_news_api(new_claim)
        if news_api is not None:
            news_titles.append(news_api)
        return news_titles

    keywords = lite_client.getKeywords(claim)
    news = get_news_titles(claim.lower(), keywords)
    if len(news) <= 0:
        return -1
    # Flatten nested lists of titles so that news is a flat list of strings.
    if not isinstance(news[0], str):
        new = []
        for y in news:
            for x in y:
                new.append(x)
        news = new
    count_agree = 0
    count_disagree = 0
    for title in news:

        test_sim = paralleldots.similarity(claim, title)
        score = test_sim["actual_score"]
        if score > 0.5:
            count_agree += 1
        elif score <= 0.5:
            count_disagree += 1
    probability = (count_agree / (count_agree + count_disagree)) * 100
    return probability
Example #10
import paralleldots

paralleldots.get_api_key()

test_sim = paralleldots.similarity("computer dead", "computer dead")
score = test_sim["actual_score"]
print(score)  # Test
Example #11
def compareTranscripts(text1, text2):
    API_KEY = os.getenv('TRANSCRIPT_API')
    paralleldots.set_api_key(API_KEY)
    response = paralleldots.similarity(text1, text2)
    return response['similarity_score']
Example #12
def similar(text1, text2):
    pdot.set_api_key("91cjpk2HkVWLoO0NUfM93AJ66DFM7SHLK7kmbymt4LE")

    response = pdot.similarity(text1, text2)

    return response['actual_score'] >= SIMILARITY_CUTOFF
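Example #12 compares response['actual_score'] against a module-level SIMILARITY_CUTOFF that is defined elsewhere in its source file and not shown here. A minimal usage sketch follows; the value 0.6 is an illustrative assumption, not the original constant.

SIMILARITY_CUTOFF = 0.6  # illustrative value; the original module's constant is not shown

if similar("the payment page is broken", "checkout fails with an error"):
    print("treat these two reports as duplicates")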
Example #13
def similarityscore(title1, title2):
    return similarity(title1, title2)['normalized_score']
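Note that the examples above read different keys from the similarity response: similarity_score (Examples #4, #6, #11), actual_score (Examples #7, #9, #10, #12, among others) and normalized_score (Examples #2, #13), which suggests the response shape has varied across ParallelDots API versions. A minimal defensive accessor, written as a sketch; the helper name extract_score is hypothetical and assumes the response is a plain dict.

def extract_score(response):
    # Try the score keys seen across the examples above, in order; which one
    # is present depends on the ParallelDots API version that produced it.
    for key in ('similarity_score', 'actual_score', 'normalized_score'):
        if key in response:
            return response[key]
    raise KeyError("no similarity score in response: {}".format(response))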
Example #14
from paralleldots import set_api_key, get_api_key, similarity, ner, taxonomy, sentiment, keywords, intent, emotion, multilang, abuse, sentiment_social
# DO NOT test at random: usage is limited to 100 calls/day. For ad-hoc testing go to: https://www.paralleldots.com/semantic-analysis
# More API examples here: https://github.com/ParallelDots/ParallelDots-Python-API

set_api_key("rjIdkelw0TpgqoMXvVm3GU6ZSmrlIQCawicY5mGyB0I")

test = similarity("Sachin is the greatest batsman",
                  "Tendulkar is the finest cricketer")
print(test)
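The comment in Example #14 warns that usage is limited to 100 calls/day. A minimal caching sketch that avoids spending calls on repeated text pairs; the wrapper name cached_similarity is hypothetical, functools.lru_cache only caches within a single process, and the similarity_score key is assumed to be present, as in Examples #4, #6 and #11.

import functools

import paralleldots

@functools.lru_cache(maxsize=256)
def cached_similarity(text1, text2):
    # Assumes paralleldots.set_api_key(...) has already been called, as in Example #14.
    # Only the first request for a given (text1, text2) pair hits the API;
    # later identical calls return the cached score.
    return paralleldots.similarity(text1, text2)['similarity_score']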
Example #15
def get_similarity(text1, text2):
    return paralleldots.similarity(text1, text2)
Example #16
    def findCharter(self):
        text1 = self.getTextBody(self.dic)
        
        pq = []
        samecategoryname = []
        samecategorypath = []
        maxScore = 0
        bestCharter = ""
        for filename in os.listdir(self.charterDir):
            if filename[0] == '~':
                continue
            if filename.endswith(".docx"):
                filepath = self.charterDir + filename
                dic = self.getDic(filepath)
                text2 = self.getTextBody(dic)
                print(text2)
                scoreDic = similarity(text1, text2)
                print(scoreDic)
                score = scoreDic["actual_score"]
                print(type(score))
                print(score, " ", filename)
                currname = ""
                keyCategoriesDict = self.getDic(filepath)
                
                for key, value in keyCategoriesDict.items():
                    if str(key) == "Project Title":
                        print(str(value))
                        currname = str(value)
                    if str(key) == "Project Type":
                        if str(value) == self.type and currname != self.title and filepath != self.filePath:
                            samecategorypath.append(filepath)
                            samecategoryname.append(currname)
                        break

                if currname != self.title and filepath != self.filePath:
                    heapq.heappush(pq, (1 - score, currname))

                print("--------")
                if score > maxScore and currname != self.title:
                    maxScore = score
                    bestCharter = filename

        
        print("Current Project: ",self.title)
        print("--------")
        print("Here is the list of projects with the same project type:")
        for i in range (0,len(samecategoryname)):
            print("--------")
            print("Project Name: ",samecategoryname[i])
            print(samecategorypath[i])
        print("--------")
        
        
        amount = 3
        if len(pq)<amount:
            amount = len(pq)
        print("Here is the list of similar projects:")
        for i in range (0,amount):
            best = heapq.heappop(pq)
            print("--------")
            print("Project Name: ", best[1])
            print("Percentage of Similarity: ", 1-best[0])
        print("--------")

        return bestCharter
Example #17
def test():
    set_api_key("Put your Api key here")
    category = {
        "finance": ["markets", "economy", "shares"],
        "world politics": ["diplomacy", "UN", "war"],
        "india": ["congress", "india", "bjp"]
    }
    print(
        similarity("Sachin is the greatest batsman",
                   "Tendulkar is the finest cricketer"))
    print(sentiment("Come on, lets play together"))
    print(ner("Narendra Modi is the prime minister of India", "en"))
    print(
        taxonomy(
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"
        ))
    print(
        keywords(
            "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
        ))
    print(
        phrase_extractor(
            "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
        ))
    print(
        emotion(
            "Did you hear the latest Porcupine Tree song ? It's rocking !"))
    print(
        intent(
            "Finance ministry calls banks to discuss new facility to drain cash"
        ))
    print(abuse("you f**king a$$hole"))
    print(
        custom_classifier("Narendra Modi is the prime minister of India",
                          category))
    print(
        batch_intent([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(batch_abuse(["drugs are fun", "dont do drugs, stay in school"]))
    print(
        batch_sentiment([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(
        batch_phrase_extractor([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(
        batch_taxonomy([
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019",
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"
        ]))
    print(
        batch_ner([
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019",
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"
        ]))
    print(
        batch_emotion([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(facial_emotion_url("https://i.imgur.com/klb812s.jpg"))
    print(object_recognizer_url("https://i.imgur.com/klb812s.jpg"))
    print(
        sarcasm(
            "The movie that i watched last night is so funny that i get rolled out with laughter"
        ))
    print(
        batch_sarcasm([
            "The movie that i watched last night is so funny that i get rolled out with laughter",
            "I want to spend my life alone"
        ]))