Example #1
    def read(self):
        final = []
        count = 0
        paralleldots.set_api_key(ParallelDotKeys[count])
        with open('output.json', 'r') as file:
            filtered_sentences = json.loads(file.read())
        with open('transcribed.json', 'r') as file:
            all_sentences = json.loads(file.read())
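        # Classify every transcribed sentence that survived filtering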
        for sentence in all_sentences['sentences']:
            if sentence[0].strip() in filtered_sentences['sentences']:
                classification = paralleldots.custom_classifier(
                    sentence[0], categories)
                # Rotate to the next API key until the response contains a 'taxonomy' result
                while 'taxonomy' not in classification:
                    count += 1
                    if count >= len(ParallelDotKeys):
                        count = 0
                    paralleldots.set_api_key(ParallelDotKeys[count])
                    classification = paralleldots.custom_classifier(
                        sentence[0], categories)
                if classification['taxonomy'][0]['confidence_score'] >= 0.85:
                    temp = (sentence[0], sentence[1], sentence[2],
                            classification['taxonomy'][0]['tag'])
                    final.append(temp)

        return final
Example #2
def addNewsFeed(request):
    data = {}
    data['status']='false'
    if request.method == 'POST':
        username = request.user.username
        description = request.POST.get('inputContent')
        feed_type = request.POST.get('type')
        intended_for = request.POST.get('intended_for')
        image = request.POST.get('image')
        print('type: ' + str(feed_type))
        newsfeed_object = Newsfeed()
        newsfeed_object.user_name = username
        newsfeed_object.news_feed_type = feed_type
        newsfeed_object.description = description
        newsfeed_object.image = image
        newsfeed_object.intended_for = intended_for
        newsfeed_object.save()
        # Score the post against each category via the ParallelDots custom classifier
        category = {
            "Sports": ['Cricket', 'Football', 'Soccer', 'Swimming',
                       'Horse Riding', 'Table Tennis', 'Badminton'],
            'Artificial Intelligence': ['Machine Learning', 'Deep Learning', 'Mimic',
                                        'Linear Regression', 'Logistic Regression'],
            'Internet of Things': ['Automation', 'Alexa', 'Siri', 'Google Home'],
            'Data Structure and Algorithms': ['DFS', 'BFS', 'Array', 'Stacks',
                                              'Queues', 'Recursion', 'Disjoint Set'],
            'Competitive Programming': ['Codechef', 'Hackerearth', 'Hackerrank', 'Purple'],
            'Management': ['Event', 'Time'],
            'Developer': ['Software Engineering', 'Project', 'APIs', 'Web Development'],
            'Blockchain': ['Cryptocurrency', 'Bitcoins', 'Ethereum'],
            'Operating System': [],
            'Art': [],
            'Gaming': [],
            'Virtual Reality': [],
            'Microprocessors': [],
            'Aviation': [],
            'Mechanical Engineering': [],
            'Electronics Engineering': [],
            'Textile Engineering': [],
            'Mining Engineering': []
        }

        api_scores = paralleldots.custom_classifier(description, category)
        # Persist one NewsfeedScore row per category returned by the classifier
        for api_score in api_scores['taxonomy']:
            tag = api_score['tag']
            score = api_score['confidence_score']
            score_table = NewsfeedScore()
            score_table.newsfeed = newsfeed_object
            score_table.category = tag
            score_table.score = score
            score_table.save()

        data['status']='true'
    # else:
    return JsonResponse(data)
Example #3
def classify():
    list_of_complaints = get_tweets("railminindia")
    category = data_in_dict()
    paralleldots.set_api_key("qamrbbHa47QpyLFeMkwEQd7g5ae9RyelNaQZUyGwexg")
    classified_tweets = {}
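    # Classify each complaint tweet into one of the predefined categories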
    for text in list_of_complaints:
        response = paralleldots.custom_classifier(text, category)
        tag = response['taxonomy'][0]['tag']
        # NOTE: if two tweets share a tag, the later one overwrites the earlier
        classified_tweets[tag] = text

    #print(classified_tweets)
    return classified_tweets


#classify()
Example #4
def create_categorized_list():
    categorized_articles = list()
    # Classify every article and keep the raw API response alongside it
    for i in range(len(text)):
        response = paralleldots.custom_classifier(text[i], category)
        categorized_articles.append({
            'article': text[i],
            'scores': response
        })
        print("Article no.", i, "completed...")

    print(categorized_articles)

    return categorized_articles

# print(create_categorized_list())
Example #5
def addNewsFeeds(request):
    print('here')
    n1 = User_Table.objects.filter(
        user_name__username__icontains=request.user.username)
    data = {}
    if request.method == 'POST':
        print('inside POST')
        add_news_feed = addNewsFeedForm(data=request.POST)
        if add_news_feed.is_valid():
            # Save the validated form and attach the posting user,
            # so the NewsfeedScore rows below can reference the new entry
            news_feed = add_news_feed.save(commit=False)
            news_feed.user_name = request.user.username
            news_feed.save()
            category = {
                "Sports": [
                    'Cricket', 'Football', 'Soccer', 'Swimming',
                    'Horse Riding', 'Table Tennis', 'Badminton'
                ],
                'Artificial Intelligence': [
                    'Machine Learning', 'Deep Learning', 'Mimic',
                    'Linear Regression', 'Logistic Regression'
                ],
                'Internet of Things':
                ['Automation', 'Alexa', 'Siri', 'Google Home'],
                'Data Structure and Algorithms': [
                    'DFS', 'BFS', 'Array', 'Stacks', 'Queues', 'Recursion',
                    'Disjoint Set'
                ],
                'Competitive Programming':
                ['Codechef', 'Hackerearth', 'Hackerrank', 'Purple'],
                'Management': ['Event', 'Time'],
                'Developer':
                ['Software Engineering', 'Project', 'APIs', 'Web Development'],
                'Blockchain': ['Cryptocurrency', 'Bitcoins', 'Ethereum'],
                'Operating System': [],
                'Art': [],
                'Gaming': [],
                'Virtual Reality': [],
                'Microprocessors': [],
                'Aviation': [],
                'Mechanical Engineering': [],
                'Electronics Engineering': [],
                'Textile Engineering': [],
                'Mining Engineering': []
            }

            # Score the new post against each category
            api_scores = paralleldots.custom_classifier(
                request.POST.get('description'), category)
            for api_score in api_scores['taxonomy']:
                tag = api_score['tag']
                score = api_score['confidence_score']
                score_table = NewsfeedScore()
                score_table.newsfeed = news_feed
                score_table.category = tag
                score_table.score = score
                score_table.save()

            return index(request)
        else:
            # Form invalid: log the errors and re-render the bound form below
            print(add_news_feed.errors)
            add_news_feed_form = add_news_feed
    else:
        add_news_feed_form = addNewsFeedForm()

    print('here after')
    return render(request, 'basicapp/addnewsfeed.html', {
        'add_news_feed_form': add_news_feed_form,
        'user_record': n1
    })
Example #6
def extract_info():
    twitter_client = TwitterClient()
    tweet_analyzer = TweetAnalyzer()
    api = twitter_client.get_twitter_client_api()
    tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
    tweets = api.user_timeline(count=10, lang='en')
    df = tweet_analyzer.tweets_to_data_frame(tweets)
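    # Only the first of the fetched tweets is analysed here (range(1))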
    for i in range(1):
        text = df['text'][i]  # e.g. "need food for 6 people at mesra contact no. 9932356934 email = [email protected]"
        temp = ''
        # Screen the tweet for abusive language
        response1 = paralleldots.abuse(text)
        print(response1)

        # Find intent of the user input
        response2 = paralleldots.intent(text)
        print(response2)

        if response1['sentence_type'] == 'Abusive' or response2[
                'probabilities']['spam/junk'] > 0.5:
            flag = False
            ans = ['0, 0', '0', '0']
            return ans

        else:
            flag = True
            # print(flag)

        if flag:
            flag1 = False
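            # Check whether the tweet is flood-related by comparing its tokens
            # against flood_related_words using WordNet Wu-Palmer similarity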
            allsyns1 = set(ss for word in flood_related_words
                           for ss in wordnet.synsets(word))
            allsyns2 = set(ss for word in tknzr.tokenize(text)
                           for ss in wordnet.synsets(word))
            best = max((wordnet.wup_similarity(s1, s2) or 0, s1, s2)
                       for s1, s2 in product(allsyns1, allsyns2))
            print(best)
            if best[0] > 0.6:
                flag1 = True
            if flag1:
                response = paralleldots.ner(text)
                print(response)
                for j in range(len(response['entities'])):
                    if (response['entities'][j]['category'] == 'place' and
                            response['entities'][j]['confidence_score'] > 0.6):
                        print(response['entities'][j]['name'])
                        # get_location(response['entities'][i]['name'])

                # Classify the tweet as a request for help ("need") or an offer of help ("offer")
                category = {
                    "need": ['need', 'require', 'want', 'lack'],
                    "offer": [
                        'send', 'have', 'give', 'donate', 'transfer',
                        'distribute', 'aid', 'help', 'procure'
                    ]
                }
                response = paralleldots.custom_classifier(text, category)
                print(response)
                if response['taxonomy'][0]['confidence_score'] > response[
                        'taxonomy'][1]['confidence_score']:
                    temp = "need"
                else:
                    temp = "offer"
            num = get_contact(text, tweets[0])
            if temp == "need":
                category = {
                    "food": [],
                    "water": [],
                    "shelter": [],
                    "first-aid": [],
                    "help": []
                }
                response = paralleldots.custom_classifier(text, category)
                print(response)
                x = 0
                # Keep the category with the highest confidence score
                for j in range(5):
                    if response['taxonomy'][j]['confidence_score'] > x:
                        x = response['taxonomy'][j]['confidence_score']
                        cat = response['taxonomy'][j]['tag']

            else:
                category = {
                    "food": [],
                    "water": [],
                    "shelter": [],
                    "first-aid": []
                }
                response = paralleldots.custom_classifier(text, category)
                print(response)
                x = 0
                # Keep the category with the highest confidence score
                for j in range(4):
                    if response['taxonomy'][j]['confidence_score'] > x:
                        x = response['taxonomy'][j]['confidence_score']
                        cat = response['taxonomy'][j]['tag']

            quantity = re.findall(quant_no, text)
            qnt = []
            for j in quantity:
                if len(j) < 10:
                    qnt.append(j)

            print(qnt)
            s = tweets[0]
            loc1 = False
            if s.coordinates is None:
                sn = s.user.screen_name
                m = "@%s Hello! please share your location while tweeting" % (
                    sn)
                s = api.update_status(m, s.id)
            else:
                loc1 = True

            ans = []

            if loc1:
                ans.append(
                    str(tweets[0].coordinates['coordinates'][1]) + ', ' +
                    str(tweets[0].coordinates['coordinates'][0]))

            else:
                ans.append('0, 0')

            ans.append(num)
            print(len(qnt))
            if len(qnt) > 0:
                ans.append(qnt[0])
            else:
                ans.append('0')
            print(ans)

            return ans
Example #7
def test():
    set_api_key("Put your API key here")
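    # Exercise each ParallelDots endpoint once with sample inputs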
    category = {
        "finance": ["markets", "economy", "shares"],
        "world politics": ["diplomacy", "UN", "war"],
        "india": ["congress", "india", "bjp"]
    }
    print(
        similarity("Sachin is the greatest batsman",
                   "Tendulkar is the finest cricketer"))
    print(sentiment("Come on, lets play together"))
    print(ner("Narendra Modi is the prime minister of India", "en"))
    print(
        taxonomy(
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"
        ))
    print(
        keywords(
            "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
        ))
    print(
        phrase_extractor(
            "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
        ))
    print(
        emotion(
            "Did you hear the latest Porcupine Tree song ? It's rocking !"))
    print(
        intent(
            "Finance ministry calls banks to discuss new facility to drain cash"
        ))
    print(abuse("you f**king a$$hole"))
    print(
        custom_classifier("Narendra Modi is the prime minister of India",
                          category))
    print(
        batch_intent([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(batch_abuse(["drugs are fun", "dont do drugs, stay in school"]))
    print(
        batch_sentiment([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(
        batch_phrase_extractor([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(
        batch_taxonomy([
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019",
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"
        ]))
    print(
        batch_ner([
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019",
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"
        ]))
    print(
        batch_emotion([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(facial_emotion_url("https://i.imgur.com/klb812s.jpg"))
    print(object_recognizer_url("https://i.imgur.com/klb812s.jpg"))
    print(
        sarcasm(
            "The movie that i watched last night is so funny that i get rolled out with laughter"
        ))
    print(
        batch_sarcasm([
            "The movie that i watched last night is so funny that i get rolled out with laughter",
            "I want to spend my life alone"
        ]))