Example #1
def test_mixed(resource, capsys):
    main(resource('mixed.txt'))
    out, err = capsys.readouterr()
    score = float(re.search('score of (.+?) with', out).group(1))
    assert score <= 0.3
    assert score >= -0.3
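These tests assume a pytest fixture named resource that maps a bare filename to a test-data path, plus a main entry point imported from the module under test. A minimal sketch of such a fixture (the 'resources' directory name is an assumption, not from the source):

import os
import pytest

@pytest.fixture
def resource():
    # hypothetical fixture: resolve a filename against a local
    # 'resources' directory next to the test file
    def _resolve(filename):
        return os.path.join(os.path.dirname(__file__), 'resources', filename)
    return _resolve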
Example #2
def test_neutral(resource, capsys):
    main(resource('neutral.txt'))
    out, err = capsys.readouterr()
    # a lazy (.+?) at the end of a pattern matches just one character, so match the number explicitly
    magnitude = float(re.search('magnitude of ([0-9.]+)', out).group(1))
    assert magnitude <= 2.0
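The regular expressions above only match if main prints its report in a fixed phrasing. A hypothetical main that would satisfy these tests (analyze() and its return values are assumptions, not from the source):

def main(filename):
    # hypothetical sketch: the tests only require output containing
    # 'score of <x> with' and 'magnitude of <y>'
    score, magnitude = analyze(filename)
    print('Sentiment: score of {} with magnitude of {}'.format(score, magnitude))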
Example #3
def my_form_post():
    # handles both the initial profile-analysis form submission and the
    # follow-up rating submission (distinguished by the number of query args)
    global path_for_rating
    global objects_2D_arr
    global tags_2D_arr
    global categories_2D_arr
    global adult_2D_arr
    global senti_2D_arr
    global average_rating
    try:
        # prefer the proxy-supplied header, fall back to the direct remote address
        user_ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
        print("IP of incoming connection is", user_ip)
        print(request.args)
        if len(request.args) != 1:
            website = request.args['website']
            text = request.args['fname']
            captcha_response = request.args['g-recaptcha-response']
            print(text + " this is text " + website)
            web_header = "http://www.twitter.com"
            if website == "Facebook":
                web_header = ""
            elif website == "Reddit":
                web_header = ""
            elif website == "Instagram":
                web_header = ""
            #Profile_URL=Profile_URL_Generator(web_header,text)
            Profile_URL = text
            #print("URL generated=", Profile_URL)
            if captcha_response != "" and captcha_response is not None:  # and URL_CHECKER(Profile_URL)
                #webbrowser.open(Profile_URL)
                average_rating=0
                objects_2D_arr = []
                tags_2D_arr = []
                categories_2D_arr = []
                adult_2D_arr = []
                senti_2D_arr = []
                if(website=="Facebook"):
                    username = extract_username(Profile_URL)
                    # the with-block closes the file; an explicit close() is redundant
                    with open('input.txt', "w") as f:
                        f.write(Profile_URL)
                    temp_path="C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\Data\\"+username
                    if(not os.path.isdir(temp_path)):
                        Facebook_scraper.main()
                    temp_path="C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\Data\\"+username+"\\Uploaded Photos"
                    if(os.path.isdir(temp_path)):
                        results = Msft_Vision_onlocalImage.main('C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\Data\\' + username + '\\Uploaded Photos')
                    else:
                        results = Msft_Vision_onlocalImage.main('C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\Data\\' + username + '\\Tagged Photos')
                    #results=Msft_Vision_onlocalImage.main('C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\Data\\'+username+'\\Uploaded Photos')
                #two_D_arr=json.dumps([['Task', 'Hours per Day'],['Work', 8],['Eat', 2],['TV', 4],['Gym', 2],['Sleep', 8]])
                    objects=results[0]
                    tags=results[1]
                    categories=results[2]
                    adults=results[3]
                    objects.insert(0,['Task', 'Hours per Day'])
                    tags.insert(0,['Task', 'Hours per Day'])
                    categories.insert(0, ['Task', 'Hours per Day'])
                    adults.insert(0, ['Task', 'Hours per Day'])
                    objects_2D_arr=json.dumps(objects)
                    tags_2D_arr = json.dumps(tags)
                    categories_2D_arr = json.dumps(categories)
                    adult_2D_arr = json.dumps(adults)
                    string_list=get_all_strings("C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\Data\\"+username,"Facebook")
                    path_for_rating="C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\Data\\"+username
                    if os.path.isfile(path_for_rating + "\\Rating.csv"):
                        # mean of all ratings submitted for this profile so far
                        average_rating = pd.read_csv(path_for_rating + "\\Rating.csv")['Rating'].mean()
                    # the Facebook branch reuses the "Reddit" mode of the document builder
                    documents = create_array_of_dictionaries(string_list, "Reddit")
                    results2 = sentiment_analysis.main(documents)
                    senti_arr = []
                    print(results2)
                    print(len(results2["documents"]))
                    print(string_list)
                    cd = datetime.date.today()
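                    # assumption: the scraped data carries no per-post timestamps,
                    # so dates are synthesized one day apart counting back from today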
                    for i in range(0, len(results2["documents"])):
                        try:
                            # print(string_list[i][0])
                            #templi = string_list[i][0].split(" ")
                            templi=cd-datetime.timedelta(i)
                            print(templi)
                            # senti_arr.append([i+1, results2["documents"][i]["score"]])
                            senti_arr.append([templi.strftime("%d-%b-%Y"), results2["documents"][i]["score"]])
                        except Exception as e:
                            print(e)
                    # print(senti_arr)
                    senti_arr.insert(0, ['Sentiment Score', 'Sentiment Score'])
                    print(senti_arr)
                    senti_2D_arr = json.dumps(senti_arr)
                    #return render_template('pie_chart.html',input=objects_2D_arr)
                    return render_template('simplpc-4.html',object=objects_2D_arr,tag=tags_2D_arr,category=categories_2D_arr,adult=adult_2D_arr,senti=senti_2D_arr,Rating=average_rating)
                if(website=="Reddit"):
                    username = re.search(r'https://www.reddit.com/user/([^/?]+)', Profile_URL).group(1)
                    if(Profile_URL[len(Profile_URL)-1]=='/'):
                        Profile_URL=Profile_URL[:-1]
                    #reddit_scrape.main(Profile_URL)
                    if (not os.path.isdir('C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\reddit\\' + username)):
                        reddit_scrape.main(Profile_URL,username)
                    results=Msft_Vision_onlocalImage.main('C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\reddit\\'+username)
                    path_for_rating='C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\reddit\\'+username
                    if os.path.isfile(path_for_rating + "\\Rating.csv"):
                        average_rating = pd.read_csv(path_for_rating + "\\Rating.csv")['Rating'].mean()
                    objects = results[0]
                    tags = results[1]
                    categories = results[2]
                    adults = results[3]
                    objects.insert(0, ['Task', 'Hours per Day'])
                    tags.insert(0, ['Task', 'Hours per Day'])
                    categories.insert(0, ['Task', 'Hours per Day'])
                    adults.insert(0, ['Task', 'Hours per Day'])
                    objects_2D_arr = json.dumps(objects)
                    tags_2D_arr = json.dumps(tags)
                    categories_2D_arr = json.dumps(categories)
                    adult_2D_arr = json.dumps(adults)
                    string_list=get_all_strings("C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\reddit\\"+username+"\\"+"Reddit_response.csv","Reddit")
                    print(string_list)
                    documents=create_array_of_dictionaries(string_list,"Reddit")
                    results2=sentiment_analysis.main(documents)
                    senti_arr = []
                    print(results2)
                    print(len(results2["documents"]))
                    print(string_list)
                    for i in range(0, len(results2["documents"])):
                        try:
                            # print(string_list[i][0])
                            templi = string_list[i][0].split(" ")
                            # senti_arr.append([i+1, results2["documents"][i]["score"]])
                            senti_arr.append([templi[0], results2["documents"][i]["score"]])
                        except Exception as e:
                            print(e)
                    # print(senti_arr)
                    senti_arr.insert(0, ['Sentiment Score', 'Sentiment Score'])
                    print(senti_arr)
                    senti_2D_arr = json.dumps(senti_arr)
                    # return render_template('pie_chart.html',input=objects_2D_arr)
                    return render_template('simplpc-4.html', object=objects_2D_arr, tag=tags_2D_arr, category=categories_2D_arr,
                                           adult=adult_2D_arr,senti=senti_2D_arr,Rating=average_rating)
                if(website=="Twitter"):
                    username=re.search(r'https://twitter.com/([^/?]+)', Profile_URL).group(1)

                    if(not os.path.isdir('C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\twitter\\'+username)):
                        Twitter3.get_all_tweets(username)

                    string_list=get_all_strings("C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\twitter\\"+username+"\\"+username+"_tweets.csv","Twitter")
                    print(string_list)
                    documents=create_array_of_dictionaries(string_list,"Twitter")
                    results2=sentiment_analysis.main(documents)
                    results=Msft_Vision_onlocalImage.main('C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\twitter\\'+username)
                    path_for_rating='C:\\Users\\Vaibhav\\PycharmProjects\\SMA_1\\twitter\\'+username
                    if os.path.isfile(path_for_rating + "\\Rating.csv"):
                        average_rating = pd.read_csv(path_for_rating + "\\Rating.csv")['Rating'].mean()
                    objects = results[0]
                    tags = results[1]
                    categories = results[2]
                    adults = results[3]
                    objects.insert(0, ['Task', 'Hours per Day'])
                    tags.insert(0, ['Task', 'Hours per Day'])
                    categories.insert(0, ['Task', 'Hours per Day'])
                    adults.insert(0, ['Task', 'Hours per Day'])

                    senti_arr=[]
                    print(results2)
                    print(len(results2["documents"]))
                    print(string_list)

                    for i in range(0, len(results2["documents"])):
                        try:
                            #print(string_list[i][0])
                            templi=string_list[i][0].split(" ")
                            #senti_arr.append([i+1, results2["documents"][i]["score"]])
                            senti_arr.append([templi[0],results2["documents"][i]["score"]])
                        except Exception as e:
                            print(e)
                    #print(senti_arr)
                    senti_arr.insert(0, ['Sentiment Score', 'Sentiment Score'])
                    print(senti_arr)
                    senti_2D_arr=json.dumps(senti_arr)
                    objects_2D_arr = json.dumps(objects)
                    tags_2D_arr = json.dumps(tags)
                    categories_2D_arr = json.dumps(categories)
                    adult_2D_arr = json.dumps(adults)
                    print(results2)
                    return render_template('simplpc-4.html', object=objects_2D_arr, tag=tags_2D_arr, category=categories_2D_arr,
                                           adult=adult_2D_arr,senti=senti_2D_arr,Rating=average_rating)
            else:
                return render_template('Invalid_Profile.html')
        else:
            # a single query argument means this is a rating submission;
            # the rating value arrives as the lone query-string key
            print(path_for_rating)
            path_for_rating2 = path_for_rating + "\\Rating.csv"
            user_rating = 1
            for key in request.args:
                user_rating = key
            exist = False
            if os.path.isfile(path_for_rating2):
                print("Path found")
                file = pd.read_csv(path_for_rating2)
                # one vote per IP: check whether this client has already rated
                for index, row in file.iterrows():
                    ip = row['IP']
                    print(ip)
                    if user_ip == ip:
                        exist = True
                        break
            print("existence is",exist)
            if not exist:
                # append this rating, writing the CSV header only when the file is new
                write_header = not os.path.isfile(path_for_rating2)
                with open(path_for_rating2, mode='a', encoding='utf-8', newline='') as f:
                    writer = csv.DictWriter(f, fieldnames=['IP', 'Rating'])
                    if write_header:
                        writer.writeheader()
                    writer.writerow({'IP': user_ip, 'Rating': user_rating})
            if os.path.isfile(path_for_rating + "\\Rating.csv"):
                average_rating = pd.read_csv(path_for_rating + "\\Rating.csv")['Rating'].mean()
            return render_template('simplpc-4.html', object=objects_2D_arr, tag=tags_2D_arr, category=categories_2D_arr,
                                   adult=adult_2D_arr, senti=senti_2D_arr,Rating=average_rating)
    except Exception as e:
        print(e)
        return render_template('Invalid_Profile.html')
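The view above relies on several project helpers whose definitions are not shown (get_all_strings, create_array_of_dictionaries, sentiment_analysis.main). Judging only from how the result is consumed (results2["documents"][i]["score"]), the sentiment payload follows a documents-list shape; a hypothetical reconstruction of create_array_of_dictionaries under that assumption:

def create_array_of_dictionaries(string_list, website):
    # hypothetical sketch: wrap each scraped row into the
    # {'documents': [...]} payload shape the sentiment service expects
    return {'documents': [
        {'id': str(i + 1), 'language': 'en', 'text': ' '.join(map(str, row))}
        for i, row in enumerate(string_list)
    ]}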
Example #4
def test_neg(resource, capsys):
    main(resource('neg.txt'))
    out, err = capsys.readouterr()
    score = float(re.search('score of (.+?) with', out).group(1))
    magnitude = float(re.search('magnitude of ([0-9.]+)', out).group(1))
    assert score * magnitude < 0
def test_mixed(resource, capsys):
    main(resource('mixed.txt'))
    out, err = capsys.readouterr()
    score = float(re.search('score of (.+?) with', out).group(1))
    assert score <= 0.3
    assert score >= -0.3
def test_neutral(resource, capsys):
    main(resource('neutral.txt'))
    out, err = capsys.readouterr()
    magnitude = float(re.search('magnitude of ([0-9.]+)', out).group(1))
    assert magnitude <= 2.0
def test_mixed(resource, capsys):
    main(resource('mixed.txt'))
    out, err = capsys.readouterr()
    polarity = float(re.search('polarity of (.+?) with', out).group(1))
    assert polarity <= 0.3
    assert polarity >= -0.3
def test_pos(resource, capsys):
    main(resource('pos.txt'))
    out, err = capsys.readouterr()
    polarity = float(re.search('polarity of (.+?) with', out).group(1))
    magnitude = float(re.search('magnitude of ([0-9.]+)', out).group(1))
    assert polarity * magnitude > 0
Example #10
Takes a path to a video as a parameter, analyzes the sentiment of the audio,
and determines whether the person in the frame is speaking or not
'''
if __name__ == '__main__':
    # check the command-line arguments
    if len(sys.argv) < 2 or not os.path.isfile(sys.argv[1]):
        raise Exception('usage: python video_analysis.py path_to_video_file')

    # convert the audio track to WAV
    audio_file = speech_recognition.convert_to_wav(sys.argv[1])

    # transcribe the audio
    timestamp, text = speech_recognition.speech_to_text(audio_file)

    # sentiment analysis of the transcript
    sentiments = sentiment_analysis.main(text)

    # open the video
    cap = cv2.VideoCapture(sys.argv[1])

    # frames per second of the video
    fps = cap.get(cv2.CAP_PROP_FPS)

    # height of the video frames
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # lists to collect the frames and the face-detection results
    frames = []
    faces = []

    # read the video
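The example is truncated at this point. A minimal sketch of the read loop implied by the comments above (an assumption, not the source's code):

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
    cap.release()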
def sentiment_analysis():
    GSA.main("searchTweetOutput.txt")