Example #1
def get_levenshtein_sim(str1, str2):
    levenshtein = Levenshtein()
    if str1 == 'nan' or str2 == 'nan' or str1 == '' or str2 == '':
        return -1.0
    else:
        max_length = max(len(str1), len(str2))
        return 1.0 - levenshtein.distance(str1, str2) / max_length
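A quick usage sketch (the import path follows Example #13's convention; the sample strings are illustrative):

from similarity.levenshtein import Levenshtein

print(get_levenshtein_sim('kitten', 'sitting'))  # 1 - 3/7 ≈ 0.571
print(get_levenshtein_sim('', 'sitting'))        # -1.0 sentinel for empty input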
Example #2
File: fnord.py Project: cephurs/Fnord
def is_similar(value, strings):
    """
    Checks is a string is similar to one in a set of strings
    :param value:
    :param strings:
    :return:
    """
    levenshtein = Levenshtein()
    for s in strings:
        if levenshtein.distance(value, s) < (len(value) / 2):
            return True
    return False
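A hedged usage sketch (sample values only): a match is accepted when the edit distance is below half the query's length.

print(is_similar('color', {'colour', 'flavour'}))  # distance('color', 'colour') = 1 < 2.5 -> True
print(is_similar('ab', {'xyz'}))                   # distance 3 is not below 1.0 -> False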
Example #3
    def string_similarity(self, word1, word2):
        # LCS length normalised by the shorter word's length and the edit
        # distance; identical words (edit distance 0) give sigma = 1.
        levenshtein = Levenshtein()
        lcs = LongestCommonSubsequence()

        ed = levenshtein.distance(word1, word2)
        if ed == 0:
            sigma = lcs.length(word1, word2) / min(len(word1), len(word2))
        else:
            sigma = lcs.length(word1, word2) / (min(len(word1), len(word2)) * ed)
        return sigma
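A worked example of the measure (computed by hand; the StringSim wrapper class is hypothetical): for 'hello' and 'help' the LCS is 'hel' (length 3), the edit distance is 2, and the shorter length is 4.

sim = StringSim().string_similarity('hello', 'help')
print(sim)  # 3 / (4 * 2) = 0.375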
Example #4
def getCorrectWordUsingBigramIndex(word):
    bigram_index = indexer.bigramIndex()
    possible_words = {}
    levenshtein = Levenshtein()
    bigram = indexer.getBigramForWord(word)
    for b in bigram:
        if b in bigram_index:
            for term in bigram_index[b]:
                possible_words[term] = 0
    for p_word in possible_words:
        possible_words[p_word] = levenshtein.distance(word, p_word)
    possible_words = OrderedDict(
        sorted(possible_words.items(), key=lambda kv: kv[1]))
    return list(possible_words.keys())[0]
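A self-contained sketch of the same idea (the inline bigram_index and the bigrams helper are illustrative stand-ins for the external indexer):

from similarity.levenshtein import Levenshtein

def bigrams(word):
    return {word[i:i + 2] for i in range(len(word) - 1)}

bigram_index = {}  # bigram -> words containing it
for term in ('hello', 'help', 'yellow'):
    for b in bigrams(term):
        bigram_index.setdefault(b, set()).add(term)

levenshtein = Levenshtein()
candidates = set().union(*(bigram_index.get(b, set()) for b in bigrams('yelow')))
print(min(candidates, key=lambda t: levenshtein.distance('yelow', t)))  # 'yellow'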
Example #5
def levention():
    # Append a Levenshtein-distance column comparing the two question columns.
    l = []
    df = jac()
    q1 = dt.getQues1(df)
    q2 = dt.getQues2(df)
    levenshtein = Levenshtein()
    for i in range(len(q1)):
        l.append(levenshtein.distance(q1[i], q2[i]))
    df.insert(15, 'levenshtein', " ")  # placeholder column, filled by setleven
    dt.setleven(df, l)
    return df
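The helpers jac and dt are project-specific; a minimal pandas sketch of the same per-row pattern, with illustrative column names:

import pandas as pd
from similarity.levenshtein import Levenshtein

df = pd.DataFrame({'q1': ['hello world', 'foo'], 'q2': ['hello word', 'bar']})
levenshtein = Levenshtein()
df['levenshtein'] = [levenshtein.distance(a, b) for a, b in zip(df['q1'], df['q2'])]
print(df)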
Example #6
def ent_incorp(plots, stories):
    # Calculate entity incorporation between storyline and story: the edit
    # distance over entity-ID sequences, normalised by the longer sequence.
    # Returns summary statistics over all line pairs.
    entity_numbers = re.compile(r"(?<=\sent\s)\d+")
    ent_rate = []
    levenshtein = Levenshtein()
    for plot, story in zip(plots, stories):
        plot_entities = entity_numbers.findall(plot)
        story_entities = entity_numbers.findall(story)
        # edit distance between the entity sequence of storyline and story
        incorp_ent = levenshtein.distance(plot_entities, story_entities)
        try:
            # normalise the distance by the longer entity sequence
            ent_rate_each = incorp_ent / max(len(plot_entities), len(story_entities))
        except ZeroDivisionError:
            ent_rate_each = 0
        ent_rate.append(ent_rate_each)
    return mean(ent_rate), min(ent_rate), max(ent_rate), std(ent_rate)
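The lookbehind pattern pulls out entity IDs written as "ent <n>"; a small demonstration (sample string only):

import re

entity_numbers = re.compile(r"(?<=\sent\s)\d+")
print(entity_numbers.findall(" ent 0 met ent 1 at the market"))  # ['0', '1']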
Example #7
def respond(strg):
    levenshtein = Levenshtein()
    stemmer = StemmerFactory().create_stemmer()
    stopwords = StopWordRemoverFactory().create_stop_word_remover()

    kategori = model.predict([strg])

    txt = stopwords.remove(strg)
    txt = stemmer.stem(txt)

    # Nearest-neighbour lookup: return the stored response whose stemmed
    # message is closest (by edit distance) within the predicted category.
    best = float('inf')
    res = None

    for words in dataset:
        if words['category'] == kategori:
            distance = levenshtein.distance(txt, words['message_stemmed'])
            if distance < best:
                best = distance
                res = words
    return res['respond']
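A stripped-down sketch of that lookup with an inline dataset (the field names mirror the ones above; the category model and Sastrawi stemming are omitted):

from similarity.levenshtein import Levenshtein

dataset = [
    {'category': 'greeting', 'message_stemmed': 'halo', 'respond': 'Halo!'},
    {'category': 'greeting', 'message_stemmed': 'pagi', 'respond': 'Selamat pagi!'},
]
levenshtein = Levenshtein()
best = min(dataset, key=lambda w: levenshtein.distance('helo', w['message_stemmed']))
print(best['respond'])  # 'Halo!'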
Example #8
class original_table:
    levenshtein = Levenshtein()
    pvc = PageviewsClient("Looking for songs")
    client_credentials_manager = SpotifyClientCredentials(
        client_id='274d5abed01c455099ac8ad14c6a68e8',
        client_secret='7425a61db8ed45c48d1ccfaa39842e00')

    def __init__(self, decade):
        self.sp = spotipy.Spotify(
            client_credentials_manager=self.client_credentials_manager)
        self.decade = decade
        table_name = input("Please insert the original chart table name")
        self.original_decade_df = pd.read_csv("DFs/" + table_name + ".csv",
                                              encoding="utf-8")
        spoti = input("Add Spotify Features?")
        if spoti == 'Y' or spoti == 'y' or spoti == 'yes':
            self.add_spotify_features()
        wiki = input("Add Wikipedia Features?")
        if wiki == 'Y' or wiki == 'y' or wiki == 'yes':
            self.operate_wikipedia()
        #yearly = input("Find in yearly charts?")
        #if yearly == 'Y' or yearly == 'y' or yearly == 'yes':
        #    self.find_in_yearly_chart()
        p = Path('C:/Users/tomha/PycharmProjects/GlglzPredictor/DFs/')
        new_table_name = input(
            "Please insert the new original chart table name")
        self.original_decade_df.to_csv(Path(p, new_table_name + '.csv'),
                                       index=None,
                                       header=True)
        print("Table saved successfully!")

    def add_spotify_features(self):
        spotify_popularity = []
        spotify_valence = []
        spotify_tempo = []
        spotify_instrumentalness = []
        spotify_year = []
        for row in self.original_decade_df.iterrows():
            try:
                result = self.sp.search(q=row[1]['name'],
                                        type='track')['tracks']['items'][0]
                # fetch the audio features once instead of once per field
                features = self.sp.audio_features(result['id'])[0]
                spotify_valence.append(features['valence'])
                spotify_tempo.append(features['tempo'])
                spotify_instrumentalness.append(features['instrumentalness'])
                spotify_popularity.append(result['popularity'])
                spotify_year.append(
                    result['album']['release_date'].split("-")[0])
            except:
                spotify_valence.append('None')
                spotify_tempo.append('None')
                spotify_instrumentalness.append('None')
                spotify_popularity.append('None')
                spotify_year.append('None')
        self.original_decade_df['spotify_popularity'] = spotify_popularity
        self.original_decade_df['spotify_valence'] = spotify_valence
        self.original_decade_df['spotify_tempo'] = spotify_tempo
        self.original_decade_df[
            'spotify_instrumentalness'] = spotify_instrumentalness
        self.original_decade_df['spotify_year'] = spotify_year

    def scrape_info_from_wiki(self, page):
        song = {}
        try:
            page_html = wikipedia.WikipediaPage(page).html()
            prettified = BeautifulSoup(page_html, 'html.parser')
            info_table = prettified.findAll("table", {"class": "infobox"})
            song["result"] = page
            song["year"] = 0
            song["genres"] = []
            song["views"] = 0
            for row in info_table[0].find_all("tr"):
                row_year = row.find(text='Released')
                if row_year:
                    song["year"] = get_year(row)
                row_genres = row.find("td", {"class": "category"})
                if row_genres:
                    for genre in row_genres.find_all("a"):
                        if genre.has_attr("title"):
                            song["genres"].append(genre["title"])
            try:
                pop_dict = self.pvc.article_views('en.wikipedia', [page],
                                                  granularity='monthly',
                                                  start='20190101',
                                                  end='20190731')
                for _, monthly in pop_dict.items():
                    for views in monthly.values():
                        if views is not None:
                            song["views"] = song["views"] + views
            except:
                print("Can't Sum Up Views!")
        except Exception as e:
            print(e)
            song = {'result': 'None', 'year': 0, 'genres': [], 'views': 0}
        return song

    def get_song_from_wikipedia(self, song_name):
        song = {}
        results = wikipedia.search(song_name)
        found = 0
        for result in results:
            if self.levenshtein.distance(
                    result.split("(")[0],
                    song_name.split("-")[0]) <= 5 and found == 0:
                song = self.scrape_info_from_wiki(result)
                found = 1
        if found == 0:
            print("Name: " + song_name)
            print("Available Results: " + str(results))
            selection = int(input("Select the right result"))
            if selection in range(0, len(results)):
                song = self.scrape_info_from_wiki(results[selection])
            else:
                song = {'result': 'None', 'year': 0, 'genres': [], 'views': 0}
        return song

    def operate_wikipedia(self):
        songs_from_wikipedia = []
        for row in self.original_decade_df.iterrows():
            songs_from_wikipedia.append(
                self.get_song_from_wikipedia(row[1]['name']))
        songs_from_wikipedia = pd.DataFrame(songs_from_wikipedia)
        self.original_decade_df['wikipedia_year'] = songs_from_wikipedia[
            'year']
        self.original_decade_df['genres'] = songs_from_wikipedia['genres']
        self.original_decade_df['views'] = songs_from_wikipedia['views']

    def read_chart_file(self):
        songs = []
        year = 0
        file_name = "DFs/" + self.decade + ".txt"
        file = open(file_name, "r", encoding="utf8")

        for line in file.readlines():
            song = {}
            try:
                year = int(line)
            except:
                try:
                    song["name"] = line[line.find('"') + 1:len(line) - 1]
                    song["location"] = int(line.split(".")[0][0:2])
                    song["year"] = year
                    songs.append(song)
                except:
                    print("Empty Line")
        return songs

    def find_in_yearly_chart(self):
        yearly_positions = []
        songs_from_charts = self.read_chart_file()
        for row in self.original_decade_df.iterrows():
            found_song = 0
            for song in songs_from_charts:
                if self.levenshtein.distance(song['name'],
                                             row[1]['name']) <= 1:
                    yearly_positions.append(song['location'])
                    found_song = 1
            if found_song == 0:
                yearly_positions.append(0)
        self.original_decade_df['yearly_position'] = yearly_positions

    def fix_year(self):
        # Note: the year bounds are hardcoded for the eighties decade.
        year = []
        year_source = []
        for row in self.original_decade_df.iterrows():
            if 1979 < int(row[1]['spotify_year']) < 1990:
                year.append(int(row[1]['spotify_year']))
                year_source.append('spotify')
            elif 1979 < int(row[1]['wikipedia_year']) < 1990:
                year.append(int(row[1]['wikipedia_year']))
                year_source.append('wikipedia')
            else:
                year.append(int(input(row[1]['name'] + " " +
                                      row[1]['artist'])))
                year_source.append('manual')
        self.original_decade_df['year'] = year
        self.original_decade_df['year_source'] = year_source
Example #9
    def get_replacement(self, distance='lsh', threshold=.8):
        if distance == 'edit_distance':
            distance = Levenshtein()
        elif distance == 'normalized_edit_distance':
            distance = NormalizedLevenshtein()

        # For each token, look up its LSH bin; within each bin, group the
        # tokens that satisfy the distance threshold, e.g.
        # [white] = [whit, whie, whit]
        # [whie] = [whine, white]

        replacement = {}
        s = set(self.uniq_values)  # copy: the loop below consumes the set

        while len(s) > 0:
            token = rd.sample(list(s), 1)[0]
            s.remove(token)
            m = self._generate_hash(token)
            similarities = self.lsh.query(m)
            similarities = [
                _ for _ in similarities if _ not in replacement.values()
                and _ not in replacement.keys()
            ]
            if len(similarities) > 1:
                scores = {}
                bin_replacement = {}
                if distance != 'lsh':
                    for idx, item in enumerate(similarities):
                        count = 0
                        candidates = []
                        for idx_compared in range(idx + 1, len(similarities)):
                            candidate = similarities[idx_compared]
                            if item != candidate and distance.distance(
                                    item, candidate) < threshold:
                                if idx not in bin_replacement:
                                    bin_replacement[idx] = [idx_compared]
                                else:
                                    bin_replacement[idx].append(idx_compared)
                                if idx_compared not in bin_replacement:
                                    bin_replacement[idx_compared] = [idx]
                                else:
                                    bin_replacement[idx_compared].append(idx)

                    for idx_item, candidates in sorted(
                            bin_replacement.items(), key=lambda x: -len(x[1])):
                        item = similarities[idx_item]
                        if item in replacement:
                            item = replacement[item]
                        for idx_candidate in candidates:
                            candidate = similarities[idx_candidate]
                            if candidate != item and candidate not in replacement:
                                if item not in replacement:
                                    replacement[candidate] = item
                                elif replacement[item] != candidate:
                                    replacement[candidate] = replacement[item]
                else:
                    for candidate in similarities:
                        if candidate != token:
                            replacement[candidate] = token

        return replacement
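A reduced sketch of the replacement idea without the LSH machinery, using NormalizedLevenshtein (illustrative tokens; the import path follows Example #13):

from similarity.normalized_levenshtein import NormalizedLevenshtein

tokens = ['white', 'whit', 'whie', 'black']
nl = NormalizedLevenshtein()
replacement = {}
for t in tokens[1:]:
    # map each token to the first earlier token within the distance threshold
    for canon in tokens:
        if canon != t and canon not in replacement and nl.distance(canon, t) < 0.3:
            replacement[t] = canon
            break
print(replacement)  # {'whit': 'white', 'whie': 'white'}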
Example #10
    def similarity(self, question, answer):

        stopword = self.read_from(folder_path + '上证专用停用词.txt')
        stopwords = [sw.strip('\n').strip(' ') for sw in stopword]
        # print(stopwords)

        meaningful_words1 = []
        meaningful_words2 = []

        words2 = jieba.cut(str(question))
        words3 = jieba.cut(str(answer))
        for word in words2:
            if word not in stopwords:
                meaningful_words1.append(word)
        for word in words3:
            if word not in stopwords:
                meaningful_words2.append(word)
        s2 = ''.join(meaningful_words1)
        # print(s2)
        s3 = ''.join(meaningful_words2)
        a1 = Cosine(1)
        b1 = Damerau()
        c1 = Jaccard(1)
        d1 = JaroWinkler()
        e1 = Levenshtein()
        f1 = LongestCommonSubsequence()
        g1 = MetricLCS()
        h1 = NGram(2)
        i1 = NormalizedLevenshtein()
        j1 = OptimalStringAlignment()
        k1 = QGram(1)
        l1 = SorensenDice(2)
        m1 = WeightedLevenshtein(character_substitution=CharSub())

        # One feature per metric, in a fixed order: cosine sim/dist, Damerau,
        # Jaccard dist/sim, Jaro-Winkler dist/sim, Levenshtein, LCS,
        # MetricLCS, 2-gram, normalized Levenshtein dist/sim, OSA, QGram,
        # Sorensen-Dice dist/sim, weighted Levenshtein.
        metrics = [
            a1.similarity, a1.distance, b1.distance, c1.distance,
            c1.similarity, d1.distance, d1.similarity, e1.distance,
            f1.distance, g1.distance, h1.distance, i1.distance,
            i1.similarity, j1.distance, k1.distance, l1.distance,
            l1.similarity, m1.distance
        ]
        line_sim = [metric(s2, s3) for metric in metrics]

        return line_sim
Example #11
    return seperated_text


questions["listed_text"] = questions["Question"].apply(seperate_text)

split_questions = pd.DataFrame(questions.listed_text.values.tolist(),
                               questions.index).add_prefix("column_")
questions = pd.merge(questions,
                     split_questions,
                     left_index=True,
                     right_index=True).reset_index()

# compare the similarity of just column_0 to filter out questions that are too similar to
# one another, where the question is essentially the same but with a different object,
# e.g. "Have you taken: cocaine" or "Have you taken: opioids"
levenshtein = Levenshtein()

index = 1
threshold = 0.9
reference_string = questions["column_0"].iloc[0]

while index < len(questions) - 1:
    string_1 = reference_string
    string_2 = questions["column_0"].iloc[index]
    levenshtein_distance = levenshtein.distance(string_1, string_2)
    # normalise by the longer string so similarity falls in [0, 1]
    if len(string_1) > len(string_2):
        similarity = 1 - levenshtein_distance / len(string_1)
    else:
        similarity = 1 - levenshtein_distance / len(string_2)
    if similarity > threshold:
        questions = questions.drop(questions.index[[index]])
    else:
        index += 1  # advance only when no row was dropped, so none is skipped
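The manual 1 - distance / max(len) normalisation is exactly what NormalizedLevenshtein (see Example #13) computes; a sketch of the equivalent call (sample strings only):

from similarity.normalized_levenshtein import NormalizedLevenshtein

nolev = NormalizedLevenshtein()
print(nolev.similarity('have you taken cocaine', 'have you taken opioids'))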
Example #12
class decade_table:
    levenshtein = Levenshtein()
    decade_dict = {
        'seventies': [1969, 1980],
        'eighties': [1979, 1990],
        'nineties': [1989, 2000],
        'thousands': [1999, 2010]
    }
    p = Path('C:/Users/tomha/PycharmProjects/GlglzPredictor/DFs')

    def __init__(self):
        print("Which decade are we working on?")
        self.decade = input()
        self.songs_from_charts = self.read_chart_file()
        self.spoti_table = None
        self.wikipedia_table = None

        spoti = input("Create Spotify Table?")
        if spoti == 'Y' or spoti == 'y' or spoti == 'yes':
            self.spoti_table = spotify_table(self.decade)
        else:
            self.spoti_table = pd.read_csv("DFs/spotify_" + self.decade + ".csv")

        wiki = input("Create Wikipedia Table?")
        if wiki == 'Y' or wiki == 'y' or wiki == 'yes':
            self.wikipedia_table = wiki_table(self.decade)
        else:
            self.wikipedia_table = pd.read_csv("DFs/wiki_" + self.decade + ".csv")
        decade = input("Create decade Table?")
        if decade == 'Y' or decade == 'y' or decade == 'yes':
            self.decade_table = self.merge_table()
        else:
            self.decade_table = pd.read_csv("DFs/united_" + self.decade + ".csv")

        original = input("Handle original chart?")
        if original == 'Y' or original == 'y' or original == 'yes':
            self.original_chart = original_table(self.decade)
            self.update_year_in_original_chart()

    def merge_table(self):
        temp_table = pd.merge(self.spoti_table, self.wikipedia_table, right_on=['spotify_name'],
                                     left_on=['name'])
        list_for_new_table = []
        print("Please tag release years of the following songs")
        for row in temp_table.iterrows():
            list_for_new_table.append(self.create_new_row(row))
        new_table = pd.DataFrame(list_for_new_table)
        table_name = "united_" + self.decade
        new_table.to_csv(Path(self.p, table_name + '.csv'), index=None, header=True)
        print("Saved table successfully")
        return new_table

    def look_for_song(self, name):
        location = 0
        year = 0
        for song in self.songs_from_charts:
            if self.levenshtein.distance(song['name'].split(" - ")[1], name) <= 1:
                location = song['location']
                year = song['year']
        return location, year

    def create_new_row(self, old_row):
        row = old_row[1]
        new_row = {}
        new_row['name'] = self.decade + "_" + row['name'].split(" - ")[0]
        if len(row['name'].split(" - ")) > 1:
            new_row['version_exists'] = 1
        else:
            new_row['version_exists'] = 0
        new_row['artist'] = row['artist']
        # 'The' is three characters: skip "The "/"the " and take the next initial
        if row['artist'][:3] == 'The' or row['artist'][:3] == 'the':
            new_row['artist_first_letter'] = row['artist'][4]
        else:
            new_row['artist_first_letter'] = row['artist'][0]
        try:
            low, high = self.decade_dict[self.decade]
            if low < int(row['year_y']) < high:
                new_row['year'] = row['year_y']
                new_row['year_source'] = 'wikipedia'
            elif low < int(row['year_x']) < high:
                new_row['year'] = row['year_x']
                new_row['year_source'] = 'spotify'
            else:
                new_row['year'] = input(new_row['name'] + " " + new_row['artist'])
                new_row['year_source'] = 'manual'
        except:
            new_row['year'] = input(new_row['name'] + " " + new_row['artist'])
            new_row['year_source'] = 'manual'
        new_row['song_popularity'] = row['song_popularity']
        new_row['artist_popularity'] = row['artist_popularity']
        new_row['duration_ms'] = row['duration_ms']
        new_row['key'] = row['key']
        new_row['time_signature'] = row['time_signature']
        new_row['acousticness'] = row['acousticness']
        new_row['danceability'] = row['danceability']
        new_row['energy'] = row['energy']
        new_row['instrumentalness'] = row['instrumentalness']
        new_row['loudness'] = row['loudness']
        new_row['speechiness'] = row['speechiness']
        new_row['valence'] = row['valence']
        new_row['tempo'] = row['tempo']
        new_row['genres'] = row['genres']
        new_row['views'] = row['views']
        chart_location, chart_year = self.look_for_song(row['name'].split(" - ")[0])
        if chart_year > 0:
            new_row['year'] = chart_year
        new_row['old_chart_position'] = chart_location
        new_row['new_chart_location'] = old_row[0]
        return new_row

    def update_year_in_original_chart(self):
        yearly_positions = []
        for row in self.original_chart.original_decade_df.iterrows():
            chart_location, chart_year = self.look_for_song(row[1]['name'])
            yearly_positions.append(chart_location)
        self.original_chart.original_decade_df['yearly_position'] = yearly_positions
        p = Path('C:/Users/tomha/PycharmProjects/GlglzPredictor/DFs/')
        new_table_name = input("Please insert the new original chart table name")
        self.original_chart.original_decade_df.to_csv(Path(p, new_table_name + '.csv'), index=None, header=True)

    def read_chart_file(self):
        songs = []
        has_csv = input("Do you have a .csv file of the yearly charts?")
        if has_csv == 'y' or has_csv == 'Y' or has_csv == 'yes':
            yearly_csv = pd.read_csv('DFs/' + self.decade + ".csv")
            for song_in_csv in yearly_csv.iterrows():
                song = {}
                song['name'] = song_in_csv[1]['Artist'] + " - " + song_in_csv[1]['Song']
                song['location'] = song_in_csv[1]['Location']
                song['year'] = song_in_csv[1]['Year']
                songs.append(song)
        else:
            year = 0
            file_name = "DFs/" + self.decade + ".txt"
            file = open(file_name, "r", encoding="utf8")

            for line in file.readlines():
                song = {}
                try:
                    year = int(line)
                except:
                    try:
                        song["name"] = line[line.find('"') + 1: len(line)-1]
                        song["location"] = int(line.split(".")[0][0:2])
                        song["year"] = year
                        songs.append(song)
                    except:
                        print("Empty Line")
        print("Yearly charts are ready!")
        return songs
Example #13
from similarity.levenshtein import Levenshtein
from similarity.normalized_levenshtein import NormalizedLevenshtein
from similarity.cosine import Cosine
lev = Levenshtein()
nolev = NormalizedLevenshtein()
cosine = Cosine(4)
str1 = 'I enjoy playing football'
str2 = 'I love to play soccer'

print('Levenshtein distance:')
print(lev.distance(str1, str2))
print('Normalized Levenshtein similarity:')
print(nolev.similarity(str1, str2))
print('Cosine similarity:')
print(cosine.similarity(str1, str2))
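For NormalizedLevenshtein, similarity and distance are complements, which makes the printed value easy to interpret (a small check using the objects defined above):

assert abs(nolev.similarity(str1, str2) + nolev.distance(str1, str2) - 1.0) < 1e-9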
Example #14
class wiki_table:
    levenshtein = Levenshtein()
    pvc = PageviewsClient("Looking for songs")

    def __init__(self, decade):
        self.p = Path('C:/Users/tomha/PycharmProjects/GlglzPredictor/DFs')
        self.decade = decade
        self.genres_dict = {}
        self.df = self.create_table()

    def cut_year_from_cell(self, cell):
        try:
            return int(cell.contents[0])
        except:
            try:
                return int(cell.contents[0].split(" ")[1])
            except:
                try:
                    return int(cell.contents[0].split(" ")[2])
                except:
                    return 0

    def append_genre(self, genre):
        # Merge near-duplicate genre titles: ignore the first character
        # (case differences), an optional " music" suffix, or small typos.
        for genre_from_dict in self.genres_dict.keys():
            if genre_from_dict[1:] == genre["title"][1:]:
                return genre_from_dict
            elif genre_from_dict[1:] == genre["title"][1:] + " music":
                return genre_from_dict
            elif self.levenshtein.distance(genre_from_dict,
                                           genre["title"]) <= 2:
                return genre_from_dict
        return genre["title"]

    def get_year(self, row):
        year = 0
        found = 0
        year_cell = row.find("td", {"class": "plainlist"})
        if year_cell is not None:
            if year_cell.find("li") and found == 0:
                year = self.cut_year_from_cell(year_cell.find("li"))
                if year != 0:
                    print("Taken from List! " + str(year))
                    found = 1
                else:
                    print("year_li: " + str(year_cell.find("li")))
            elif year_cell.find("a") and year_cell.find("a").has_attr("title"):
                year = year_cell.find("a")["title"].split(" ")[0]
                print("Taken from Link! " + str(year))
                found = 1
            elif year_cell.find("span", {"class": "dtstart"}):
                try:
                    year = int(
                        year_cell.find("span", {
                            "class": "dtstart"
                        }).contents[0].split("-")[0])
                    print("Taken from span! " + str(year))
                    found = 1
                except:
                    print(year_cell)
            elif len(year_cell.contents) > 0:
                year = self.cut_year_from_cell(year_cell)
                if year != 0:
                    found = 1
            if found == 0:
                print("year cell: " + str(year_cell))
        return year

    def scrape_info_from_wiki(self, page):
        song = {}
        try:
            page_html = wikipedia.WikipediaPage(page).html()
            prettified = BeautifulSoup(page_html, 'html.parser')
            info_table = prettified.findAll("table", {"class": "infobox"})
            song["result"] = page
            song["year"] = 0
            song["genres"] = []
            song["views"] = 0
            for row in info_table[0].find_all("tr"):
                row_year = row.find(text='Released')
                if row_year:
                    song["year"] = self.get_year(row)
                row_genres = row.find("td", {"class": "category"})
                if row_genres:
                    for genre in row_genres.find_all("a"):
                        if genre.has_attr("title"):
                            song["genres"].append(self.append_genre(genre))
            try:
                pop_dict = self.pvc.article_views('en.wikipedia', [page],
                                                  granularity='monthly',
                                                  start='20190101',
                                                  end='20191001')
                for _, monthly in pop_dict.items():
                    for views in monthly.values():
                        if views is not None:
                            song["views"] = song["views"] + views
            except:
                print("Can't Sum Up Views!")
        except Exception as e:
            print(e)
            song = {'result': 'None', 'year': 0, 'genres': [], 'views': 0}
        return song

    def get_song_from_wikipedia(self, song_name):
        song = {}
        results = wikipedia.search(song_name)
        found = 0
        for result in results:
            if self.levenshtein.distance(
                    result.split("(")[0],
                    song_name.split("-")[0]) <= 5 and found == 0:
                song = self.scrape_info_from_wiki(result)
                found = 1
        if found == 0:
            print("Name: " + song_name)
            print("Available Results: " + str(results))
            selection = int(input("Select the right result"))
            if selection in range(0, len(results)):
                song = self.scrape_info_from_wiki(results[selection])
            else:
                song = {'result': 'None', 'year': 0, 'genres': [], 'views': 0}
        return song

    def create_table(self):
        spotify_table_name = "DFs/spotify_" + self.decade + ".csv"
        data_from_spotify = pd.read_csv(spotify_table_name)
        wiki_songs = []
        for row in data_from_spotify.iterrows():
            name = row[1]['name'].split("-")[0].replace(
                'remastered', '') + " - " + row[1]['artist']
            song = self.get_song_from_wikipedia(name)
            song["spotify_name"] = row[1]['name']
            song["spotify_artist"] = row[1]['artist']
            wiki_songs.append(song)
            # checkpoint after every row so a crash mid-scrape loses no work
            wiki_df = pd.DataFrame(wiki_songs)
            table_name = "wiki_" + self.decade
            wiki_df.to_csv(Path(self.p, table_name + '.csv'),
                           index=None,
                           header=True)
            if len(wiki_songs) % 100 == 0:
                print("Fetched " + str(len(wiki_songs)) + " songs")
        wiki_df = pd.DataFrame(wiki_songs)
        table_name = "wiki_" + self.decade
        wiki_df.to_csv(Path(self.p, table_name + '.csv'),
                       index=None,
                       header=True)
        print("Saved table successfully")
        return wiki_df
Example #15
from similarity.levenshtein import Levenshtein
from similarity.normalized_levenshtein import NormalizedLevenshtein
from similarity.damerau import Damerau
from similarity.optimal_string_alignment import OptimalStringAlignment
from similarity.jarowinkler import JaroWinkler
from similarity.longest_common_subsequence import LongestCommonSubsequence
from similarity.metric_lcs import MetricLCS
from similarity.ngram import NGram
from similarity.qgram import QGram
from similarity.cosine import Cosine
from similarity.jaccard import Jaccard
from similarity.sorensen_dice import SorensenDice
from scipy.spatial.distance import euclidean, cosine, cityblock
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
import numpy as np

# Initialize at import time
levenshtein = Levenshtein()
norm_levenshtein = NormalizedLevenshtein()
damerau = Damerau()
optimal_string_alignment = OptimalStringAlignment()
jarowinkler = JaroWinkler()
lcs = LongestCommonSubsequence()
metric_lcs = MetricLCS()
ngram = NGram()
qgram = QGram()
dice = SorensenDice()
cos = Cosine(5)
jaccard = Jaccard(5)

similarity_functions = [
    norm_levenshtein.similarity, lambda a, b: 1 - metric_lcs.distance(a, b),
    lambda a, b: 1 - ngram.distance(a, b), cos.similarity, dice.similarity
]
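Each entry maps a string pair to a similarity in [0, 1]; a usage sketch (sample strings and the scores name are illustrative):

scores = [f('levenshtein', 'levenstein') for f in similarity_functions]
print(scores)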