            for tweet in tweets:

                # parse the JSON string into a dictionary
                tweet = json.loads(tweet)

                # check if tweet ID already processed
                if '{}{}'.format(mode, str(tweet['id'])) not in tweet_tracker:

                    # add to tracker so we won't process the same id + type again
                    tweet_tracker.add(mode + str(tweet['id']))

                    # create new document so we can save it to the database
                    doc = {}
                    # save the tweet ID (this is the unique identifier for each tweet)
                    doc['id'] = tweet['id']
                    # the research mode is stored as tweet_type (for instance, 'interdisciplinary')
                    doc['tweet_type'] = mode
                    # save the date of the tweet
                    doc['tweet_date'] = datetime.strptime(
                        re.sub(r'[+-]([0-9])+', '', tweet['created_at']), '%a %b %d %H:%M:%S %Y')
                    # save the content of the tweet (this is the full raw content, we will parse out certain fields later on)
                    doc['tweet_raw'] = tweet

                    # save document to database
                    db.insert_one_to_collection(
                        collection='raw_tweets', doc=doc)

                else:
                    logging.info(
                        'Tweet ID {} already processed, skipping...'.format(tweet['id']))
                        if ' bot ' not in bio:
                            logging.info(
                                'Academic word match in bio: {}'.format(w))
                            matches.append(w)

                if len(matches) == 0:
                    continue

                # add matches so we can use it later
                d['matches'] = matches

                # remove _id so we can save it to database again but different collection
                del d['_id']

                # save doc to filtered_tweets collection
                db.insert_one_to_collection(collection='filtered_tweets',
                                            doc=d)
            else:
                logging.debug('Tweet {} already processed'.format(d['id']))

    # execute if set to True
    if clean_tweets:
        """
			Clean raw tweets that have already been filtered (if not filtered, set filter_tweets to True first)

			filtered tweets are stored in the collection 'filtered_tweets'

			After cleaning, tweets are stored in the collection 'target_tweets'

			Cleaning/preprocessing steps

				- replace new lines
		"""
            if tweet_id not in processed_tweets:

                # get content of the tweet
                tweet = get_tweet_by_id(tweet_id)

                # create new document to insert into the database
                new_doc = {}
                # add label
                new_doc['label'] = tweet_label
                # add tweet id
                new_doc['tweet_id'] = tweet_id
                # add raw tweet content
                new_doc['tweet'] = tweet

                # insert into database
                db.insert_one_to_collection(collection=db_collection,
                                            doc=new_doc)

    # execute if set to True
    if get_semeval_tweets:
        """
			The Sentiment Analysis in Twitter 2016 dataset, also known as SemEval-2016 Task 4, was created for various sentiment classification tasks. The task is organized as a set of 
			sub-tasks in which teams compete, such as classifying tweets into positive, negative, and neutral sentiment, or estimating the distribution of sentiment classes. 
			Teams with better classification accuracy, or whatever performance measure applies, rank higher. The dataset consists of training, development, and development-test data that together 
			contain 3,918 positive, 2,736 neutral, and 1,208 negative tweets. The original dataset contained a total of 10,000 tweets -- 100 tweets for each of 100 topics. Each tweet was labeled 
			by 5 human annotators, and only tweets for which 3 out of 5 annotators agreed on the sentiment label were kept. The dataset is available from http://alt.qcri.org/semeval2016/task4/.

			Tweets are saved into the collection 'semeval_tweets_raw'
		"""

        # name of the collection to store tweets to
        db_collection = 'semeval_tweets_raw'
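        # A minimal sketch (not part of the original fragment) of how the SemEval
        # annotation file could be parsed into (tweet ID, sentiment label) pairs,
        # which the loop shown earlier in this example then resolves with
        # get_tweet_by_id() and stores in db_collection. The file location and the
        # tab-separated column layout are assumptions; adjust them to the files
        # downloaded from the task website.
        semeval_file = 'files/semeval/semeval2016-task4.tsv'  # hypothetical path
        semeval_labels = []

        with open(semeval_file, 'r') as fh:
            for line in fh:

                # skip empty lines
                if not line.strip():
                    continue

                # assumed layout: tweet ID <tab> topic <tab> sentiment label
                tweet_id, _, tweet_label = line.rstrip('\n').split('\t')[:3]

                # collect the pair for the storage loop above
                semeval_labels.append((tweet_id, tweet_label))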
Example #4
class Interpretation():
    def __init__(self):

        logging.info('Initialized {}'.format(self.__class__.__name__))

        # instantiate database
        self.db = MongoDatabase()

        # location to store plots
        self.plot_save_folder = os.path.join('files', 'plots')

        # location to store tables to
        self.table_save_folder = os.path.join('files', 'tables')

    def infer_document_topic_distribution(
        self,
        K=10,
        dir_prior='auto',
        random_state=42,
        num_pass=15,
        iteration=200,
        top_n_words=10,
        models_folder=os.path.join('files', 'models'),
        lda_files_folder=os.path.join('files', 'lda')):
        """
			Infer the document-topic distribution per publication. The LDA model gives us the word probabilities per topic, but we also want to know which
			topics we find within each document. Here we infer that document-topic distribution and save it to the database so we can use it later
			to plot some interesting views of the corpus.

			The values for K, dir_prior, random_state, num_pass and iteration become visible when plotting the coherence scores. Use the model that 
			achieved the highest coherence score.

			Parameters
			-----------
			K: int
				number of topics that resulted in the best decomposition of the underlying corpora
			dir_prior: string
				Dirichlet prior: 'auto', 'symmetric', or 'asymmetric'
			random_state: int
				seed value for random initialization
			num_pass: int
				number of passes over the full corpus
			iteration: int
				max iterations for convergence
			top_n_words: int
				only print out the top N high-probability words
			models_folder: os.path
				location of created LDA models
			lda_files_folder: os.path
				location of the LDA corpus and dictionary

		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # read dictionary and corpus
        dictionary, corpus = get_dic_corpus(lda_files_folder)

        # load LDA model according to parameters
        model = load_lda_model(
            os.path.join(models_folder, str(K), dir_prior, str(random_state),
                         str(num_pass), str(iteration)))

        # load docs
        D = self.db.read_collection(collection='publications_raw')

        # loop through all the documents to infer the document-topic distribution
        for i, d in enumerate(D):

            # check if tokens are present; in case some documents couldn't properly be tokenized during pre-processing phase
            if d.get('tokens') is not None:

                # print to console
                print_doc_verbose(i, D.count(), d['journal'], d['year'],
                                  d['title'])

                # create bag of words from tokens
                bow = model.id2word.doc2bow(d['tokens'])

                # infer document-topic distribution
                topics = model.get_document_topics(bow, per_word_topics=False)

                # convert to dictionary: here we convert the topic number to string because mongodb will complain otherwise
                # you will get a message that documents can only have string keys
                dic_topics = {}
                for t in topics:
                    dic_topics[str(t[0])] = float(t[1])

                # create a new document to add to the database, this time in a different collection
                insert_doc = {
                    'journal': d['journal'],
                    'year': d['year'],
                    'title': d['title'],
                    'topics': dic_topics
                }

                # save insert_doc to database within publications collection
                self.db.insert_one_to_collection('publications', insert_doc)

    def get_document_title_per_topic(self):
        """
			Get document title per topic
			Here we obtain the title of each publication together with its most dominant topic
			The most dominant topic is the topic with the largest proportion within that publication
			So if a document has topics A = 10%, B = 30%, and C = 60%, then C is the dominant topic
			We can use the titles grouped by dominant topic to get insight into a suitable label for that topic
		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # load docs
        D = self.db.read_collection(collection='publications')

        # empty list where we can append publication titles to
        titles = []

        # loop through all the docs
        for i, d in enumerate(D):

            # print to console
            print_doc_verbose(i, D.count(), d['journal'], d['year'],
                              d['title'])

            # get the dominant topic
            dominant_topic = max(d['topics'].iteritems(), key=itemgetter(1))
            # get the topic ID and percentage
            dominant_topic_id, dominant_topic_percentage = dominant_topic[
                0], dominant_topic[1]

            # append to list
            titles.append([
                d['year'], d['title'], d['journal'], dominant_topic_id,
                dominant_topic_percentage
            ])

        # save to CSV
        save_csv(titles, 'titles-to-topics', folder=self.table_save_folder)

    def plot_topics_over_time(self, plot_save_name='topics-over-time.pdf'):
        """
			Plot cumulative topic distribution over time

			Parameters
			----------
			plot_save_name: string
				name of the plot
		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # load docs
        D = self.db.read_collection(collection='publications')

        # create dictionary where we can obtain the topic distribution per year
        year_to_topics = get_year_to_topics(D)

        # calculate the cumulative topic distribution: basically the average distribution per year
        year_to_cum_topics = get_year_to_cum_topics(year_to_topics)

        # convert dictionary to pandas dataframe
        df = pd.DataFrame.from_dict(year_to_cum_topics)

        # create the plot
        fig, axs = plt.subplots(2, 5, figsize=(15, 10))
        axs = axs.ravel()

        # loop over each row of the dataframe
        for index, row in df.iterrows():

            # get year values
            x = df.columns.values.tolist()
            # get topic proportions
            y = row.tolist()

            # add to plot
            axs[index].plot(x,
                            y,
                            'o--',
                            color='black',
                            linewidth=1,
                            label="Topic prevalence")
            axs[index].set_title(get_topic_label(index), fontsize=14)
            axs[index].set_ylim([0, 0.4])

        # save plot
        plt.savefig(os.path.join(self.plot_save_folder, plot_save_name),
                    bbox_inches='tight')
        plt.close()

    def plot_topics_over_time_stacked(
            self, plot_save_name='topics-over-time-stacked.pdf'):
        """
			Plot topics over time stacked

			Parameters
			----------
			plot_save_name: string
				name of the plot
		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # load docs
        D = self.db.read_collection(collection='publications')

        # create dictionary where we can obtain the topic distribution per year
        year_to_topics = get_year_to_topics(D)

        # calculate the cumulative topic distribution: basically the average distribution per year
        year_to_cum_topics = get_year_to_cum_topics(year_to_topics)

        # convert dictionary to pandas dataframe
        df = pd.DataFrame.from_dict(year_to_cum_topics)

        # transpose dataframe
        df = df.transpose()

        # change column headers into topic labels
        df.columns = [get_topic_label(x) for x in df.columns.values]

        # plot the dataframe
        ax = df.plot(figsize=(15, 8),
                     kind='area',
                     colormap='Spectral_r',
                     rot=45,
                     grid=False)
        # set values for x-axis
        plt.xticks(df.index)
        # limit the x-axis
        plt.xlim(min(df.index), max(df.index))
        # limit the y-axis
        plt.ylim(0, 1)
        # get the legend
        handles, labels = ax.get_legend_handles_labels()
        # position it right of the figure
        plt.legend(reversed(handles),
                   reversed(labels),
                   loc='right',
                   bbox_to_anchor=(1.35, 0.50),
                   ncol=1,
                   fancybox=False,
                   shadow=False,
                   fontsize=16)

        # save plot
        plt.savefig(os.path.join(self.plot_save_folder, plot_save_name),
                    bbox_inches='tight')
        plt.close()

    def plot_topic_co_occurrence(self,
                                 plot_save_name='topic-co-occurrence.pdf'):
        """
			Plot topic co-occurrence
			
			Parameters
			----------
			plot_save_name: string
				name of the plot
		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # load docs
        D = self.db.read_collection(collection='publications')

        # create an empty dictionary mapping each dominant topic id to the topic distributions of the documents with that dominant topic
        dominant_id_to_topics = {}

        for d in D:

            # sort topics and create list
            topics = [
                value for key, value in sorted(d['topics'].iteritems(),
                                               key=lambda x: int(x[0]))
            ]

            # get the dominant (max) topic id
            max_topic_id = topics.index(max(topics))

            # check if topic ID key already created
            if max_topic_id not in dominant_id_to_topics:
                dominant_id_to_topics[max_topic_id] = []

            dominant_id_to_topics[max_topic_id].append(topics)

        # create empty dictionary where we can have the cumulative topic distribution per dominant topic ID
        dominant_id_to_cum_topics = {}
        for k, v in dominant_id_to_topics.iteritems():

            # calculate mean and add to dictionary
            dominant_id_to_cum_topics[k] = np.mean(np.array(v), axis=0) * 100.

        # convert dictionary to pandas dataframe
        df = pd.DataFrame.from_dict(dominant_id_to_cum_topics)

        # change column headers into topic labels
        df.columns = [get_topic_label(x) for x in df.columns.values]
        df.index = [get_topic_label(x) for x in df.index.values]

        # create max column
        df['max'] = 0.

        # keep track of new index
        new_index = []

        # add max column so we can sort on it later
        for index, row in df.iterrows():

            # add value to max column
            df.loc[index, 'max'] = max(row)
            # make self co-occurrence zero
            df.loc[index, index] = 0.0

            # add new index names to tracker so we can rename it later
            new_index.append('{} ({}%)'.format(index, round(max(row), 2)))

        # update index name
        df.index = new_index

        # sort by max column
        df = df.sort_values(by=['max'], ascending=False)

        # delete max column
        df = df.drop(['max'], axis=1)

        # sort columns alphabetically by topic label
        df = df.reindex(sorted(df.columns), axis=1)

        # plot the heatmap
        ax = sns.heatmap(df,
                         cmap="Blues",
                         annot=True,
                         vmin=0.,
                         vmax=10.,
                         square=True,
                         annot_kws={"size": 11},
                         fmt='.1f',
                         mask=df <= 0.0,
                         linewidths=.5,
                         cbar=False,
                         yticklabels=True)

        # adjust the figure somewhat
        ax.xaxis.tick_top()
        plt.yticks(rotation=0)
        plt.xticks(rotation=90, ha='left')
        fig = ax.get_figure()
        fig.set_size_inches(19, 6)

        # save figure
        fig.savefig(os.path.join(self.plot_save_folder, plot_save_name),
                    bbox_inches='tight')

    def plot_topics_in_journals(self, plot_save_name='topics-in-journals.pdf'):
        """
			Plot the distribution of topics within each of the journals in our dataset.
			This plot provides an overview of the topical content published by a journal given the time frame of our dataset

			Parameters
			----------
			plot_save_name: string
				name of the plot
		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # create dictionary where we have key = journal, and value = [topic_distributions]
        journal_to_topics = {}

        # load documents from database
        D = self.db.read_collection(collection='publications')

        # loop over the documents, read in the topic distribution, and add to the correct journal key
        for i, d in enumerate(D):

            # log progress for every 1000th document
            if i % 1000 == 0:
                logging.debug('Processing document {}/{}'.format(i, D.count()))

            # get the name of the journal
            journal = d['journal']

            # check if topics are created
            if d.get('topics') is not None:

                # add journal as key to the dictionary if not already exists
                if journal not in journal_to_topics:

                    # add journal as key with empty list
                    journal_to_topics[journal] = []

                # sort topics and create as list
                topics = [
                    value for key, value in sorted(d['topics'].iteritems(),
                                                   key=lambda x: int(x[0]))
                ]

                # append topic distribution to dictionary
                journal_to_topics[journal].append(topics)

        # get cumulative topic distributions for each journal
        journal_to_cum_topics = get_journal_to_cum_topics(journal_to_topics)

        # convert to Pandas DataFrame
        df = pd.DataFrame.from_dict(journal_to_cum_topics).T

        # change column labels to topic labels
        df.columns = [get_topic_label(x) for x in df.columns.values]

        # plot the heatmap
        ax = sns.heatmap(df,
                         cmap="Blues",
                         annot=True,
                         vmin=0.,
                         vmax=.3,
                         square=True,
                         annot_kws={"size": 11},
                         fmt='.2f',
                         mask=df <= 0.0,
                         linewidths=.5,
                         cbar=False,
                         yticklabels=True)

        # adjust the figure somewhat
        ax.xaxis.tick_top()
        plt.yticks(rotation=0)
        plt.xticks(rotation=90, ha='left')
        fig = ax.get_figure()
        fig.set_size_inches(10, 10)

        # save figure
        fig.savefig(os.path.join(self.plot_save_folder, plot_save_name),
                    bbox_inches='tight')

        # close the plot
        plt.close()
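# A minimal usage sketch for the Interpretation class above. The parameter values
# (K=9, dir_prior='auto', random_state=42, num_pass=15, iteration=200) are
# assumptions standing in for whichever model achieved the highest coherence score.
if __name__ == '__main__':

    interpretation = Interpretation()

    # infer and store the per-document topic distribution of the chosen model
    interpretation.infer_document_topic_distribution(K=9, dir_prior='auto',
                                                     random_state=42, num_pass=15,
                                                     iteration=200)

    # tables and plots built on top of the inferred topic distributions
    interpretation.get_document_title_per_topic()
    interpretation.plot_topics_over_time()
    interpretation.plot_topics_over_time_stacked()
    interpretation.plot_topic_co_occurrence()
    interpretation.plot_topics_in_journals()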
Example #5
class Evaluation():
    def __init__(self):

        logging.info('Initialized {}'.format(self.__class__.__name__))

        # instantiate database
        self.db = MongoDatabase()

    def calculate_coherence(self,
                            file_folder=os.path.join('files', 'lda'),
                            models_folder=os.path.join('files', 'models')):
        """
			Calculate the CV coherence score for each of the created LDA models

			Parameters
			----------
			file_folder: os.path
				location of the dictionary and corpus for gensim
			models_folder: os.path
				location where the lda model is saved
		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # read dictionary and corpus
        dictionary, corpus = get_dic_corpus(file_folder)

        # load the tokens of each document from the database (needed for the c_v coherence measure)
        texts = [
            x['tokens'] for x in self.db.read_collection('publications_raw')
        ]

        # get path location for models
        M = [
            x for x in read_directory(models_folder) if x.endswith('lda.model')
        ]

        # read processed models from database
        processed_models = [
            '{}-{}-{}-{}-{}'.format(x['k'], x['dir_prior'], x['random_state'],
                                    x['num_pass'], x['iteration'])
            for x in self.db.read_collection('coherence')
        ]

        # calculate coherence score for each model
        for i, m in enumerate(M):

            logging.info('Calculating coherence score: {}/{}'.format(
                i + 1, len(M)))

            logging.debug(m)

            # number of topics
            k = m.split(os.sep)[2]
            # different dirichlet priors
            dir_prior = m.split(os.sep)[3]
            # random initialization seed
            random_state = m.split(os.sep)[4]
            # passes over the corpus
            num_pass = m.split(os.sep)[5]
            # max iteration for convergence
            iteration = m.split(os.sep)[6]

            logging.info(
                'k: {}, dir_prior: {}, random_state: {}, num_pass: {}, iteration: {}'
                .format(k, dir_prior, random_state, num_pass, iteration))

            # check if coherence score already obtained
            if '{}-{}-{}-{}-{}'.format(k, dir_prior, random_state, num_pass,
                                       iteration) not in processed_models:

                # load LDA model
                model = models.LdaModel.load(m)

                # build the c_v coherence model
                coherence_c_v = CoherenceModel(model=model,
                                               texts=texts,
                                               dictionary=dictionary,
                                               coherence='c_v')

                # get coherence score
                score = coherence_c_v.get_coherence()

                # logging output
                logging.info('coherence score: {}'.format(score))

                # save score to database
                doc = {
                    'k': k,
                    'dir_prior': dir_prior,
                    'random_state': random_state,
                    'num_pass': num_pass,
                    'iteration': iteration,
                    'coherence_score': score
                }
                self.db.insert_one_to_collection('coherence', doc)

            else:
                logging.info(
                    'coherence score already calculated, skipping ...')
                continue

    def plot_coherence(self,
                       min_k=2,
                       max_k=20,
                       save_location=os.path.join('files', 'plots'),
                       plot_save_name='coherence_scores_heatmap.pdf'):
        """
			Read coherence scores from database and create heatmap to plot scores

			Parameters
			-----------
			min_k: int 
				lowest number of topics created when creating LDA models. Here 2
			max_k: int
				highest number of topics created when creating LDA models. Here 20
			save_location: os.path
				location where to save the plot
			plot_save_name: string
				name for the plot
		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # make sure plot save location exists
        create_directory(save_location)

        # read documents from database that contain coherence scores
        D = list(self.db.read_collection(collection='coherence'))

        # convert data from document into a list
        data = [[
            int(x['k']), x['dir_prior'], x['random_state'], x['num_pass'],
            x['iteration'], x['coherence_score']
        ] for x in D]

        # create empty dataframe where we can store our scores
        df = pd.DataFrame()

        # loop through the values of the k parameter and find the relevant scores for each grid-search combination
        for k in range(min_k, max_k + 1):

            # create dataframe to temporarily store values
            df_temp = pd.DataFrame(index=[k])

            # loop through the data to obtain only the scores for a specific k value
            for row in sorted(data):
                if row[0] == k:
                    df_temp['{}-{}-{}-{}'.format(
                        row[1], row[2], row[3], row[4])] = pd.Series(row[5],
                                                                     index=[k])

            # append the temporary dataframe for a single k value to the full dataframe
            df = df.append(df_temp)

        # transpose the dataframe
        df = df.transpose()

        # plot the heatmap
        ax = sns.heatmap(df,
                         cmap="Blues",
                         annot=True,
                         vmin=0.500,
                         vmax=0.530,
                         square=True,
                         annot_kws={"size": 11},
                         fmt='.3f',
                         linewidths=.5,
                         cbar_kws={'label': 'coherence score'})

        # adjust the figure somewhat
        ax.xaxis.tick_top()
        plt.yticks(rotation=0)
        plt.xticks(rotation=0, ha='left')
        fig = ax.get_figure()
        fig.set_size_inches(19, 6)

        # save figure
        fig.savefig(os.path.join(save_location, plot_save_name),
                    bbox_inches='tight')

    def output_lda_topics(self,
                          K=9,
                          dir_prior='auto',
                          random_state=42,
                          num_pass=15,
                          iteration=200,
                          top_n_words=10,
                          models_folder=os.path.join('files', 'models'),
                          save_folder=os.path.join('files', 'tables')):
        """
			Create a table with LDA topic words and their probabilities
			Creates a table of topic words and probabilities + topics in a list format
			
			Values for K, dir_prior, random_state, num_pass and iteration become visible when plotting the coherence scores. Use the model that 
			achieved the highest coherence score and plug in the correct values. These values determine the file location of the LDA model,
			for example: files/models/2/auto/42/5/200/lda.model

			Parameters
			-----------
			K: int
				number of topics that resulted in the best decomposition of the underlying corpora
			dir_prior: string
				dirichlet priors 'auto', 'symmetric', 'asymmetric'
			random_state: int
				seed value for random initialization
			num_pass: int
				number of passes over the full corpus
			iteration: int
				max iterations for convergence
			top_n_words: int
				only print out the top N high probability words
			models_folder: os.path
				location of created LDA models
			save_folder: os.path
				location to store the tables

		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # load LDA model according to parameters
        model = load_lda_model(
            os.path.join(models_folder, str(K), dir_prior, str(random_state),
                         str(num_pass), str(iteration)))

        # define empty lists so we can fill them with words
        topic_table, topic_list = [], []

        # loop through all K topics
        for k in range(K):

            # create topic header, e.g. (1) TOPIC X
            topic_table.append([
                '{}'.format(
                    get_topic_label(k, labels_available=False).upper())
            ])
            # add column for word and probability
            topic_table.append(["word", "prob."])

            list_string = ""
            topic_string = ""
            topic_string_list = []

            # get topic distribution for topic k and return only top-N words
            scores = model.print_topic(k, top_n_words).split("+")

            # loop through each word and probability
            for score in scores:

                # extract the score and trim spaces
                score = score.strip()

                # split on *
                split_scores = score.split('*')

                # get percentage
                percentage = split_scores[0]
                # get word
                word = split_scores[1].strip('"')

                # add word and percentage to table
                topic_table.append(
                    [word.upper(), "" + percentage.replace("0.", ".")])

                # add word to list table
                list_string += word + ", "

            # add empty line for the table
            topic_table.append([""])
            # add topic words to list
            topic_list.append([str(k + 1), list_string.rstrip(", ")])

        # save to CSV
        save_csv(topic_list, 'topic-list', folder=save_folder)
        save_csv(topic_table, 'topic-table', folder=save_folder)
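# A minimal usage sketch for the Evaluation class above: score every trained LDA
# model, plot the grid-search results as a heatmap, and export the topic words of
# the best model. K=9 is an assumption; use whichever value the coherence heatmap
# identifies as best.
if __name__ == '__main__':

    evaluation = Evaluation()

    # compute and store a c_v coherence score for every model found on disk
    evaluation.calculate_coherence()

    # heatmap of coherence scores across the grid-search parameters
    evaluation.plot_coherence(min_k=2, max_k=20)

    # topic-word tables for the chosen model
    evaluation.output_lda_topics(K=9, dir_prior='auto', random_state=42,
                                 num_pass=15, iteration=200, top_n_words=10)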
Example #6
class Preprocessing():
    def __init__(self):

        logging.info('Initialized {}'.format(self.__class__.__name__))

        # instantiate database
        self.db = MongoDatabase()

        # set utf8 encoding
        reload(sys)
        sys.setdefaultencoding('utf8')

    def full_text_preprocessing(self, pdf_folder=os.path.join('files', 'pdf')):
        """
			preprocess full-text publications
			- convert pdf to plain text
			- correct for carriage returns
			- correct for end-of-line hyphenation
			- remove boilerplate
			- remove bibliography
			- remove acknowledgements

			Parameters
			----------
			pdf_folder : os.path
				location where PDF documents are stored
		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # read pdf files that need to be converted
        F = [x for x in read_directory(pdf_folder) if x[-4:] == '.pdf']

        # read documents from DB that have already been processed so we can skip them
        processed_documents = [
            '{}-{}-{}'.format(x['journal'], x['year'], x['title'])
            for x in self.db.read_collection(collection='publications_raw')
        ]

        # loop over each file, convert the PDF to plain text, and save the metadata to the DB
        for i, f in enumerate(F):

            # extract meta data from folder structure and file name
            journal = f.split('/')[2]
            year = f.split('/')[3]
            title = f.split('/')[4].replace('-', ' ')[4:-4].strip()

            # console output
            print_doc_verbose(i, len(F), journal, year, title)

            # check if PDF has already been processed
            if '{}-{}-{}'.format(journal, year, title) in processed_documents:
                logging.info('PDF document already processed, skipping ...')
                continue

            # convert content of PDF to plain text
            content = pdf_to_plain(f)

            # check if content could be extracted
            if content is not None:

                # fix soft hyphen
                content = content.replace(u'\xad', "-")
                # fix em-dash
                content = content.replace(u'\u2014', "-")
                # fix en-dash
                content = content.replace(u'\u2013', "-")
                # minus sign
                content = content.replace(u'\u2212', "-")
                # fix hyphenation that occurs just before a new line
                content = content.replace('-\n', '')
                # remove new lines/carriage returns
                content = content.replace('\n', ' ')

                # correct for ligatures
                content = content.replace(u'\ufb02', "fl")  # fl ligature
                content = content.replace(u'\ufb01', "fi")  # fi ligature
                content = content.replace(u'\ufb00', "ff")  # ff ligature
                content = content.replace(u'\ufb03', "ffi")  # ffi ligature
                content = content.replace(u'\ufb04', "ffl")  # ffl ligature
                """ 
					Remove boilerplate content:

					Especially journal publications have lots of boilerplate content on the titlepage. Removing of this is specific for each
					journal and you can use some regular expressions to identify and remove it.
				"""
                """
					Remove acknowledgemends and/or references
					This is a somewhat crude example
				"""
                if content.rfind("References") > 0:
                    content = content[:content.rfind("References")]
                """
				 	Remove acknowledgements
				"""
                if content.rfind("Acknowledgment") > 0:
                    content = content[:content.rfind("Acknowledgment")]

                # prepare dictionary to save into MongoDB
                doc = {
                    'journal': journal,
                    'title': title,
                    'year': year,
                    'content': content
                }

                # save to database
                self.db.insert_one_to_collection(doc=doc,
                                                 collection='publications_raw')

    def general_preprocessing(self, min_bigram_count=5):
        """
			General preprocessing of publications (used for abstracts and full-text)

			Parameters
			----------
			min_bigram_count : int (optional)
				minimum number of times a bigram must occur to be included in the list of bigrams; bigrams with a lower frequency are discarded
		"""

        logging.info('Start {}'.format(sys._getframe().f_code.co_name))

        # read document collection
        D = self.db.read_collection(collection='publications_raw')

        # setup spacy natural language processing object
        nlp = setup_spacy()

        # loop through the documents and tokenize the content
        for i, d in enumerate(D):

            # check if tokens are already present, if so, skip
            if d.get('tokens') is None:

                # print to console
                print_doc_verbose(i, D.count(), d['journal'], d['year'],
                                  d['title'])

                # get content from document and convert to spacy object
                content = nlp(d['content'])

                # tokenize and lemmatize, remove punctuation and single-character words
                unigrams = word_tokenizer(content)

                # get entities
                entities = named_entity_recognition(content)

                # get bigrams
                bigrams = get_bigrams(" ".join(unigrams))
                bigrams = [['{} {}'.format(x[0], x[1])] * y
                           for x, y in Counter(bigrams).most_common()
                           if y >= min_bigram_count]
                bigrams = list(itertools.chain(*bigrams))

                d['tokens'] = unigrams + bigrams + entities

                # save the updated document to the database
                self.db.update_collection(collection='publications_raw', doc=d)

            else:
                logging.debug('Document already tokenized, skipping ...')
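# A self-contained illustration (not from the original code) of the bigram filter
# used in general_preprocessing above: only bigrams occurring at least
# min_bigram_count times are kept, and each kept bigram is repeated once per
# occurrence so its frequency is preserved in the final token list. The zip() call
# is a stand-in for the get_bigrams() helper.
if __name__ == '__main__':

    import itertools
    from collections import Counter

    tokens = ['topic', 'model', 'topic', 'model', 'latent', 'dirichlet', 'topic', 'model']
    min_bigram_count = 2

    bigrams = zip(tokens, tokens[1:])
    kept = [['{} {}'.format(x[0], x[1])] * y
            for x, y in Counter(bigrams).most_common()
            if y >= min_bigram_count]

    # prints: ['topic model', 'topic model', 'topic model']
    print(list(itertools.chain(*kept)))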