def _vectorise_ps(self,
                      ps: int,
                      convert_to_proportions: bool):
        # Override the function, returning only the LSS representation
        directory_path = f"{self.corpus_path}\\problem{ps:03d}"
        pzd_fpath = (f"{directory_path}\\BTM_{self.btm_dir_suffix}"
                     f"\\k{self.t}.pz_d")

        btm_lss = pd.read_csv(filepath_or_buffer=pzd_fpath,
                              delim_whitespace=True,
                              header=None)

        if len(self.btm.doc_index) == 0:
            doc_index = []
            # We will need to build the index
            with Tools.scan_directory(directory_path) as docs:
                for doc in docs:
                    if doc.is_dir():
                        continue
                    doc_index.append(Tools.get_filename(doc.path))
            btm_lss.index = doc_index
        else:
            btm_lss.index = self.btm.doc_index

        if convert_to_proportions:
            tokenised_btmcorpus_filepath = (
                f"{directory_path}\\BTM_{self.btm_dir_suffix}"
                f"\\vectorised\\tokenised_btmcorpus.txt")
            with open(tokenised_btmcorpus_filepath) as c:
                tcorpus = c.readlines()
                freqs = [len(self._doc_gen_biterms(tdoc))
                         for tdoc in tcorpus]
                btm_lss = btm_lss.mul(freqs, axis="index")

        return btm_lss
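A minimal, self-contained sketch of the proportion-to-frequency step above: each row of a toy p(z|d) matrix is scaled by that document's biterm count via DataFrame.mul(..., axis="index"). The values are invented for illustration.

import pandas as pd

# Toy p(z|d) matrix: rows are documents, columns are topics (values invented).
toy_pzd = pd.DataFrame([[0.7, 0.3],
                        [0.2, 0.8]],
                       index=["doc_a", "doc_b"])
biterm_counts = [10, 4]  # hypothetical biterm counts per document

# Scale each row by its document's biterm count, as in the block above.
scaled = toy_pzd.mul(biterm_counts, axis="index")
print(scaled)  # doc_a -> [7.0, 3.0], doc_b -> [0.8, 3.2]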
Example No. 2
    def generate_gibbs_states_plots(self,
                                    states_path: str,
                                    cat: str = "likelihood"):
        new_dir = Tools.get_path(states_path, f"{cat}_plots")
        if Tools.path_exists(new_dir):
            print("Plots found, skipping..")
            return

        Tools.initialise_directory(new_dir)
        with Tools.scan_directory(states_path) as outputs:
            for i, output in enumerate(outputs):
                try:
                    state_file = Tools.get_path(output.path, "state.log")
                    df = pd.read_csv(filepath_or_buffer=state_file,
                                     delim_whitespace=True,
                                     index_col="iter")
                    ax = sns.lineplot(x=df.index, y=cat, data=df)
                    ax.margins(x=0)
                    name = output.name
                    fig = ax.get_figure()
                    fig.savefig(Tools.get_path(states_path, f"{cat}_plots",
                                               f"{name}.png"),
                                dpi=300,
                                bbox_inches="tight",
                                format="png")
                    fig.clf()
                    print(f"{i}")
                except FileNotFoundError:
                    print(f"→ Skipping {output.name}")
Example No. 3
    def _convert_corpus_to_bow(self, file_ext: str = "txt"):
        """
        Convert a directory of text files into a BoW model.

        Parameters
        ----------
        word_grams : int (optional)
            The number of words to combine as features. 1 is the default value,
            and it denotes the usage of word unigrams.

        Returns
        -------
        bow_corpus : gnesim corpus
            The bag-of-words model.

        dictionary : gensim dictionary
            The id2word mapping.

        plain_documents : list
            The list of plain documents, to serve as a reference point.
        """
        # Read in the plain text files
        plain_documents = []
        with Tools.scan_directory(self.input_docs_path) as docs:
            for doc in docs:
                if doc.is_dir() or Tools.split_path(
                        doc.path)[1] != f".{file_ext}":
                    continue
                try:
                    with open(doc.path, mode="r", encoding="utf8") as f:
                        plain_documents.append(f.read())
                    self.doc_index.append(Tools.get_filename(doc.path))
                except PermissionError:
                    # Raised when trying to open a directory
                    print("Skipped while loading files: {}".format(doc.name))
        # Collocation Detection can be applied here via gensim.models.phrases
        # Tokenise corpus and remove too short documents
        tokenised_corpus = [[
            ' '.join(tkn)
            for tkn in ngrams(word_tokenize(d.lower()), self.word_grams)
        ] for d in plain_documents if len(d) > 3]

        if self.drop_uncommon:
            freq = defaultdict(int)
            for doc in tokenised_corpus:
                for word in doc:
                    freq[word] += 1
            tokenised_corpus = [[w for w in doc if freq[w] > self.freq_th]
                                for doc in tokenised_corpus]
        # Form the word ids dictionary for vectorisation
        dictionary = Dictionary(tokenised_corpus)
        corpus = [dictionary.doc2bow(t_d) for t_d in tokenised_corpus]

        return (corpus, dictionary,
                pd.DataFrame(data=plain_documents,
                             index=self.doc_index,
                             columns=["content"]))
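A short usage sketch of the gensim calls the method above relies on (Dictionary and doc2bow), run on a tiny invented corpus rather than the class's input files.

from gensim.corpora import Dictionary

# Tiny tokenised corpus standing in for tokenised_corpus above.
tokenised_corpus = [["topic", "models", "find", "topics"],
                    ["biterm", "topic", "models"]]

dictionary = Dictionary(tokenised_corpus)            # id2word mapping
bow_corpus = [dictionary.doc2bow(doc) for doc in tokenised_corpus]
print(bow_corpus[0])  # e.g. [(0, 1), (1, 1), (2, 1), (3, 1)]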
Example No. 4
    def _concatenate_docs_into_btmcorpus(self,
                                         remove_bgw: bool = False,
                                         drop_uncommon: bool = False,
                                         drop_punctuation: bool = False):
        # Read in the plain text files
        plain_documents = []
        with Tools.scan_directory(self.directory_path) as docs:
            for doc in docs:
                if doc.is_dir():
                    continue
                try:
                    with open(doc.path, mode="r", encoding="utf8") as f:
                        plain_documents.append(f.read())
                    self.doc_index.append(Tools.get_filename(doc.path))
                except PermissionError:
                    # Raised when trying to open a directory
                    print("Skipped while loading files: {}".format(doc.name))
        # lowercase and strip \n away
        plain_documents = [
            str.replace(d, "\n", "").lower() for d in plain_documents
        ]
        # It was observed that the topics contain a lot of stop words.
        # Following the BTM paper and this observation, we remove them.
        if remove_bgw:
            # Detect the language
            lang = detect(" ".join(plain_documents))
            if lang == "en":
                lang = "english"
            elif lang == "nl":
                lang = "dutch"
            else:
                lang = "greek"

            new_documents = []
            for d in plain_documents:
                terms = [
                    w for w in word_tokenize(text=d, language=lang)
                    if w not in set(stopwords.words(lang))
                ]
                new_documents.append(" ".join(terms))
            plain_documents = new_documents

        if drop_punctuation:
            plain_documents = [
                sub(pattern=r"[^\w\s]", repl="", string=d)
                for d in plain_documents
            ]
        # save it to disk
        Tools.save_list_to_text(mylist=plain_documents,
                                filepath=self.plain_corpus_path)
        return plain_documents
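A standalone sketch of the stop-word removal step above, assuming langdetect and the NLTK stopword/tokeniser data are installed; the sample text and the simplified language mapping are illustrative only.

from langdetect import detect
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

text = "the topics are composed of a lot of stop words"
lang = "english" if detect(text) == "en" else "dutch"  # simplified mapping
stop_set = set(stopwords.words(lang))
kept = [w for w in word_tokenize(text, language=lang) if w not in stop_set]
print(kept)  # roughly: ['topics', 'composed', 'lot', 'stop', 'words']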
Example No. 5
    def load_pz_d_into_df(self, use_frequencies: bool = False):
        """


        Parameters
        ----------
        use_frequencies : bool, optional
            DESCRIPTION. The default is False.

        Returns
        -------
        btm_lss : TYPE
            DESCRIPTION.

        """
        # ??? This function is not used, should be used in tester._vectorise_ps
        # Load the lss into df
        pzd_fpath = f"{self.directory_path}k{self.t}.pz_d"
        try:
            btm_lss = pd.read_csv(filepath_or_buffer=pzd_fpath,
                                  delim_whitespace=True)

            if not self.doc_index:
                # We will need to build the index
                with Tools.scan_directory(self.directory_path) as docs:
                    for doc in docs:
                        if doc.is_dir():
                            continue
                        self.doc_index.append(Tools.get_filename(doc.path))
            btm_lss.index = self.doc_index

            if use_frequencies:
                # The saved documents are in p(z|d) values
                # We want to proportion them to frequencies so that we have the
                # frequency of terms belonging to a topic
                # Since sum_b is used, we will use the count of biterms
                # Treating each p(zi|dj) as a proportion, we will count biterms
                with open(self.tokenised_btmcorpus_filepath) as c:
                    tcorpus = c.readlines()
                # How many biterms are there?
                # Analysing the C++ code, a window of 15 is used.
                # Regenerate the biterms and count them, as the statistics can
                # detect redundancies in unordered terms:
                freqs = [len(self._doc_gen_biterms(tdoc)) for tdoc in tcorpus]
                btm_lss = btm_lss.mul(freqs, axis="index")

            return btm_lss
        except FileNotFoundError:
            return None
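The biterm counting above calls _doc_gen_biterms, whose implementation is not shown in this section; the sketch below is a hypothetical stand-in that pairs co-occurring terms within a sliding window (15 in the original C++ code, per the comment above).

def gen_biterms(tokens, window=15):
    # Hypothetical stand-in: all unordered term pairs within the window.
    biterms = []
    for i in range(len(tokens)):
        for j in range(i + 1, min(i + window, len(tokens))):
            biterms.append((tokens[i], tokens[j]))
    return biterms

doc = "short biterm topic models".split()
print(len(gen_biterms(doc)))  # 6 pairs for a 4-token document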
Example No. 6
    def smart_optimisation(self,
                           plot_cat: str = "likelihood",
                           tail_prcnt: float = 0.80,
                           skip_factor: int = 1,
                           verbose: bool = False):
        # First generate the outputs to compare:
        words_counts = self._generate_hdps_outputs(skip_factor=skip_factor,
                                                   verbose=verbose)

        ret = {}
        # Loop over the outputs of different etas
        master_folder = Tools.get_path(self.out_dir, "optimisation")
        log_likelihoods = []
        avg_num_topics = []
        std_num_topics = []
        pw_ll = []
        errors = []
        with Tools.scan_directory(master_folder) as perms:
            for perm in perms:
                # generate plots
                if not Tools.is_path_dir(perm.path):
                    continue

                self.generate_gibbs_states_plots(states_path=perm.path,
                                                 cat=plot_cat)
                with Tools.scan_directory(perm.path) as problems:
                    for problem in problems:
                        try:
                            n_words = words_counts[problem.name]
                            path_state = Tools.get_path(
                                problem.path, "state.log")
                            df_state = pd.read_csv(
                                filepath_or_buffer=path_state,
                                delim_whitespace=True,
                                index_col="iter",
                                usecols=["iter", "likelihood", "num.topics"])
                            ll = df_state.likelihood.tail(
                                round(len(df_state) * tail_prcnt)).mean()
                            avg_topics = df_state["num.topics"].tail(
                                round(len(df_state) * tail_prcnt)).mean()
                            std_topics = df_state["num.topics"].tail(
                                round(len(df_state) * tail_prcnt)).std()

                            log_likelihoods.append(ll)
                            pw_ll.append(ll / n_words)
                            avg_num_topics.append(avg_topics)
                            std_num_topics.append(std_topics)
                        except FileNotFoundError as e:
                            print(f"{e}")
                            errors.append(f"{e}")
                            continue
                        except KeyError:
                            # Plots folders are being queried for n_words
                            continue
                ret.update({
                    f"{perm.name}": [
                        round(sum(log_likelihoods) / len(log_likelihoods), 4),
                        round(sum(pw_ll) / len(pw_ll), 4),
                        round(sum(avg_num_topics) / len(avg_num_topics), 4),
                        round(sum(std_num_topics) / len(std_num_topics), 4)
                    ]
                })
        # Save any encountered errors to disk too
        Tools.save_list_to_text(mylist=errors,
                                filepath=Tools.get_path(
                                    self.out_dir, "optimisation",
                                    "opt_errors.txt"))

        pd.DataFrame(data=ret,
                     index=["Log-l", "PwLL", "T-Avg", "T-Std"
                            ]).T.to_csv(Tools.get_path(self.out_dir,
                                                       "optimisation",
                                                       "optimisation.csv"),
                                        index=True)

        return ret
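A small sketch of the tail averaging used above: the mean of the last tail_prcnt share of a likelihood trace, here an invented five-value series.

import pandas as pd

likelihood = pd.Series([-120.0, -110.0, -105.0, -103.0, -102.0])
tail_prcnt = 0.80
ll = likelihood.tail(round(len(likelihood) * tail_prcnt)).mean()
print(ll)  # mean of the last 4 values: -105.0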
Example No. 7
    def _generate_hdps_outputs(self,
                               skip_factor: int = 1,
                               verbose: bool = False):
        st = time.perf_counter()
        ldac_path = Tools.get_path("lda_c_format_HyperFalse",
                                   "dummy_ldac_corpus.dat")
        words_nums = {}
        vocab_file = Tools.get_path("lda_c_format_HyperFalse",
                                    "dummy_ldac_corpus.dat.vocab")
        #        size = ((60 // skip_factor)
        #                * len(self.etas)
        #                * len(self.gammas)**2
        #                * len(self.alphas)**2)
        # Since we fixed the scales of Gammas
        size = ((60 // skip_factor) * len(self.etas) * len(self.gammas) *
                len(self.alphas))
        i = 0
        with Tools.scan_directory(self.training_folder) as ps_folders:
            for c, folder in enumerate(ps_folders):
                if not folder.name[0:7] == "problem":
                    if verbose:
                        print(f"→ Skipping {folder.name}")
                    continue
                # Implement the skipping factor
                if c % skip_factor != 0:
                    continue

                t = time.perf_counter()
                # Fix the scale parameters for the Gamma priors
                g_r = 1
                a_r = 1
                for eta in self.etas:
                    # for g_s, g_r in product(self.gammas, repeat=2):
                    # for a_s, a_r in product(self.alphas, repeat=2):
                    # Only switch the shape parameter of Gammas
                    for g_s in self.gammas:
                        for a_s in self.alphas:
                            # Cache the number of words for later
                            if folder.name not in words_nums:
                                vocab_path = Tools.get_path(
                                    folder.path, vocab_file)
                                n_words = self._get_number_words(vocab_path)
                                words_nums.update({folder.name: n_words})

                            i = i + 1
                            percentage = f"{100 * i / size:06.02f}"
                            suff = (f"{g_s:0.2f}_{g_r:0.2f}_"
                                    f"{a_s:0.2f}_{a_r:0.2f}")
                            if verbose:
                                print(f"► Applying HDP with "
                                      f"eta={eta:0.1f} "
                                      f"gamma({g_s:0.2f}, {g_r:0.2f}) "
                                      f"alpha({a_s:0.2f}, {a_r:0.2f}) "
                                      f"on {folder.name} [{percentage}%]")

                            directory = Tools.get_path(self.out_dir,
                                                       "optimisation",
                                                       f"{eta:0.1f}__{suff}",
                                                       folder.name)

                            if Tools.path_exists(directory):
                                if verbose:
                                    print("\tcached result found at "
                                          f"{directory}")
                                continue

                            path_executable = r"{}\hdp.exe".format(
                                self.hdp_path)
                            data = Tools.get_path(folder.path, ldac_path)

                            # Prepare the output directory
                            Tools.initialise_directories(directory)

                            # Build the hdp.exe invocation; append the seed
                            # only when one was supplied.
                            params = [
                                path_executable, "--algorithm", "train",
                                "--data", data, "--directory", directory,
                                "--max_iter", str(self.iters),
                                "--sample_hyper", "no",
                                "--save_lag", "-1",
                                "--eta", str(eta),
                                "--gamma_a", str(g_s), "--gamma_b", str(g_r),
                                "--alpha_a", str(a_s), "--alpha_b", str(a_r)
                            ]
                            if self.seed is not None:
                                params.extend(["--random_seed",
                                               str(self.seed)])

                            s.run(params,
                                  stdout=s.DEVNULL,
                                  check=True,
                                  capture_output=False,
                                  text=True)

                if verbose:
                    print(f"--- {folder.name} done in "
                          f"{time.perf_counter() - t:0.1f} seconds ---")

        period = round(time.perf_counter() - st, 2)
        print(f"----- Vectorisation done in {period} seconds -----")
        return words_nums
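A sketch of the grid-size bookkeeping above, with invented hyper-parameter lists: fixing the Gamma scale parameters shrinks the per-problem grid from len(etas) * len(gammas)**2 * len(alphas)**2 to len(etas) * len(gammas) * len(alphas) runs.

# Invented hyper-parameter grids for illustration only.
etas = [0.3, 0.5]
gammas = [0.1, 1.0]
alphas = [0.1, 1.0]
skip_factor = 1

full_grid = len(etas) * len(gammas)**2 * len(alphas)**2   # 32 combinations
fixed_scales = len(etas) * len(gammas) * len(alphas)      # 8 combinations
runs = (60 // skip_factor) * fixed_scales  # mirrors 'size' above (60 problems)
print(full_grid, fixed_scales, runs)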
Example No. 8
    def assess_hyper_sampling(self, tail_prcnt: float, verbose: bool = False):
        """
        A function to measure the average per word log-likelihood after
        hyper-sampling the concentration parameters of the Dirichlet
        distributions.
        Caution: the hdp must have been run on the data with hyper sampling and
        without it, in order to load the two representations and compare.

        Returns
        -------
        dct: dict
            A dictionary containing the per word log-likelihood of the train
            data with the two methods pertaining to sampling the concentration
            parameters: normal and hyper.

        """
        path_normal = Tools.get_path(".", "hdp_lss_HyperFalse", "state.log")
        path_hyper = Tools.get_path(".", "hdp_lss_HyperTrue", "state.log")
        path_ldac = Tools.get_path(".", "lda_c_format_HyperTrue",
                                   "dummy_ldac_corpus.dat.vocab")
        per_word_ll_normal = []
        per_word_ll_hyper = []

        if verbose:
            print("------Concentration Parameters Optimisation------")

        with Tools.scan_directory(self.training_folder) as dirs:
            for d in dirs:
                if d.name[0:7] != "problem":
                    continue

                if verbose:
                    print(f"\t► Processing {d.name}")

                normal = Tools.get_path(d.path, path_normal)
                hyper = Tools.get_path(d.path, path_hyper)
                vocab = Tools.get_path(d.path, path_ldac)

                n_words = self._get_number_words(vocab)
                df_normal = pd.read_csv(filepath_or_buffer=normal,
                                        delim_whitespace=True,
                                        index_col="iter",
                                        usecols=["iter", "likelihood"],
                                        squeeze=True)
                ll_normal = df_normal.tail(round(len(df_normal) *
                                                 tail_prcnt)).mean()
                per_word_ll_normal.append(ll_normal / n_words)

                df_hyper = pd.read_csv(filepath_or_buffer=hyper,
                                       delim_whitespace=True,
                                       index_col="iter",
                                       usecols=["iter", "likelihood"],
                                       squeeze=True)
                ll_hyper = df_hyper.tail(round(len(df_hyper) *
                                               tail_prcnt)).mean()
                per_word_ll_hyper.append(ll_hyper / n_words)

        dct = {
            "Normal_Sampling":
            round(sum(per_word_ll_normal) / len(per_word_ll_normal), 4),
            "Hyper_Sampling":
            round(sum(per_word_ll_hyper) / len(per_word_ll_hyper), 4)
        }

        if verbose:
            print("-------------------------------------------------")

        pd.DataFrame(data=dct,
                     index=[0
                            ]).to_csv(f"{self.out_dir}/hyper_optimisation.csv",
                                      index=False)
        return dct
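A compact sketch of the per-word log-likelihood comparison above, using invented likelihood traces and word count: each trace's tail mean is divided by the vocabulary size before the two sampling modes are compared.

import pandas as pd

n_words = 1000                      # hypothetical vocabulary word count
tail_prcnt = 0.80
normal = pd.Series([-5200.0, -5100.0, -5050.0, -5020.0, -5010.0])
hyper = pd.Series([-5100.0, -5000.0, -4950.0, -4920.0, -4910.0])

pw_ll_normal = normal.tail(round(len(normal) * tail_prcnt)).mean() / n_words
pw_ll_hyper = hyper.tail(round(len(hyper) * tail_prcnt)).mean() / n_words
print(round(pw_ll_normal, 4), round(pw_ll_hyper, 4))  # -5.045 -4.945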