Code example #1
    def __init__(
            self,
            directory_path: str,
            t: int,
            alpha: float,
            beta: float,
            btm_exe_path: str = Tools.get_path("..", "BTM-master", "src",
                                               "btm.exe"),
            n_iter: int = 10000,  # To guarantee convergence
            model_dir_suffix: str = "",
            doc_inference_type: str = "sum_b"):
        self.directory_path = directory_path
        self.t = t
        self.alpha = alpha
        self.beta = beta
        self.n_iter = n_iter
        self.doc_index = []  # the index of the files read for reference
        self.w = None
        self.btm_exe = btm_exe_path
        self.doc_inf_type = "sum_b"  # Fixed to "sum_b"; later computations depend on it

        self.output_dir = Tools.get_path(directory_path,
                                         f"BTM_{model_dir_suffix}")
        self.plain_corpus_path = Tools.get_path(self.output_dir,
                                                "btmcorpus.txt")
        self.tokenised_btmcorpus_filepath = Tools.get_path(
            self.output_dir, "vectorised", "tokenised_btmcorpus.txt")
        self.vocab_ids_path = Tools.get_path(self.output_dir, "vectorised",
                                             "voca_pt")
Code example #2
    def generate_gibbs_states_plots(self,
                                    states_path: str,
                                    cat: str = "likelihood"):
        new_dir = Tools.get_path(states_path, f"{cat}_plots")
        if Tools.path_exists(new_dir):
            print("Plots found, skipping..")
            return

        Tools.initialise_directory(new_dir)
        with Tools.scan_directory(states_path) as outputs:
            for i, output in enumerate(outputs):
                try:
                    state_file = Tools.get_path(output.path, "state.log")
                    df = pd.read_csv(filepath_or_buffer=state_file,
                                     delim_whitespace=True,
                                     index_col="iter")
                    ax = sns.lineplot(x=df.index, y=cat, data=df)
                    ax.margins(x=0)
                    name = output.name
                    fig = ax.get_figure()
                    fig.savefig(Tools.get_path(states_path, f"{cat}_plots",
                                               f"{name}.png"),
                                dpi=300,
                                bbox_inches="tight",
                                format="png")
                    fig.clf()
                    print(f"{i}")
                except FileNotFoundError:
                    print(f"→ Skipping {output.name}")
Code example #3
    def _generate_lda_c_corpus(self):
        """ Convert a group of files LDA_C corpus and store it on disk"""
        bow_corpus, id2word_map, plain_docs = self._convert_corpus_to_bow()
        # Serialise into LDA-C format and store on disk
        output_dir = Tools.get_path(
            self.input_docs_path,
            f"lda_c_format_{self.hdp_eta:0.1f}_{self.hdp_gamma_s:0.1f}",
            f"_{self.hdp_alpha_s:0.1f}_common_{self.drop_uncommon}")

        Tools.initialise_directory(output_dir)
        save_location = Tools.get_path(output_dir, f"{self.lda_c_fname}.dat")

        bleicorpus.BleiCorpus.serialize(fname=save_location,
                                        corpus=bow_corpus,
                                        id2word=id2word_map)
        return plain_docs, bow_corpus
Code example #4
    def _invoke_gibbs_hdp(self):
        """Invoke Gibbs hdp posterior inference on the corpus"""
        path_executable = Tools.get_path(self.hdp_path, "hdp.exe")

        param_data = Tools.get_path(
            self.input_docs_path,
            f"lda_c_format_{self.hdp_eta:0.1f}_{self.hdp_gamma_s:0.1f}",
            f"_{self.hdp_alpha_s:0.1f}_common_{self.drop_uncommon}",
            f"{self.lda_c_fname}.dat")

        param_directory = Tools.get_path(self.input_docs_path,
                                         self.hdp_output_directory)

        # Prepare the output directory
        Tools.initialise_directory(param_directory)

        hdp_cmd = [
            path_executable, "--algorithm", "train", "--data", param_data,
            "--directory", param_directory, "--max_iter",
            str(self.hdp_iterations), "--sample_hyper",
            "yes" if self.hdp_hyper_sampling else "no", "--save_lag", "-1",
            "--eta", str(self.hdp_eta), "--gamma_a", str(self.hdp_gamma_s),
            "--alpha_a", str(self.hdp_alpha_s)
        ]
        # Only pass a random seed to hdp.exe when one was explicitly provided
        if self.hdp_seed is not None and self.hdp_seed > 0:
            hdp_cmd.extend(["--random_seed", str(self.hdp_seed)])

        ret = s.run(hdp_cmd, check=True, capture_output=True, text=True)

        return ret.stdout
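These snippets call s.run without showing the import; presumably s is the standard subprocess module, which the run/check/capture_output/text signature matches (an assumption, since the import lies outside the excerpt). A minimal stand-alone demonstration of the same invocation pattern with a harmless child process:

import subprocess as s
import sys

# check=True raises CalledProcessError on a non-zero exit code;
# capture_output=True together with text=True returns stdout as a string
ret = s.run([sys.executable, "-c", "print('hello from a child process')"],
            check=True,
            capture_output=True,
            text=True)
print(ret.stdout)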
Code example #5
    def _infer_btm_pz_d(self):
        """Invoke Gibbs BTM docs inference on the corpus"""

        ret = s.run([
            self.btm_exe, "inf", self.doc_inf_type,
            str(self.t), self.tokenised_btmcorpus_filepath,
            Tools.get_path(self.output_dir, "")
        ],
                    check=True,
                    capture_output=True,
                    text=True)
        return ret.stdout
Code example #6
    def _estimate_btm(self):
        """Invoke Gibbs BTM posterior inference on the tokenised corpus"""

        ret = s.run(
            [
                self.btm_exe,
                "est",
                str(self.t),
                str(self.w),
                str(self.alpha),
                str(self.beta),
                str(self.n_iter),
                str(self.n_iter),  # Save Step
                self.tokenised_btmcorpus_filepath,
                Tools.get_path(self.output_dir, "")
            ],
            check=True,
            capture_output=True,
            text=True)
        return ret.stdout
Code example #7
    def _load_lss_representation_into_df(self) -> pd.DataFrame:
        """
        Load a BoT LSS representation from disk to a returned dataframe.

        Returns
        -------
        lss_df : pd.DataFrame
            A matrix of shape (n_samples, n_features)

        Raises
        ------
        FileNotFoundError
            When the LSS representation isn't found on disk.

        """

        path = Tools.get_path(self.input_docs_path, self.hdp_output_directory,
                              "mode-word-assignments.dat")
        # We don't need the document-table column, so we skip it; but we do
        # need the word counts under each topic to produce a bag-of-topics
        # (BoT) model
        try:
            lss_df = pd.read_csv(filepath_or_buffer=path,
                                 delim_whitespace=True)
            # Optionally: usecols=["d", "w", "z"] followed by drop_duplicates()
            # Produce topic weights as counts of topic words
            lss_df = lss_df.pivot_table(values='w',
                                        columns='z',
                                        index='d',
                                        aggfunc='count',
                                        fill_value=0)
            # Index with file names for later reference
            lss_df.index = self.doc_index

            return lss_df
        except FileNotFoundError:
            print(("\nNo LSS precomputed file was found on disk via:\n{}\n"
                   "> Please generate LDA-C corpus and run HDP first...\n"
                   ).format(path))
            raise
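The pivot step above is what turns per-token topic assignments into a bag-of-topics matrix. A tiny self-contained illustration with made-up assignments, using the same d/w/z column names as the snippet:

import pandas as pd

# Toy stand-in for mode-word-assignments.dat: document id (d), word id (w)
# and the topic (z) each word token was assigned to
assignments = pd.DataFrame({"d": [0, 0, 0, 1, 1, 1],
                            "w": [3, 7, 7, 2, 3, 9],
                            "z": [0, 0, 1, 1, 1, 2]})

# Counting words per (document, topic) pair yields the BoT matrix
bot = assignments.pivot_table(values="w", columns="z", index="d",
                              aggfunc="count", fill_value=0)
print(bot)
# z  0  1  2
# d
# 0  2  1  0
# 1  0  2  1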
Code example #8
    def traverse_gamma_alpha(self,
                             ps: int,
                             tail_prcnt: float = 0.80,
                             verbose: bool = True):
        ldac_path = Tools.get_path("lda_c_format_HyperFalse",
                                   "dummy_ldac_corpus.dat")
        dat_path = Tools.get_path(self.training_folder, f"problem{ps:03d}",
                                  ldac_path)
        directory = Tools.get_path(self.out_dir, "gamma_alpha")
        path_executable = Tools.get_path(self.hdp_path, "hdp.exe")

        res = defaultdict(list)
        total_work = len(self.gammas)**2 * len(self.alphas)**2
        c = 0
        print("----------------------------------------------------")
        for g_s, g_r in product(self.gammas, repeat=2):
            for a_s, a_r in product(self.alphas, repeat=2):
                c = c + 1
                progress = 100.0 * c / total_work
                suff = f"_{g_s:0.2f}_{g_r:0.2f}_{a_s:0.2f}_{a_r:0.2f}"
                if verbose:
                    print(f"► Working on "
                          f"Gamma({g_s:0.2f},{g_r:0.2f}) "
                          f"and Alpha({a_s:0.2f},{a_r:0.2f}) "
                          f"[{progress:06.2f}%]")
                run_dir = Tools.get_path(directory, f"{c:03d}",
                                         f"hdp_out{suff}")
                s.run([
                    path_executable, "--algorithm", "train", "--data",
                    dat_path, "--directory", run_dir, "--max_iter",
                    str(500), "--sample_hyper", "no", "--save_lag", "-1",
                    "--eta", "0.5", "--random_seed",
                    str(self.seed), "--gamma_a",
                    str(g_s), "--gamma_b",
                    str(g_r), "--alpha_a",
                    str(a_s), "--alpha_b",
                    str(a_r)
                ],
                      check=True,
                      capture_output=True,
                      text=True)
                # Read the likelihood from this run's output directory
                ll = pd.read_csv(Tools.get_path(run_dir, "state.log"),
                                 delim_whitespace=True).likelihood.tail(
                                     round(tail_prcnt * 500)).mean()
                res["gamma_shape"].append(g_s)
                res["gamma_rate"].append(g_r)
                res["alpha_shape"].append(a_s)
                res["alpha_rate"].append(a_r)
                res["gamma"].append(g_s * g_r)
                res["alpha"].append(a_s * a_r)
                res["likelihood"].append(ll)
        # Save the results to disk
        df_res = pd.DataFrame(res)
        df_res.to_csv(Tools.get_path(directory, "results.csv"), index=False)
        if verbose:
            print("---------------------- Done ------------------------")
        return df_res
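The tail averaging at the end of each run recurs in code examples #9 and #11 as well: the last tail_prcnt share of the Gibbs chain is treated as the converged part and its likelihood is averaged. A tiny self-contained sketch of that computation on synthetic values:

import pandas as pd

# Synthetic likelihood trace over 500 Gibbs iterations
trace = pd.Series(range(-500, 0), name="likelihood")

tail_prcnt = 0.80
# Mirrors the .tail(round(tail_prcnt * 500)).mean() calls above
tail_mean = trace.tail(round(tail_prcnt * 500)).mean()
print(tail_mean)  # -200.5, the mean of the last 400 values (-400 .. -1)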
Code example #9
    def smart_optimisation(self,
                           plot_cat: str = "likelihood",
                           tail_prcnt: float = 0.80,
                           skip_factor: int = 1,
                           verbose: bool = False):
        # First generate the outputs to compare:
        words_counts = self._generate_hdps_outputs(skip_factor=skip_factor,
                                                   verbose=verbose)

        ret = {}
        # Loop over the outputs of different etas
        master_folder = Tools.get_path(self.out_dir, "optimisation")
        log_likelihoods = []
        avg_num_topics = []
        std_num_topics = []
        pw_ll = []
        errors = []
        with Tools.scan_directory(master_folder) as perms:
            for perm in perms:
                # generate plots
                if not Tools.is_path_dir(perm.path):
                    continue

                self.generate_gibbs_states_plots(states_path=perm.path,
                                                 cat=plot_cat)
                with Tools.scan_directory(perm.path) as problems:
                    for problem in problems:
                        try:
                            n_words = words_counts[problem.name]
                            path_state = Tools.get_path(
                                problem.path, "state.log")
                            df_state = pd.read_csv(
                                filepath_or_buffer=path_state,
                                delim_whitespace=True,
                                index_col="iter",
                                usecols=["iter", "likelihood", "num.topics"])
                            ll = df_state.likelihood.tail(
                                round(len(df_state) * tail_prcnt)).mean()
                            avg_topics = df_state["num.topics"].tail(
                                round(len(df_state) * tail_prcnt)).mean()
                            std_topics = df_state["num.topics"].tail(
                                round(len(df_state) * tail_prcnt)).std()

                            log_likelihoods.append(ll)
                            pw_ll.append(ll / n_words)
                            avg_num_topics.append(avg_topics)
                            std_num_topics.append(std_topics)
                        except FileNotFoundError as e:
                            print(f"{e}")
                            errors.append(f"{e}")
                            continue
                        except KeyError:
                            # The plots folder has no words_counts entry; skip it
                            continue
                ret.update({
                    f"{perm.name}": [
                        round(sum(log_likelihoods) / len(log_likelihoods), 4),
                        round(sum(pw_ll) / len(pw_ll), 4),
                        round(sum(avg_num_topics) / len(avg_num_topics), 4),
                        round(sum(std_num_topics) / len(std_num_topics), 4)
                    ]
                })
        # Save any encountered errors to disk too
        Tools.save_list_to_text(mylist=errors,
                                filepath=Tools.get_path(
                                    self.out_dir, "optimisation",
                                    "opt_errors.txt"))

        pd.DataFrame(data=ret,
                     index=["Log-l", "PwLL", "T-Avg", "T-Std"
                            ]).T.to_csv(Tools.get_path(self.out_dir,
                                                       "optimisation",
                                                       "optimisation.csv"),
                                        index=True)

        return ret
Code example #10
    def _generate_hdps_outputs(self,
                               skip_factor: int = 1,
                               verbose: bool = False):
        st = time.perf_counter()
        ldac_path = Tools.get_path("lda_c_format_HyperFalse",
                                   "dummy_ldac_corpus.dat")
        words_nums = {}
        vocab_file = Tools.get_path("lda_c_format_HyperFalse",
                                    "dummy_ldac_corpus.dat.vocab")
        #        size = ((60 // skip_factor)
        #                * len(self.etas)
        #                * len(self.gammas)**2
        #                * len(self.alphas)**2)
        # Since we fixed the scales of Gammas
        size = ((60 // skip_factor) * len(self.etas) * len(self.gammas) *
                len(self.alphas))
        i = 0
        with Tools.scan_directory(self.training_folder) as ps_folders:
            for c, folder in enumerate(ps_folders):
                if not folder.name[0:7] == "problem":
                    if verbose:
                        print(f"→ Skipping {folder.name}")
                    continue
                # Implement the skipping factor
                if c % skip_factor != 0:
                    continue

                t = time.perf_counter()
                # Fix the scale parameters for the Gamma priors
                g_r = 1
                a_r = 1
                for eta in self.etas:
                    # for g_s, g_r in product(self.gammas, repeat=2):
                    # for a_s, a_r in product(self.alphas, repeat=2):
                    # Only switch the shape parameter of Gammas
                    for g_s in self.gammas:
                        for a_s in self.alphas:
                            # Cache the number of words for later
                            if folder.name not in words_nums:
                                vocab_path = Tools.get_path(
                                    folder.path, vocab_file)
                                n_words = self._get_number_words(vocab_path)
                                words_nums.update({folder.name: n_words})

                            i = i + 1
                            percentage = f"{100 * i / size:06.02f}"
                            suff = (f"{g_s:0.2f}_{g_r:0.2f}_"
                                    f"{a_s:0.2f}_{a_r:0.2f}")
                            if verbose:
                                print(f"► Applying HDP with "
                                      f"eta={eta:0.1f} "
                                      f"gamma({g_s:0.2f}, {g_r:0.2f}) "
                                      f"alpha({a_s:0.2f}, {a_r:0.2f}) "
                                      f"on {folder.name} [{percentage}%]")

                            directory = Tools.get_path(self.out_dir,
                                                       "optimisation",
                                                       f"{eta:0.1f}__{suff}",
                                                       folder.name)

                            if (Tools.path_exists(directory)):
                                if verbose:
                                    print("\tcached result found at "
                                          f"{directory}")
                                continue

                            path_executable = r"{}\hdp.exe".format(
                                self.hdp_path)
                            data = Tools.get_path(folder.path, ldac_path)

                            # Prepare the output directory
                            Tools.initialise_directories(directory)

                            hdp_cmd = [
                                path_executable, "--algorithm", "train",
                                "--data", data, "--directory", directory,
                                "--max_iter",
                                str(self.iters), "--sample_hyper", "no",
                                "--save_lag", "-1", "--eta",
                                str(eta), "--gamma_a",
                                str(g_s), "--gamma_b",
                                str(g_r), "--alpha_a",
                                str(a_s), "--alpha_b",
                                str(a_r)
                            ]
                            # Only pass a random seed when one was provided
                            if self.seed is not None:
                                hdp_cmd.extend(
                                    ["--random_seed", str(self.seed)])

                            s.run(hdp_cmd,
                                  stdout=s.DEVNULL,
                                  check=True,
                                  capture_output=False,
                                  text=True)

                if verbose:
                    print(f"--- {folder.name} done in "
                          f"{time.perf_counter() - t:0.1f} seconds ---")

        period = round(time.perf_counter() - st, 2)
        print(f"----- Vectorisation done in {period} seconds -----")
        return words_nums
Code example #11
    def assess_hyper_sampling(self, tail_prcnt: float, verbose: bool = False):
        """
        A function to measure the average per word log-likelihood after
        hyper-sampling the concentration parameters of the Dirichlet
        distributions.
        Caution: HDP must have been run on the data both with and without
        hyper-sampling, so that the two representations can be loaded and
        compared.

        Returns
        -------
        dct: dict
            A dictionary containing the per word log-likelihood of the train
            data with the two methods pertaining to sampling the concentration
            parameters: normal and hyper.

        """
        path_normal = Tools.get_path(".", "hdp_lss_HyperFalse", "state.log")
        path_hyper = Tools.get_path(".", "hdp_lss_HyperTrue", "state.log")
        path_ldac = Tools.get_path(".", "lda_c_format_HyperTrue",
                                   "dummy_ldac_corpus.dat.vocab")
        per_word_ll_normal = []
        per_word_ll_hyper = []

        if verbose:
            print("------Concentration Parameters Optimisation------")

        with Tools.scan_directory(self.training_folder) as dirs:
            for d in dirs:
                if d.name[0:7] != "problem":
                    continue

                if verbose:
                    print(f"\t► Processing {d.name}")

                normal = Tools.get_path(d.path, path_normal)
                hyper = Tools.get_path(d.path, path_hyper)
                vocab = Tools.get_path(d.path, path_ldac)

                n_words = self._get_number_words(vocab)
                df_normal = pd.read_csv(filepath_or_buffer=normal,
                                        delim_whitespace=True,
                                        index_col="iter",
                                        usecols=["iter", "likelihood"],
                                        squeeze=True)
                ll_normal = df_normal.tail(round(len(df_normal) *
                                                 tail_prcnt)).mean()
                per_word_ll_normal.append(ll_normal / n_words)

                df_hyper = pd.read_csv(filepath_or_buffer=hyper,
                                       delim_whitespace=True,
                                       index_col="iter",
                                       usecols=["iter", "likelihood"],
                                       squeeze=True)
                ll_hyper = df_hyper.tail(round(len(df_hyper) *
                                               tail_prcnt)).mean()
                per_word_ll_hyper.append(ll_hyper / n_words)

        dct = {
            "Normal_Sampling":
            round(sum(per_word_ll_normal) / len(per_word_ll_normal), 4),
            "Hyper_Sampling":
            round(sum(per_word_ll_hyper) / len(per_word_ll_hyper), 4)
        }

        if verbose:
            print("-------------------------------------------------")

        pd.DataFrame(data=dct, index=[0]).to_csv(
            Tools.get_path(self.out_dir, "hyper_optimisation.csv"),
            index=False)
        return dct
Code example #12
def main():
    # Specify which topic model to use
    use_btm = True

    if use_btm:
        #   Control Parameters ###
        train_phase = True
        t = 10  # number of btm topics
        ##########################

        print("\n-------------------------------------")
        print("BTM modelling and authorial clustering")
        print("-------------------------------------\n")

        if train_phase:
            r = range(1, 2)
            dpath = Tools.get_path(
                r"D:\Projects\Authorial_Clustering_Short_Texts_nPTM"
                r"\Datasets\pan17_train")
        else:
            r = range(1, 121)
            dpath = (r"D:\Projects\Authorial_Clustering_Short_Texts_nPTM"
                     r"\Datasets\pan17_test")

        for ps in r:
            # Loop over the problemsets
            ps_path = Tools.get_path(dpath, f"problem{ps:03d}")
            print(f"\nProcessing #{ps:03d}:")
            #   Inferring BTM ###
            #####################
            # TODO: avoid creating r BTM objects by delegating ps_path
            btm = LssBTModeller(directory_path=ps_path,
                                t=t,
                                alpha=1.0,
                                beta=0.01,
                                model_dir_suffix="remove_stopwords_puncts")
            btm.infer_btm(remove_bg_terms=True,
                          drop_puncs=True,
                          use_biterm_freqs=False)
            print("\t→ btm inference done")
    else:

        print("Main thread started..\n")
        folders_path = (r"D:\College\DKEM\Thesis"
                        r"\AuthorshipClustering\Datasets\pan17_train")
        hdp = r"D:\College\DKEM\Thesis\AuthorshipClustering\Code\hdps\hdp"

        optimiser = LssOptimiser(train_folders_path=folders_path,
                                 hdp_path=hdp,
                                 ldac_filename="dummy_ldac_corpus.dat",
                                 hdp_seed=None,
                                 eta_range=[0.3, 0.5, 0.8, 1],
                                 gamma_range=[0.1, 0.3, 0.5],
                                 alpha_range=[0.1, 0.3, 0.5],
                                 out_dir=Tools.get_path(".", "__outputs__"),
                                 hdp_iters=1000)

        ret_eta = optimiser.smart_optimisation(tail_prcnt=0.8,
                                               skip_factor=5,
                                               plot_cat="num.tables",
                                               verbose=True)
        print(ret_eta)
        print("Done.")