def __init__(self, _type, bandits, agent, runs, exp_nums, logdir, k_vec,
             k_probs):
        """Set up experiment state and seed the reference ESR bandit.

        Args:
            _type: Experiment type string; bandit-specific attributes are
                only created when this equals "bandit".
            bandits: Bandit instances for the experiment (stored only when
                _type == "bandit").
            agent: Learning agent under evaluation (bandit type only).
            runs: Number of experiment runs (bandit type only).
            exp_nums: Number of experiments per run (bandit type only).
            logdir: Directory where results/logs are written.
            k_vec: Sequence of value vectors for the reference distribution.
            k_probs: Probabilities parallel to k_vec.
        """
        self._type = _type
        self.esr_vector = []
        self.esr_probs = []
        self.f1_score = []
        self.f1 = []
        self.metrics = Metrics()
        self.k_vec = k_vec
        self.k_probs = k_probs
        self.f1_df = pd.DataFrame()
        # Reference agent whose distribution is assembled manually below.
        self.esrBandit = Agent(0, 10)
        self.logdir = logdir

        # Walk the two parallel sequences in lockstep (zip instead of the
        # range(len(...)) anti-idiom).
        for vec, prob in zip(k_vec, k_probs):
            self.esrBandit.manual_distribution(vec, prob)

        # NOTE(review): these attributes exist only for the "bandit" type;
        # accessing them on other types raises AttributeError — confirm
        # callers never do so, or initialise defaults unconditionally.
        if self._type == "bandit":
            self.bandits = bandits
            self.agent = agent
            self.runs = runs
            self.exp_nums = exp_nums
Пример #2
0
    def classify(self):
        """Run the classification selected in the dialog, then close it.

        Reads which distance-metric radio button is checked on
        ``self.ui_classify`` and whether normalization is enabled, invokes
        the matching ``Metrics.classify_*`` method, and finally closes the
        classify dialog. If no radio button is checked, no classification
        is performed (dialog is still closed) — matching the original
        if/elif fall-through.
        """
        metrics: Metrics = Metrics(len(self.data_frame.df.index), self.data_frame.df)
        ui = self.ui_classify

        # Dispatch table replaces the 4x duplicated radio/normalize
        # if/elif pyramid: (radio button, plain method, normalized method).
        dispatch = (
            (ui.euklidianRadio, metrics.classify_euclidean,
             metrics.classify_euclidean_normalize),
            (ui.manhattanRadio, metrics.classify_manhattan,
             metrics.classify_manhattan_normalize),
            (ui.chebyshevRadio, metrics.classify_chebyshev,
             metrics.classify_chebyshev_normalize),
            (ui.mahalanobisRadio, metrics.classify_mahalanobis,
             metrics.classify_mahalanobis_normalize),
        )
        normalize = ui.checkBoxNormalize.isChecked()
        for radio, plain, normalized in dispatch:
            if radio.isChecked():
                (normalized if normalize else plain)()
                break  # only the first checked radio fires, as before

        self.close_classify_dialog()
Пример #3
0
def main():
    """CLI entry point: parse arguments, identify clusters, run metrics.

    Parses command-line options, stores them on the global ``Settings``
    object, runs cluster identification for the given project, dumps each
    cluster's results to JSON, and (with --metrics) calculates and saves
    metrics.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--project", "-p", help="Project path", required=True)
    parser.add_argument("--stop_words", "-s", help="Path to stopwords file")
    parser.add_argument("--k_topics",
                        "-k",
                        help="Number of topics for given project")
    parser.add_argument(
        "--resolution",
        "-r",
        help=
        "Resolution number parameter in Louvain community detection. A value in range of 0.3 to 1 is advised. A smaller resolution will identify smaller communities and vice versa. By default the whole range is tested and communities for each community saved."
    )
    parser.add_argument(
        "--metrics",
        "-m",
        help=
        "Execute metrics for a given project name after normal parsing and execution (relative path to set root path) (At the current time it does NOT work independently from the identification process)",
        action="store_true")
    parser.add_argument("--draw",
                        "-d",
                        help="Enable plotting of graphs",
                        action="store_true")
    parser.add_argument("--lda-plotting",
                        "-l",
                        help="Enable plotting of LDA topics",
                        action="store_true")
    args = parser.parse_args()

    # store_true flags are already bools — no need for `True if ... else False`.
    Settings.DRAW = args.draw
    Settings.LDA_PLOTTING = args.lda_plotting
    # Optional numeric flags arrive as strings; convert once at the edge.
    Settings.K_TOPICS = int(args.k_topics) if args.k_topics else None
    Settings.RESOLUTION = float(args.resolution) if args.resolution else None

    Settings.set_stop_words(args.stop_words)

    print(f"Setting Directory as: {Settings.DIRECTORY}")

    # Guard kept deliberately: --project is required, but an empty string
    # (`-p ""`) is falsy and skips processing, matching original behavior.
    if args.project:
        project_name = str(args.project.split('/')[-1])
        project_path = str(args.project)
        Settings.PROJECT_PATH = project_path
        Settings.PROJECT_NAME = project_name

        # cluster_results = (clusters, modularity, resolution)
        clusters_results = identify_clusters_in_project(
            project_name, project_path)

        metrics = Metrics()
        for cluster in clusters_results:
            Settings.create_id()

            # TODO: Refactor MetricExecutor into ProcessResultOutput and
            # MetricExecutor, currently sharing many responsibilities.
            metric_executor = MetricExecutor()
            metric_executor.add_project(project_name, str(cluster[0]))
            metric_executor.dump_to_json_file()

            if args.metrics:
                # TODO: refactor — set_cluster_results(clusters_results) is
                # loop-invariant; kept inside the loop to preserve the exact
                # original call order against a stateful Metrics object.
                metrics.set_metric_executor(metric_executor)
                metrics.set_cluster_results(clusters_results)
                metrics.calculate()

        if args.metrics:
            metrics.save()