Example #1
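        # mode is True when the test data comes with ground-truth labels,
        # so metric rows can be collected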
        if mode:
            # results for each machine type
            csv_lines.append([machine_type])
            csv_lines.append([
                "section", "domain", "AUC", "pAUC", "precision", "recall",
                "F1 score"
            ])
            performance = []

        dir_names = ["source_test", "target_test"]

        for dir_name in dir_names:

            # list section names for this test split
            section_names = com.get_section_names(target_dir,
                                                  dir_name=dir_name)

            for section_name in section_names:
                # load test file
                files, y_true = com.file_list_generator(
                    target_dir=target_dir,
                    section_name=section_name,
                    dir_name=dir_name,
                    mode=mode)

                # setup anomaly score file path
                anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{section_name}_{dir_name}.csv".format(
                    result=param["result_directory"],
                    machine_type=machine_type,
                    section_name=section_name,
                    dir_name=dir_name)
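The snippet ends at the path of the per-file anomaly score CSV. For context, here is a minimal sketch of how one csv_lines row matching the header above could be computed once y_pred holds one anomaly score per test file; the helper name and the fixed decision threshold are hypothetical, not the baseline's actual logic:

import numpy as np
from sklearn import metrics

def result_row(section_name, dir_name, y_true, y_pred,
               max_fpr=0.1, threshold=0.5):
    # hypothetical helper: one csv_lines row per (section, domain),
    # matching the header ["section", "domain", "AUC", "pAUC",
    # "precision", "recall", "F1 score"]
    auc = metrics.roc_auc_score(y_true, y_pred)
    p_auc = metrics.roc_auc_score(y_true, y_pred, max_fpr=max_fpr)
    # binarize the scores with an assumed fixed threshold
    y_decision = (np.asarray(y_pred) > threshold).astype(int)
    precision = metrics.precision_score(y_true, y_decision, zero_division=0)
    recall = metrics.recall_score(y_true, y_decision, zero_division=0)
    f1 = metrics.f1_score(y_true, y_decision, zero_division=0)
    return [section_name, dir_name, auc, p_auc, precision, recall, f1]

Each returned row would be appended to csv_lines, and the (auc, p_auc) pair presumably to performance for a per-machine-type average.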
Example #2
        if os.path.exists(model_file_path):
            com.logger.info("model exists")
            continue

        history_img = "{model}/history_{machine_type}.png".format(
            model=param["model_directory"], machine_type=machine_type)
        # pickle file for storing section names
        section_names_file_path = "{model}/section_names_{machine_type}.pkl".format(
            model=param["model_directory"], machine_type=machine_type)
        # pickle file for storing anomaly score distribution
        score_distr_file_path = "{model}/score_distr_{machine_type}.pkl".format(
            model=param["model_directory"], machine_type=machine_type)

        # get section names from wave file names
        section_names = com.get_section_names(target_dir, dir_name="train")
        unique_section_names = np.unique(section_names)
        n_sections = unique_section_names.shape[0]

        # save the unique section names for reuse at inference time
        joblib.dump(unique_section_names, section_names_file_path)

        # generate dataset
        print("============== DATASET_GENERATOR ==============")
        # number of wave files in each section
        # required for calculating y_pred for each wave file
        n_files_ea_section = []

        data = np.empty(
            (0, param["feature"]["n_frames"] * param["feature"]["n_mels"]),
            float)
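The snippet stops right after allocating an empty array of width n_frames * n_mels. Below is a minimal sketch, assuming the usual log-mel frontend via librosa, of a feature extractor producing rows of exactly that width; the function name and default parameters are illustrative, not the repository's actual implementation:

import numpy as np
import librosa

def file_to_vectors(file_path, n_mels, n_frames,
                    n_fft=1024, hop_length=512, power=2.0):
    # load the wave file and compute a log-mel spectrogram
    y, sr = librosa.load(file_path, sr=None, mono=True)
    mel = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=n_fft,
                                         hop_length=hop_length,
                                         n_mels=n_mels, power=power)
    log_mel = 20.0 / power * np.log10(np.maximum(mel, np.finfo(float).eps))
    # slide a window of n_frames over the spectrogram so every row is
    # one flattened (n_frames * n_mels) feature vector
    n_vectors = log_mel.shape[1] - n_frames + 1
    if n_vectors < 1:
        return np.empty((0, n_frames * n_mels))
    vectors = np.zeros((n_vectors, n_frames * n_mels))
    for t in range(n_frames):
        vectors[:, n_mels * t:n_mels * (t + 1)] = log_mel[:, t:t + n_vectors].T
    return vectors

Each file's matrix would then be stacked onto data (e.g. with np.concatenate), and the number of files per section recorded in n_files_ea_section so that per-file predictions can later be recovered from the flat score array.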