        # skip this machine type when a trained model already exists
        # (guard reconstructed; `model_file_path` is assumed to be defined
        # just above this fragment)
        if os.path.exists(model_file_path):
            com.logger.info("model exists")
            continue

        history_img = "{model}/history_{machine_type}.png".format(
            model=param["model_directory"], machine_type=machine_type)
        # pickle file for storing anomaly score distribution
        score_distr_file_path = "{model}/score_distr_{machine_type}.pkl".format(
            model=param["model_directory"], machine_type=machine_type)
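        # the saved score distribution is presumably fitted later (a gamma
        # fit in the DCASE baseline) to derive the anomaly decision threshold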

        # generate dataset
        print("============== DATASET_GENERATOR ==============")

        # get file list for all sections
        # all values of y_true are zero in training
        files, y_true = com.file_list_generator(target_dir=target_dir,
                                                section_name="*",
                                                dir_name="train",
                                                mode=mode)

        dataset = DCASE_Dataset(files, param)

        # split the dataset into training and validation subsets and build data loaders
        train_size = int(len(dataset) * (1 - param["fit"]["validation_split"]))
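        # the split is sequential rather than random: the first train_size
        # samples are used for training and the remainder for validation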

        train_loader = DataLoader(Subset(dataset, list(range(0, train_size))),
                                  batch_size=param["fit"]["batch_size"],
                                  shuffle=param["fit"]["shuffle"],
                                  drop_last=True,
                                  num_workers=os.cpu_count(),
                                  pin_memory=True)

        # validation loader over the remaining samples (arguments assumed to
        # mirror train_loader, without shuffling or dropping the last batch)
        val_loader = DataLoader(Subset(dataset,
                                       list(range(train_size, len(dataset)))),
                                batch_size=param["fit"]["batch_size"],
                                shuffle=False,
                                drop_last=False,
                                num_workers=os.cpu_count(),
                                pin_memory=True)

Example #2

            # header row for the result csv (reconstructed; column names
            # assumed from the DCASE task 2 baseline)
            csv_lines.append(["section", "domain", "AUC", "pAUC",
                              "precision", "recall", "F1 score"])
            performance = []  # per-section results for this machine type

        dir_names = ["source_test", "target_test"]

        for dir_name in dir_names:

            # list section names for this directory
            section_names = com.get_section_names(target_dir,
                                                  dir_name=dir_name)

            for section_name in section_names:
                # load test file
                files, y_true = com.file_list_generator(
                    target_dir=target_dir,
                    section_name=section_name,
                    dir_name=dir_name,
                    mode=mode)

                # setup anomaly score file path
                anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{section_name}_{dir_name}.csv".format(
                    result=param["result_directory"],
                    machine_type=machine_type,
                    section_name=section_name,
                    dir_name=dir_name)
                anomaly_score_list = []

                # setup decision result file path
                decision_result_csv = "{result}/decision_result_{machine_type}_{section_name}_{dir_name}.csv".format(
                    result=param["result_directory"],
                    machine_type=machine_type,
                    section_name=section_name,
                    dir_name=dir_name)

Example #3

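        # `converter` is assumed to be a tf.lite.TFLiteConverter created from
        # the trained Keras model; first, a plain float conversion: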
        tflite_model = converter.convert()
        tflite_file = "{model}/model_{machine_type}.tflite".format(
            model=param["model_directory"], machine_type=machine_type)
        with tf.io.gfile.GFile(tflite_file, 'wb') as f:
            f.write(tflite_model)

        # Quantization of weights (but not the activations)
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        tflite_model = converter.convert()
        tflite_file = "{model}/model_{machine_type}_quant.tflite".format(
            model=param["model_directory"], machine_type=machine_type)
        with tf.io.gfile.GFile(tflite_file, 'wb') as f:
            f.write(tflite_model)
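        # dynamic-range quantization (no representative dataset) typically
        # shrinks the weights roughly 4x while activations stay float32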

        print("============== DATASET_GENERATOR ==============")
        files = com.file_list_generator(target_dir)
        train_data = com.list_to_vector_array(
            files,
            msg="generate train_dataset",
            n_mels=param["feature"]["n_mels"],
            frames=param["feature"]["frames"],
            n_fft=param["feature"]["n_fft"],
            hop_length=param["feature"]["hop_length"],
            power=param["feature"]["power"])
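        # train_data: frame-stacked log-mel feature vectors, one row per
        # context window (assumed shape: (n_samples, n_mels * frames))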

        def representative_dataset_gen():
            # yield every fifth training vector as a single-sample float32
            # batch for post-training quantization calibration
            for sample in train_data[::5]:
                sample = numpy.expand_dims(sample.astype(numpy.float32),
                                           axis=0)
                yield [sample]
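
        # A minimal sketch (not part of the original example): wiring the
        # generator into the converter enables full integer quantization of
        # weights and activations; the output file name is hypothetical.
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset_gen
        tflite_model = converter.convert()
        tflite_file = "{model}/model_{machine_type}_quant_int8.tflite".format(
            model=param["model_directory"], machine_type=machine_type)
        with tf.io.gfile.GFile(tflite_file, 'wb') as f:
            f.write(tflite_model)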