Example 1
def CompletelyTest(data_manager, graph, sess, tester):
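    """Run the whole test set through the restored TF graph batch by batch,
    accumulate a confusion matrix, and save the plot and measurements."""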
    tester.restart()
    # ! Get the input tensor X and the softmax output tensor
    input_X = graph.get_tensor_by_name('Placeholder:0')
    softmax_output = graph.get_tensor_by_name('Softmax:0')
    # ! Fetch the test data in batches
    test_batches = data_manager.get_test_batches(kBatchSize)
    # ! Start Test
    batch_num = int(np.ceil(data_manager.test_samples_num / kBatchSize))
    process_bar = ProcessBar(batch_num)
    for i, test_batch in enumerate(test_batches):
        batch_X, batch_Y = test_batch
        # if kHotClean:
        #     batch_X, batch_Y = BatchCleaner(batch_X, batch_Y)
        if not kIOnly:
            batch_X = ZipIQ(batch_X)
        batch_X = batch_X.reshape(
            (batch_X.shape[0], input_X.shape[1], input_X.shape[2]))
        batch_probability = sess.run(softmax_output,
                                     feed_dict={input_X: batch_X})

        tester.update_confusion_matrix(batch_probability, batch_Y)
        tester.show_confusion_matrix()

        process_bar.UpdateBar(i + 1)
    # ! Show test result
    if not os.path.isdir(kTestResultPath):
        os.makedirs(kTestResultPath)
    tester.show_confusion_matrix(
        img_save_path=os.path.join(kTestResultPath, "confusion_matrix.png"))
    tester.measure()
    tester.show_measure_result(
        rslt_save_path=os.path.join(kTestResultPath, "test_result.txt"))
Example 2
def CompletelyTest(data_manager, net, tester):
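    """Run the whole test set through `net` batch by batch via TestSamples,
    accumulate a confusion matrix, and save the plot and measurements."""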
    tester.restart()
    # ! Fetch the test data in batches
    test_batches = data_manager.get_test_batches(K.BatchSize)
    # ! Start Test
    batch_num = int(np.ceil(data_manager.test_samples_num / K.BatchSize))
    process_bar = ProcessBar(batch_num)
    for i, test_batch in enumerate(test_batches):
        samples, gts = test_batch
        # if K.HotClean:
        #     batch_X, batch_Y = BatchCleaner(batch_X, batch_Y)
        TestSamples(
            samples,
            gts,
            net,
            tester,
            I_only=K.IOnly,
            device=K.Device,
            SNR_generate=K.constant_SNR_generator if K.IsNoise else None)
        tester.show_confusion_matrix()

        process_bar.UpdateBar(i + 1)
    # ! Show test result
    if not os.path.isdir(K.TestResultPath):
        os.makedirs(K.TestResultPath)
    tester.show_confusion_matrix(
        img_save_path=os.path.join(K.TestResultPath, "confusion_matrix.png"))
    tester.measure()
    tester.show_measure_result(
        rslt_save_path=os.path.join(K.TestResultPath, "test_result.txt"))
Example 3
def GetH5ModuleData(data_path: str, h5_module_data_path: str):
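    """Convert each WiFi module's finished I/Q txt dumps under data_path into a
    per-module <module_name>.h5 file (datasets "I", "Q", "Label") and record
    every module's sample count in h5_module_data.txt."""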
    logger = Logger(os.path.join(kLogPath, 'tran_to_h5_log.txt')).logger
    h5_module_data_txt_path = os.path.join(h5_module_data_path,
                                           'h5_module_data.txt')

    # ! Process each wifi module
    module_data_num = dict()
    for wifi_module_path in glob.glob(os.path.join(data_path, '*')):
        if os.path.isdir(wifi_module_path):
            logger.info('-------------------------------------')
            logger.info('Processing Data In ' + wifi_module_path)
            module_name = os.path.split(wifi_module_path)[1]
            # ! Convert only if the output dir exists
            assert os.path.isdir(os.path.join(wifi_module_path, 'output.finished')), \
                "No output finished in {}".format(wifi_module_path)
            # ! Get I and Q paths and sort them so they stay in correspondence
            I_paths = []
            Q_paths = []
            for txt_file_path in glob.glob(
                    os.path.join(wifi_module_path, 'output.finished', '*')):
                if re.search('_I', os.path.split(txt_file_path)[1]):
                    I_paths.append(txt_file_path)
                elif re.search('_Q', os.path.split(txt_file_path)[1]):
                    Q_paths.append(txt_file_path)
            I_paths.sort()
            Q_paths.sort()
            # ! Update module_data_num
            assert len(I_paths) == len(
                Q_paths), "num_I != num_Q in {}".format(wifi_module_path)
            module_data_num[module_name] = len(I_paths)
            # ! Get Numpy and Write to H5
            I_data = np.empty((len(I_paths), kSampleLen), dtype=np.float32)
            Q_data = np.empty((len(Q_paths), kSampleLen), dtype=np.float32)
            labels = np.array([module_name] * len(I_paths))
            process_bar = ProcessBar(len(I_paths))
            for i in range(len(I_paths)):
                I_data[i, :] = np.loadtxt(I_paths[i], dtype=np.float32)
                Q_data[i, :] = np.loadtxt(Q_paths[i], dtype=np.float32)
                process_bar.UpdateBar(i + 1)
            process_bar.Close()
            with h5py.File(
                    os.path.join(h5_module_data_path, module_name + '.h5'),
                    'w') as hf:
                hf.create_dataset("I", data=I_data)
                hf.create_dataset("Q", data=Q_data)
                # http://docs.h5py.org/en/stable/strings.html Store Strings in h5py
                dt = h5py.special_dtype(vlen=str)
                dset_label = hf.create_dataset("Label", labels.shape, dtype=dt)
                dset_label[:] = labels
            # ! Record in txt
            with open(h5_module_data_txt_path, 'a') as txt_f:
                txt_f.write("{} {}\n".format(module_name,
                                             module_data_num[module_name]))
    return module_data_num
Example 4
def _List2H5(sample_list: list, file_prefix: str, h5_module_data_path: str,
             h5_train_test_data_path: str, logger) -> int:
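    """Gather the (module_name, index) samples in sample_list from the
    per-module h5 files and write them out in chunks of kSaveStep samples as
    <file_prefix>_<i1>-<i2>.h5, plus a <file_prefix>_info.json index."""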
    # ! Set basic info for prefix_info.json
    sample_num = len(sample_list)
    h5_names = []
    # ! Save Data Step by Step
    i1 = 0
    while i1 < sample_num:
        # i1 - i2 -----> prefix_i1-i2.h5
        if i1 + kSaveStep < sample_num:
            i2 = i1 + kSaveStep
        else:
            i2 = sample_num
        logger.info("In List {}, Processing Samples from {} to {}".format(
            file_prefix, i1, i2))
        # ! Get I, Q, Labels Data
        I_data = np.empty((i2 - i1, kSampleLen), dtype=np.float32)
        Q_data = np.empty((i2 - i1, kSampleLen), dtype=np.float32)
        labels = np.empty((i2 - i1, ), dtype=object)
        process_bar = ProcessBar(i2 - i1)
        for i, sample in enumerate(sample_list[i1:i2]):
            with h5py.File(
                    os.path.join(h5_module_data_path, sample[0] + '.h5'),
                    'r') as hf:
                I_data[i, :] = hf['I'][sample[1], :]
                Q_data[i, :] = hf['Q'][sample[1], :]
            labels[i] = sample[0]
            process_bar.UpdateBar(i + 1)
        # ! Save To h5 file
        h5_name = "{}_{}-{}.h5".format(file_prefix, i1, i2)
        h5_names.append(h5_name)
        h5_path = os.path.join(h5_train_test_data_path, h5_name)
        logger.info("Saving {}".format(file_prefix, h5_name))
        with h5py.File(h5_path, 'w') as hf:
            hf.create_dataset('I', data=I_data)
            hf.create_dataset('Q', data=Q_data)
            dset_labels = hf.create_dataset('Label',
                                            labels.shape,
                                            dtype=h5py.special_dtype(vlen=str))
            dset_labels[:] = labels
        i1 = i2
    with open(
            os.path.join(h5_train_test_data_path, file_prefix + "_info.json"),
            'w') as jf:
        json.dump(
            {
                "sample_num": sample_num,
                "save_step": kSaveStep,
                "h5_names": h5_names
            }, jf)
    return 0
Example 5
def GetSNRs(h5_module_data_dir):
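    """Estimate a per-module SNR in dB: among samples whose tails are quiet
    relative to their heads (but whose overall power is not low), compare the
    mean head power against the mean tail power."""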
    # ! Process each wifi module
    named_SNRs = {}
    for wifi_module_file in glob.glob(os.path.join(h5_module_data_dir, '*.h5')):
        module_name = os.path.split(wifi_module_file)[1].split('.')[0]
        sh_logger.info("Currently Processing Wifi Module {}".format(module_name))

        # ! Get Raw Data
        with h5py.File(wifi_module_file, 'r') as hf:
            I_data = hf['I'][...]
            Q_data = hf['Q'][...]
        # ! Get SNR
        I_energies = I_data ** 2
        Q_energies = Q_data ** 2
        head_tail_len = int(I_data.shape[1] * kHeadTailPercentage)
        # Get unbalance samples
        indexes_unbalance = []
        sh_logger.debug("Detect Unbalanced")
        process_bar = ProcessBar(I_energies.shape[0] - 1)
        for i in range(I_energies.shape[0]):
            I_energy = I_energies[i, :]
            Q_energy = Q_energies[i, :]
            if (np.mean(I_energy[-head_tail_len:]) <
                    kTailInHeadThreshold * np.mean(I_energy[:head_tail_len])
                    and np.mean(Q_energy[-head_tail_len:]) <
                    kTailInHeadThreshold * np.mean(Q_energy[:head_tail_len])):
                indexes_unbalance.append(i)
            process_bar.UpdateBar(i)
        # Get low power samples
        I_mean_energy = np.mean(I_energies)
        Q_mean_energy = np.mean(Q_energies)
        indexes_low_power = []
        sh_logger.debug("Detect Low Power")
        process_bar = ProcessBar(I_energies.shape[0] - 1)
        for i in range(I_energies.shape[0]):
            I_energy = I_energies[i, :]
            Q_energy = Q_energies[i, :]
            if (np.mean(I_energy) < kSampleInBatchMeanThreshold * I_mean_energy
                    or np.mean(Q_energy) <
                    kSampleInBatchMeanThreshold * Q_mean_energy):
                indexes_low_power.append(i)
            process_bar.UpdateBar(i)

        half_tail_indexes = [i for i in indexes_unbalance
                             if i not in indexes_low_power]
        if kIsDebug:
            print("Plot half tail indexes")
            PlotSamples(I_data[half_tail_indexes, :])
            PlotSamples(Q_data[half_tail_indexes, :])

        head_half_tail_I = I_data[half_tail_indexes, :1000]
        tail_half_tail_I = I_data[half_tail_indexes, -1000:]
        head_half_tail_Q = Q_data[half_tail_indexes, :1000]
        tail_half_tail_Q = Q_data[half_tail_indexes, -1000:]

        head_mean_power = np.mean(head_half_tail_I ** 2 + head_half_tail_Q ** 2)
        tail_mean_power = np.mean(tail_half_tail_I ** 2 + tail_half_tail_Q ** 2)
        named_SNRs[module_name] = 10 * np.log10(head_mean_power / tail_mean_power)

        print(f'Current SNR is {named_SNRs[module_name]}')
    return named_SNRs
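For reference, the stored value is the standard decibel power ratio, SNR = 10 * log10(P_head / P_tail). A minimal standalone sketch of just that conversion, using hypothetical powers (not from the original code):
import numpy as np

# Hypothetical head/tail mean powers; a power ratio of 100 corresponds to 20 dB.
head_mean_power, tail_mean_power = 1.0, 0.01
snr_db = 10 * np.log10(head_mean_power / tail_mean_power)
assert np.isclose(snr_db, 20.0)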
Example 6
        if kIsRecover:
            start_epoch = kRecoverEpochNum + 1
        else:
            start_epoch = 0
        for epoch in range(start_epoch, kNumEpochs):
            epoch_start_time = time.time()
            logger.info('****** Epoch: {}/{} ******'.format(epoch, kNumEpochs))

            batches_num = int(np.ceil(data_manager.train_samples_num / kBatchSize))
            # Init data_manager
            data_manager.init_epoch()
            # Get batches generator
            train_batches = data_manager.get_train_batches(kBatchSize)

            # iteration
            process_bar = ProcessBar(batches_num)
            for i, train_batch in enumerate(train_batches):
                # unpack the training batch
                batch_X, batch_Y = train_batch
                if not kIOnly:
                    batch_X = ZipIQ(batch_X)
                batch_X = batch_X.reshape(batch_X.shape[0], lstm_model.TIMESTEPS, -1)
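                # NOTE: this snippet is a fragment; `iteration`, `sess`,
                # `optimizer`, `merged`, `loss`, `lstm_model` and `train_writer`
                # are assumed to be defined by the omitted surrounding setup code.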

                if iteration % 5 == 0:
                    _, train_summary, current_loss = \
                        sess.run([optimizer, merged, loss], feed_dict={lstm_model.X: batch_X, lstm_model.Y: batch_Y})
                    train_writer.add_summary(train_summary, iteration)

                    test_X, test_Y = data_manager.get_random_test_samples(kBatchSize)
                    if not kIOnly:
                        test_X = ZipIQ(test_X)
Example 7
    # * Start training
    # ** Restore epochID
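    # NOTE: this snippet is a fragment; `iteration`, `writer`, `tester`, `logger`
    # and the `K` config object are assumed defined by the omitted setup code.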
    batches_num = int(np.ceil(data_manager.train_samples_num / K.BatchSize))
    start_epoch = int(np.round(iteration / batches_num))
    for epochID in range(start_epoch, K.NumEpochs):
        logger.info('****** Epoch: {}/{} ******'.format(
            epochID, K.NumEpochs - 1))

        # * Init data_manager & Get batches generator
        data_manager.init_epoch()
        train_batches = data_manager.get_train_batches(K.BatchSize)
    # * Init sum_loss and tester
        sum_loss = 0
        tester.restart()
        # * Process bar
        process_bar = ProcessBar(batches_num)
        for batch_ID, train_batch in enumerate(train_batches):
            # ! Test every log_interval iteration
            if iteration % K.TrainLogInterval == 0 and iteration != 0 and \
                    (not (K.IsRecover and iteration == K.LoadModelNum)):
                train_loss = sum_loss / K.TrainLogInterval
                tester.measure()
                train_accuracy = tester.micro_avg_precision
                writer.add_scalar('train/loss',
                                  train_loss,
                                  global_step=iteration)
                writer.add_scalar('train/accuracy',
                                  train_accuracy,
                                  global_step=iteration)
                writer.add_scalars('loss', {'train': train_loss},
                                   global_step=iteration)
Example 8
def CleanModuleData(h5_module_data_dir, clean_h5_module_data_dir):
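    """Drop samples with quiet tails or low overall energy from each per-module
    h5 file and save the cleaned data to clean_h5_module_data_dir."""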
    logger = Logger(os.path.join(kLogFile, 'clean_module_data.txt')).logger
    clean_h5_module_data_txt_file = os.path.join(clean_h5_module_data_dir,
                                                 'h5_module_data.txt')
    # ! Process each wifi module
    module_data_num = dict()
    for wifi_module_file in glob.glob(os.path.join(h5_module_data_dir,
                                                   '*.h5')):
        module_name = os.path.split(wifi_module_file)[1].split('.')[0]
        logger.info("Currently Processing Wifi Module {}".format(module_name))
        if kIsChar2Num:
            if module_name in kChar2NumDict:
                module_name = kChar2NumDict[module_name]
                logger.info(
                    "Replace Wifi Module Name to {}".format(module_name))
        # ! Skip if Already Processed
        if os.path.isfile(
                os.path.join(clean_h5_module_data_dir, module_name + '.h5')):
            logger.warning("{}.h5 Already Exists, Skip".format(module_name))
            continue
        # ! Get Raw Data
        with h5py.File(wifi_module_file, 'r') as hf:
            I_data = hf['I'][...]
            Q_data = hf['Q'][...]
        # ! Clean Data
        I_energies = I_data**2
        Q_energies = Q_data**2
        head_tail_len = int(I_data.shape[1] * kHeadTailPercentage)
        # ! Delete Samples whose tail energy is less than head's 80%
        del_indexes = []
        sh_logger.debug("Start Quite Tail Discarding")
        process_bar = ProcessBar(I_energies.shape[0] - 1)
        for i in range(I_energies.shape[0]):
            I_energy = I_energies[i, :]
            Q_energy = Q_energies[i, :]
            if (np.mean(I_energy[-head_tail_len:]) <
                    kTailInHeadThreshold * np.mean(I_energy[:head_tail_len])
                    or np.mean(Q_energy[-head_tail_len:]) <
                    kTailInHeadThreshold * np.mean(Q_energy[:head_tail_len])):
                del_indexes.append(i)
            process_bar.UpdateBar(i)
        if kIsDebug:
            print("Plot Samples Deleted for tail < 0.8 * head")
            PlotSamples(I_data[del_indexes, :])
            PlotSamples(Q_data[del_indexes, :])
        I_data = np.delete(I_data, del_indexes, axis=0)
        Q_data = np.delete(Q_data, del_indexes, axis=0)
        I_energies = np.delete(I_energies, del_indexes, axis=0)
        Q_energies = np.delete(Q_energies, del_indexes, axis=0)
        # ! Delete Samples whose mean energy is too small
        I_mean_energy = np.mean(I_energies)
        Q_mean_energy = np.mean(Q_energies)
        del_indexes = []
        sh_logger.debug("Start Whole Quite Discarding")
        process_bar = ProcessBar(I_energies.shape[0] - 1)
        for i in range(I_energies.shape[0]):
            I_energy = I_energies[i, :]
            Q_energy = Q_energies[i, :]
            if (np.mean(I_energy) < kSampleInBatchMeanThreshold * I_mean_energy
                    or np.mean(Q_energy) <
                    kSampleInBatchMeanThreshold * Q_mean_energy):
                del_indexes.append(i)
            process_bar.UpdateBar(i)
        if kIsDebug:
            print("Plot Samples Deleted for sample_mean < 0.8 * samples_mean")
            PlotSamples(I_data[del_indexes, :])
            PlotSamples(Q_data[del_indexes, :])
        I_data = np.delete(I_data, del_indexes, axis=0)
        Q_data = np.delete(Q_data, del_indexes, axis=0)
        labels = np.array([module_name] * I_data.shape[0], dtype=object)
        # ! Save to h5 file
        sh_logger.debug("Start Saving H5")
        with h5py.File(
                os.path.join(clean_h5_module_data_dir,
                             module_name + '.h5'), 'w') as hf:
            hf.create_dataset('I', data=I_data)
            hf.create_dataset('Q', data=Q_data)
            dset_labels = hf.create_dataset('Label',
                                            labels.shape,
                                            dtype=h5py.special_dtype(vlen=str))
            dset_labels[:] = labels
        # ! Record in txt
        module_data_num[module_name] = I_data.shape[0]
        with open(clean_h5_module_data_txt_file, 'a') as txt_f:
            txt_f.write("{} {}\n".format(module_name,
                                         module_data_num[module_name]))
    return module_data_num