def process_ground_truth(file_name):
    ground_truth = GroundTruth(None, file_name)
    ground_truth.process_ground_truth(10)

    fig, ax = plt.subplots(2, 1)
    fig.suptitle("Time series", fontsize=14)

    ax[0].plot(ground_truth.time_base,
               ground_truth.ecg_wave,
               label='ECG Waveform',
               color=(0, 0, 1))
    ax[1].plot(ground_truth.time_base,
               ground_truth.ecg_heart_rate,
               label='ECG Heart rate',
               color=(1, 0, 0))
    ax[1].plot(ground_truth.time_base,
               ground_truth.ecg_average,
               label='ECG Heart rate (average)',
               color=(0, 1, 0))

    ax[0].legend(loc='best')
    ax[1].legend(loc='best')

    plt.ion()
    plt.pause(0.00001)
    plt.show()
    input("press enter")
Example #2
 def __init__(self, root, name):
     """
     Evaluate preprocessing effectiveness.
     Depends on `GroundTruth`, which should include a `segments` dataframe.
     """
     ExpCommon.__init__(self, root, name)
     gt = GroundTruth(self.root, self.name)
     Scov.__init__(self, gt.load("segments"))
Example #3
 def __process_ground_truth(self, video_file_or_camera):
     if video_file_or_camera != 0:
         # processing a video file, check to see if there is an existing ground_truth file
         # noinspection PyPep8
         try:
             folder = os.path.dirname(video_file_or_camera)
             ground_truth_file = "{}/ground_truth.txt".format(folder)
             ground_truth = GroundTruth(self.logger, ground_truth_file)
             ground_truth.process_ground_truth(
                 int(self.config["pulse_sample_frames"] /
                     self.config["video_fps"]))
             self.ground_truth = ground_truth
         except Exception as e:
             self.logger.error("Exception: {}".format(e))
Example #4
 def pack_gnd(self, roots, names, targets):
     """
     `roots`: dataset roots
     `names`: dataset names, paired with roots
         `targets`: list of data formats; currently supported:
         `absp`: for `absolute pairs`
         `relp`: for `relative pairs`
         `seg`: for `segments` (**buggy**)
     Return a dict of packed data keyed by `<root>_<name>`
     """
     keg = {}
     for root, name in zip(roots, names):
         gt = GroundTruth(root, name)
         ns = root + "_" + name
         keg[ns] = self.__keg_pack(gt, targets)
     return keg
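A hypothetical call of `pack_gnd`, assuming `exp` is an instance of the surrounding class and the roots/names refer to existing datasets (all concrete values below are assumptions):

    # Hypothetical usage sketch; `exp`, the roots, names and targets are assumptions.
    keg = exp.pack_gnd(roots=["rootA", "rootB"],
                       names=["ds1", "ds2"],
                       targets=["absp", "relp"])
    # The result is keyed as "<root>_<name>", e.g. keg["rootA_ds1"].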
Example #5
    def extract_pitch_and_roll_from_sun_rgbd_extrinsics_text_file(file_path):
        """
        Gets the pitch and roll from a SUN RGB-D extrinsics file.

        :param file_path: The path to the extrinsics file.
        :type file_path: str
        :return: The pitch and the roll.
        :rtype: (float, float)
        """
        extrinsics_array = np.transpose(np.loadtxt(file_path,
                                                   dtype=np.float32))
        ground_truth = GroundTruth()
        ground_truth.r1 = extrinsics_array[0]
        ground_truth.r2 = extrinsics_array[1]
        ground_truth.r3 = extrinsics_array[2]
        return -ground_truth.pitch(), -ground_truth.roll()
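A hypothetical call, assuming `numpy` is imported as `np` and the path points at a SUN RGB-D extrinsics text file (the path is an assumption):

    # Hypothetical usage sketch; the file path is an assumption.
    pitch, roll = extract_pitch_and_roll_from_sun_rgbd_extrinsics_text_file(
        "path/to/extrinsics.txt")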
Example #6
    def evaluate_performance(self, gt_file, verbose=False, print_TP=True, method=2, q=2):

        performance = []
        gt = GroundTruth(gt_file, self.file_list, self.data_list, verbose=verbose)
        probs = set([res.prob for res in self.alignment_results])
        if len(probs) == 1:

            peaksets = [(res.peakset, res.prob) for res in self.alignment_results]
            if method == 1:
                results = gt.evaluate_alignment_results_1(peaksets, 1.0, annotations=self.annotations, feature_binning=None, verbose=verbose, print_TP=print_TP)
            elif method == 2:
                results = gt.evaluate_alignment_results_2(peaksets, 1.0, annotations=self.annotations, feature_binning=None, verbose=verbose, print_TP=print_TP)
            elif method == 3:
                results = gt.evaluate_alignment_results_3(peaksets, 1.0, annotations=self.annotations, feature_binning=None, verbose=verbose, print_TP=print_TP, q=q)
            performance.append(results)

        else:

            sorted_probs = sorted(probs)
            for th_prob in sorted_probs:
                # print "Processing %.3f" % th_prob
                sys.stdout.flush()
                peaksets = []
                for ps in self.alignment_results:
                    if ps.prob > th_prob:
                        peaksets.append(ps)
                # print len(peaksets)
                if len(peaksets) > 0:
                    if method == 1:
                        results = gt.evaluate_alignment_results_1(peaksets, th_prob, annotations=self.annotations, feature_binning=None, verbose=verbose)
                    elif method == 2:
                        results = gt.evaluate_alignment_results_2(peaksets, th_prob, annotations=self.annotations, feature_binning=None, verbose=verbose)
                    elif method == 3:
                        results = gt.evaluate_alignment_results_3(peaksets, th_prob, annotations=self.annotations, feature_binning=None, verbose=verbose, q=q)
                    # print results
                    if results is not None:
                        performance.append(results)

        return performance
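A hypothetical call, assuming `aligner` is an instance of the surrounding class with `alignment_results`, `file_list`, `data_list`, and `annotations` already populated (the object name and ground-truth file are assumptions):

    # Hypothetical usage sketch; `aligner` and the file name are assumptions.
    performance = aligner.evaluate_performance("ground_truth.txt", method=2, q=2)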
Example #8
    audio_folder = os.path.expanduser(conf.get('Input', 'AudioFolder'))
    ground_truth_path = os.path.expanduser(conf.get('Preprocessing', 'GroundTruthPath'))
    features_path = os.path.expanduser(conf.get('Preprocessing', 'RawFeaturesPath'))
    extensions = available_file_formats()
    lengthinseconds = int(conf.get('Tracks', 'LengthInSeconds'))
    samplerate = int(conf.get('Tracks', 'SampleRate'))
    encoding = conf.get('Tracks', 'Encoding')
    channels = int(conf.get('Tracks', 'Channels'))
    windowsize = int(conf.get('Spectrogram', 'WindowSize'))
    stepsize = int(conf.get('Spectrogram', 'StepSize'))
    windowtype = conf.get('Spectrogram', 'WindowType')
    fftres = int(conf.get('Spectrogram', 'FFTResolution'))
    if os.path.isdir(audio_folder):
        # ground truth
        audiofiles = list_audio_files_and_genres(audio_folder, extensions)
        gt = GroundTruth(audiofiles)
        gt.save_to_pickle_file(ground_truth_path)
        logging.info("Ground Truth: saved in %s" % ground_truth_path)

        # feature extraction
        logging.info("Feature Extraction: Calculating %i spectrograms... (this may take a while)" % len(gt.ground_truth))
        h5file = tables.open_file(features_path, mode="w", title="Features")
        table = h5file.create_table("/", 'track', Track, "Track")
        tr = table.row
        i = 0
        for filename, genre in gt.ground_truth.iteritems():
            # Read file
            f = Sndfile(filename, mode='r')

            # Check against specs
            check_audio_file_specs(f, samplerate, encoding, channels)
Example #9
from nltk.corpus.reader import WordListCorpusReader
from underthesea import word_tokenize
import re

from model import DictionaryBasedSpellingDetection
from ground_truth import GroundTruth


punctuation_marks = '.,?"\'()[]{}!:&;-+%/*#'

model = DictionaryBasedSpellingDetection(punctuation_marks, './corpus', ['dict.txt'])
ground_truth = GroundTruth(punctuation_marks)


# Evaluation
# ----------------------------------------------------------------------------------------------------------------------
def evaluate(inp: str, trg: str):
    """
    :param inp: str
    :param trg: str
    :return: (number of correctly detected errors, number of falsely detected errors, number of missed errors)
    """
    ref = ground_truth.get_wrong_position(inp, trg)
    predict = model.predict(inp)
    total_ = len(ref)
    correct_ = 0
    wrong_ = 0
    for w in predict:
        # At least one position of w appears in ref
        if len(w - ref) < len(w):
            correct_ += 1
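        # The example is truncated here; the lines below are a hypothetical
        # continuation, assumed from the docstring's (correct, wrong, missed)
        # return contract, not the original author's code.
        else:
            wrong_ += 1  # detection that matches no reference error position
    missed_ = total_ - correct_  # reference errors that were never flagged
    return correct_, wrong_, missed_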
Example #10
import cPickle
from ground_truth import GroundTruth

PATH = "../resources/"
json_file = PATH + "dumps_map1.json"
yaml_file = PATH + "nominal_gate_locations.yaml"
gt = GroundTruth(json_file=json_file,
                 yaml_file=yaml_file,
                 split=0.1,
                 nb_timesteps=8)

with open("../resources/data_maxime.pkl", "wb") as f:
    cPickle.dump(gt, f)

PATH = "../resources/"
json_file = PATH + "test_dumps_map1.json"
yaml_file = PATH + "nominal_gate_locations.yaml"
gt = GroundTruth(json_file=json_file, yaml_file=yaml_file)

with open("../resources/data_test.pkl", "wb") as f:
    cPickle.dump(gt, f)
Example #11
 def setGroundTruth(self, datapath, ratio_valid=0.1):
     # PATH = "../resources"
     self.gt = GroundTruth(path=datapath, split=ratio_valid)
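A hypothetical call, assuming `exp` is an instance of the surrounding class (the object name and split ratio are assumptions; the path reuses the one suggested in the comment above):

    # Hypothetical usage sketch; `exp` and the split ratio are assumptions.
    exp.setGroundTruth("../resources", ratio_valid=0.2)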