Code example #1
# imports inferred from the sibling examples on this page; `channel`, `segment`
# and the mark_*/train_test_*/Extract*/obtain_* helpers used below are defined
# elsewhere in the original project
from feature_extraction.TESPAR.Encoding import Encoding
from input_reader.InitDataSet import InitDataSet


def initData():
    initialization = InitDataSet()
    doas = initialization.get_dataset_as_doas()
    encoding = Encoding('./../../data_to_be_saved/alphabet_3.txt')
    # EITHER option 1 OR option 2:
    # option 1: a single threshold
    mark_bursts_regions_one_threshold(doas)

    # option 2: different thresholds per level
    # mark_bursts_regions(doas)

    # remove_bursted_trials_when_segment(doas)


    # doas_train, doas_test, ind_test = train_test_doa(doas, 0.2)
    # build my own train/test split
    doas_train, doas_test, ind_test = train_test_doa_check_trials(doas, 0.2)

    train_data = ExtractData(doas_train, [channel], ['light', 'medium', 'deep'], [segment], ['all'])
    test_data = ExtractData(doas_test, [channel], ['light', 'medium', 'deep'], [segment], ['all'])

    # FFT features only
    X_train, y_train = obtain_TESPAR_A_FFT_features(train_data)
    x_test, y_test = obtain_TESPAR_A_FFT_features(test_data)

    return X_train, y_train, x_test, y_test
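
A hedged usage sketch (not part of the original source): the features returned by initData() can be fed to any scikit-learn classifier; the SVC choice and its settings below are assumptions.

from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, f1_score

X_train, y_train, x_test, y_test = initData()
model = SVC(gamma='auto')  # hypothetical classifier choice
model.fit(X_train, y_train)
predictions = model.predict(x_test)
print('accuracy:', accuracy_score(y_test, predictions))
print('f1-score:', f1_score(y_test, predictions, average='weighted'))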
Code example #2
# imports for DataFrame, Encoding and InitDataSet inferred from the sibling examples
from pandas import DataFrame

from feature_extraction.TESPAR.Encoding import Encoding
from input_reader.InitDataSet import InitDataSet
from utils.DataSpliting import train_test_doa, obtain_features_labels

# DataFrame that keeps the average and standard deviation of the runs
columns = ['ch train', 'ch test', 'segment', 'acc avr', 'acc std_dev', 'f1-sc avr', 'f1-sc std_dev']
df_results = DataFrame(columns=columns)
# df_results.to_csv(csv_results, mode='a', header=True)

train_channels = [4, 6, 12, 2, 14]  # good overall
test_channels = [21, 20, 29, 16, 19]  # bad overall

segment = 'spontaneous'

# how many models to train for a channel-segment pair
run_nr = 10

initialization = InitDataSet()
doas = initialization.get_dataset_as_doas()
encoding = Encoding('./../../data_to_be_saved/alphabet_1_150hz.txt')

# ############################## train on good channel, test on good channel ##############################
# accuracies = [[[] for i in range(len(train_channels))] for j in range(len(train_channels))]
# f1scores = [[[] for i in range(len(train_channels))] for j in range(len(train_channels))]
#
# for run in range(run_nr):
#     # first split the input into train and test sets
#     doas_train, doas_test, ind_test = train_test_doa(doas, 0.2)
#
#     for ind_train, ch_train in enumerate(train_channels):
#         for ind_test, ch_test in enumerate(train_channels):
#             print("start running for channel " + str(ch_train) + ' and ' + str(ch_test) + ' ' + segment + '\n')
#
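
A hedged sketch, assuming the commented-out loop above is enabled and fills accuracies and f1scores: one averaged row per channel pair is appended to df_results, matching the column layout defined above (csv_results is the output path used by the commented to_csv call, so it is an assumption here).

import numpy as np

# one row per (train channel, test channel) pair
for i, ch_train in enumerate(train_channels):
    for j, ch_test in enumerate(train_channels):
        df_results.loc[len(df_results)] = [ch_train, ch_test, segment,
                                           np.mean(accuracies[i][j]), np.std(accuracies[i][j]),
                                           np.mean(f1scores[i][j]), np.std(f1scores[i][j])]

df_results.to_csv(csv_results, mode='a', header=False)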
Code example #3
# excerpt: body of a helper that collects inter-burst distances and returns a quantile of them
                        for ind_tuple in range(len(outside_in) - 1):
                            # get the beginning and end of the inter-burst zone
                            index_start = outside_out[ind_tuple][0]
                            index_end = outside_in[ind_tuple + 1][0]
                            dist_within = index_end - index_start

                            distances.append(dist_within)

    quantile = np.quantile(distances, q=q)

    print(f'quantile={quantile} for q = {q}')

    return quantile


initialization = InitDataSet(levels=['deep', 'medium', 'light'])
doas = initialization.get_dataset_as_doas()

# print('start marking interbursts')
# mark_burst_basic_one_threshold(doas, threshold=19.94)
# print('see inter bursts distances')
# see_procents_distances(doas)
#
# '''
#     output of the script: quantile=364.0 for q = 0.95
#
# '''

mark_burst_basic(doas, thresholds={'deep': 19.74, 'medium': 24.97, 'light': 32.00})
see_procents_distances(doas)
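
For reference, a minimal self-contained illustration (hypothetical data, not from the source) of what the quantile computation in the excerpt above returns:

import numpy as np

distances = [10, 20, 30, 400, 500]  # hypothetical inter-burst distances
# 95% of the observed distances fall below the returned value
print(np.quantile(distances, q=0.95))  # 480.0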
Code example #4
import os

from feature_extraction.TESPAR.Encoding import Encoding
from input_reader.InitDataSet import InitDataSet
from utils.mark_bursts.MarkOutsiderWithBurstFlags_SeparateThresholds import mark_bursts_regions
from utils.mark_bursts.MarkOutsidersWithBurstsFlags import remove_bursted_trials_when_segment
from vizualization.TESPAR.PlotTESPARMatrices import get_channel_matrix_A, get_channel_trial_matrix_A

data_dir = os.path.join('..', '..')
initialization = InitDataSet(current_directory=data_dir,
                             subject_directory='m014',
                             filtering_directory='classic',
                             levels=['deep2', 'medium3', 'light4'])
doas = initialization.get_dataset_as_doas()

# mark_bursts_regions(doas)

# remove_bursted_trials_when_segment(doas)

encoding = Encoding('./../../data_to_be_saved/alphabet_3.txt')

# good_channels = [2, 3, 4]
# deep_trials = [5, 6, 11, 14]
# medium_trials = [5, 6, 14]
# light_trials = [8, 11, 14]
# for i in range(len(good_channels)):
#     for j in range(len(light_trials)):
#         get_channel_trial_matrix_A(encoding, doas, 'light', good_channels[i], light_trials[j], log=False)

get_channel_trial_matrix_A(encoding, doas, 'deep2', 2, 14, log=False)
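
The commented-out double index loop above can be written more compactly with itertools.product; a sketch under the same assumptions, reusing the names from the commented block:

from itertools import product

good_channels = [2, 3, 4]
light_trials = [8, 11, 14]

# plot the TESPAR A matrix for every (channel, trial) pair
for ch, trial in product(good_channels, light_trials):
    get_channel_trial_matrix_A(encoding, doas, 'light', ch, trial, log=False)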
Code example #5
# imports for DataFrame, Encoding and InitDataSet inferred from the sibling examples;
# `csv_file` (the output path) is presumably defined earlier in the original script
from pandas import DataFrame

from feature_extraction.TESPAR.Encoding import Encoding
from input_reader.InitDataSet import InitDataSet

run_nr = 10

channels_range = 7
# all_channels = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 25, 26, 27, 28, 29, 30,
#                 31, 32]
all_channels = [6, 7, 15, 20, 26, 27]

segments = ['spontaneous', 'stimulus', 'poststimulus']

# DataFrame that keeps all runs for all channels; it will be appended to the .csv file
column_names = ['channel', 'segment', 'accuracy', 'f1-score']
df_all = DataFrame(columns=column_names)
df_all.to_csv(csv_file, mode='a', header=True)

# initialization = InitDataSet()
initialization = InitDataSet(trials_to_skip=[1, 2])
doas = initialization.get_dataset_as_doas()
# mark_outsiders(doas, max_interbursts_dist=500)
# encoding = Encoding('./../../data_to_be_saved/alphabet_3.txt')
encoding = Encoding('./../../data_to_be_saved/alphabet_5.txt')
'''
for calculating the average accuracy and f1-score we keep
a nested list with one list of per-run values
for each (segment, channel) pair
'''
accuracies = [[[] for i in range(channels_range - 1)]
              for j in range(len(segments))]
f1scores = [[[] for i in range(channels_range - 1)]
            for j in range(len(segments))]

for run in range(run_nr):
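
The snippet ends mid-loop; a hedged sketch (the numpy reduction is an assumption) of how the nested lists above would typically be reduced once the runs have filled them:

import numpy as np

# average and standard deviation per (segment, channel) cell
for seg_ind, seg in enumerate(segments):
    for ch_ind in range(channels_range - 1):
        runs = accuracies[seg_ind][ch_ind]
        if runs:
            print(seg, 'channel index', ch_ind,
                  'acc avg =', np.mean(runs), 'acc std =', np.std(runs))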
Code example #6
# imports inferred from the sibling examples; `intervals`, `run_nr` and `SplitData`
# are presumably defined earlier in the original script
from feature_extraction.TESPAR.Encoding import Encoding
from input_reader.InitDataSet import InitDataSet
from utils.DataSpliting import train_test_doa

channel = 1
segment = 'spontaneous'

encoding = Encoding('./../../data_to_be_saved/alphabet_3.txt')

print('SVM 0.2 seg=' + str(segment) + ' ch=' + str(channel))

for ind_interval, interval in enumerate(intervals):

    print('interval= ' + str(interval))

    accuracies = []
    f1scores = []

    initialization = InitDataSet(trials_to_skip=interval)
    doas = initialization.get_dataset_as_doas()

    for run in range(run_nr):
        # first split the input into train and test sets
        doas_train, doas_test, ind_test = train_test_doa(doas, 0.2)
        print('lengths: train deep {}, light {}'.format(
            len(doas_train[0].channels[0].trials),
            len(doas_train[1].channels[0].trials)))
        print('lengths: test deep {}, light {}'.format(
            len(doas_test[0].channels[0].trials),
            len(doas_test[1].channels[0].trials)))
        #
        print("run: {}".format(run))

        train_data = SplitData(doas_train, [channel], ['light', 'deep'],