コード例 #1
0
def create_data_rep_training(file_name, fist_time_stamp, last_time_stamp):
    """
    Load one subject's .mat recording, cut it into epochs, and reshape the
    epochs so operations per stimulus category are easy.

    :param file_name: path to the subject's .mat recording.
    :param fist_time_stamp: epoch start offset relative to each marker
        (presumably milliseconds -- confirm against ExtractDataVer4).
    :param last_time_stamp: epoch end offset relative to each marker.
    :return: (all_data_per_char, target_per_char, train_mode_per_block,
              all_data_per_char_as_matrix, target_per_char_as_matrix)
    """
    gcd_res = readCompleteMatFile(file_name)
    data_for_eval = ExtractDataVer4(gcd_res['all_relevant_channels'],
                                    gcd_res['marker_positions'],
                                    gcd_res['target'], fist_time_stamp,
                                    last_time_stamp)

    # Downsampling factor 1 keeps the sample count unchanged.
    data_for_eval = (downsample_data(data_for_eval[0],
                                     data_for_eval[0].shape[1],
                                     1), data_for_eval[1])

    # One train-mode flag per block of 30 stimuli (first epoch of each block).
    train_mode_per_block = gcd_res['train_mode'].reshape(-1, 30)[:, 0]
    all_data_per_char_as_matrix = np.zeros(
        (train_mode_per_block.shape[0], 30, data_for_eval[0].shape[1],
         data_for_eval[0].shape[2]))
    all_data_per_char = dict()
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int produces the identical dtype.
    target_per_char_as_matrix = np.zeros((train_mode_per_block.shape[0], 30),
                                         dtype=int)
    # Stimulus codes run 1..30; i is the matching 0-based column index.
    for i, stimuli_i in enumerate(range(1, 31)):
        all_data_per_char[i] = data_for_eval[0][gcd_res['stimulus'] ==
                                                stimuli_i]
        all_data_per_char_as_matrix[:, i, :, :] = data_for_eval[0][
            gcd_res['stimulus'] == stimuli_i]

    target_per_char = dict()
    for i, stimuli_i in enumerate(range(1, 31)):
        target_per_char[i] = data_for_eval[1][gcd_res['stimulus'] == stimuli_i]
        target_per_char_as_matrix[:, i] = data_for_eval[1][gcd_res['stimulus']
                                                           == stimuli_i]

    return all_data_per_char, target_per_char, train_mode_per_block, all_data_per_char_as_matrix, target_per_char_as_matrix
コード例 #2
0
def create_data_rep_training(file_name, fist_time_stamp, last_time_stamp, downsampe_params=1):
    """
    The function divides the data into epochs and shapes it such that it is
    easy to do operations per stimuli category on it.
    :param file_name: path to the subject's .mat recording.
    :param fist_time_stamp: epoch start offset relative to each marker.
    :param last_time_stamp: epoch end offset relative to each marker.
    :param downsampe_params: downsampling factor forwarded to
        downsample_data (default 1, i.e. no downsampling).
    :return: (all_data_per_char, target_per_char, train_mode_per_block,
              all_data_per_char_as_matrix, target_per_char_as_matrix)
    """
    gcd_res = readCompleteMatFile(file_name)
    data_for_eval = ExtractDataVer4(gcd_res['all_relevant_channels'], gcd_res['marker_positions'],
                                    gcd_res['target'], fist_time_stamp, last_time_stamp)

    data_for_eval = (downsample_data(data_for_eval[0], data_for_eval[0].shape[1], downsampe_params), data_for_eval[1])

    # One train-mode flag per block of 30 stimuli (first epoch of each block).
    train_mode_per_block = gcd_res['train_mode'].reshape(-1, 30)[:, 0]
    all_data_per_char_as_matrix = np.zeros(
        (train_mode_per_block.shape[0], 30, data_for_eval[0].shape[1], data_for_eval[0].shape[2]))
    all_data_per_char = dict()
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int produces the identical dtype.
    target_per_char_as_matrix = np.zeros((train_mode_per_block.shape[0], 30), dtype=int)
    # Stimulus codes run 1..30; i is the matching 0-based column index.
    for i, stimuli_i in enumerate(range(1, 31)):
        all_data_per_char[i] = data_for_eval[0][gcd_res['stimulus'] == stimuli_i]
        all_data_per_char_as_matrix[:, i, :, :] = data_for_eval[0][gcd_res['stimulus'] == stimuli_i]

    target_per_char = dict()
    for i, stimuli_i in enumerate(range(1, 31)):
        target_per_char[i] = data_for_eval[1][gcd_res['stimulus'] == stimuli_i]
        target_per_char_as_matrix[:, i] = data_for_eval[1][gcd_res['stimulus'] == stimuli_i]

    return all_data_per_char, target_per_char, train_mode_per_block, all_data_per_char_as_matrix, target_per_char_as_matrix
コード例 #3
0
def create_data_for_compare_by_repetition(file_name):
    """
    Load one subject file and return its trial/block/stimulus arrays
    restricted to the non-training (train_mode != 1) epochs.

    :param file_name: path to the subject's .mat recording.
    :return: dict with keys 'train_trial', 'train_block', 'stimulus'.
    """
    mat_content = readCompleteMatFile(file_name)
    # Build the boolean mask once and reuse it for every array.
    eval_mask = mat_content['train_mode'] != 1
    return {
        'train_trial': mat_content['train_trial'][eval_mask],
        'train_block': mat_content['train_block'][eval_mask],
        'stimulus': mat_content['stimulus'][eval_mask],
    }
コード例 #4
0
def create_train_data(file_name, down_samples_param):
    """
    Build a shuffled binary training set (target vs. non-target epochs) from
    the training-mode portion of each subject file listed in ``others``.

    :param file_name: unused -- immediately overwritten inside the loop;
        see the NOTE below.
    :param down_samples_param: downsampling factor forwarded to
        downsample_data.
    :return: (shuffeled_samples, suffule_tags) -- stacked epochs and their
        one-hot labels, shuffled with a fixed random_state for
        reproducibility.
    """
    all_positive_train = []
    all_negative_train = []

    others = ["RSVP_Color116msVPgcd.mat"]

    for other_file_name in others:
        # NOTE(review): this clobbers the caller-supplied `file_name`
        # argument, which is therefore never used. Kept as-is to preserve
        # behaviour; confirm whether `others` should come from the caller.
        file_name = r'C:\Users\ORI\Documents\Thesis\dataset_all\{0}'.format(
            other_file_name)
        gcd_res = readCompleteMatFile(file_name)
        last_time_stamp = 800
        fist_time_stamp = -200

        # Epoch the raw channels around each marker in [-200, 800].
        data_for_eval = ExtractDataVer4(gcd_res['all_relevant_channels'],
                                        gcd_res['marker_positions'],
                                        gcd_res['target'], fist_time_stamp,
                                        last_time_stamp)

        temp_data_for_eval = downsample_data(data_for_eval[0],
                                             data_for_eval[0].shape[1],
                                             down_samples_param)

        # Keep only training-mode epochs, split by their target tag.
        positive_train_data_gcd = temp_data_for_eval[np.all(
            [gcd_res['train_mode'] == 1, gcd_res['target'] == 1], axis=0)]
        negative_train_data_gcd = temp_data_for_eval[np.all(
            [gcd_res['train_mode'] == 1, gcd_res['target'] == 0], axis=0)]
        all_positive_train.append(positive_train_data_gcd)
        all_negative_train.append(negative_train_data_gcd)

    positive_train_data_gcd = np.vstack(all_positive_train)
    negative_train_data_gcd = np.vstack(all_negative_train)

    all_data = np.vstack([positive_train_data_gcd, negative_train_data_gcd])

    # Positives first, then negatives -- tags built in the same order.
    all_tags = np.vstack([
        np.ones((positive_train_data_gcd.shape[0], 1)),
        np.zeros((negative_train_data_gcd.shape[0], 1))
    ])
    categorical_tags = to_categorical(all_tags)

    shuffeled_samples, suffule_tags = shuffle(all_data,
                                              categorical_tags,
                                              random_state=0)
    return shuffeled_samples, suffule_tags
コード例 #5
0
def create_evaluation_data(file_name, down_samples_param):
    """
    Build the evaluation set for one subject: epoch the recording in the
    [-200, 800] window, downsample it, and keep only the non-training
    (train_mode != 1) epochs together with their target tags.

    :param file_name: path to the subject's .mat recording.
    :param down_samples_param: downsampling factor forwarded to
        downsample_data.
    :return: (test_data_gcd, test_target_gcd)
    """
    mat_content = readCompleteMatFile(file_name)
    extracted = ExtractDataVer4(mat_content['all_relevant_channels'],
                                mat_content['marker_positions'],
                                mat_content['target'], -200, 800)

    downsampled = downsample_data(extracted[0], extracted[0].shape[1],
                                  down_samples_param)

    # Single mask selecting the evaluation (non-training) epochs.
    eval_mask = mat_content['train_mode'] != 1
    test_data_gcd = downsampled[eval_mask]
    test_target_gcd = extracted[1][eval_mask]
    return test_data_gcd, test_target_gcd
コード例 #6
0

def get_indexes_by_stimuli(stimulus_range, stimulus, train_trial, train_block):
    """
    Group epoch indexes (and their trial/block labels) by stimulus code.

    :param stimulus_range: iterable of stimulus codes to collect.
    :param stimulus: 1-D array of the stimulus code of every epoch.
    :param train_trial: per-epoch trial labels, aligned with ``stimulus``.
    :param train_block: per-epoch block labels, aligned with ``stimulus``.
    :return: dict mapping each code to a dict with keys 'stimuli' (the code),
             'idx' (epoch indexes), 'train_trial' and 'train_block' (labels
             at those indexes).
    """
    stimuli_histogram = {}
    for code in stimulus_range:
        # Compute the index set once instead of three identical
        # np.where(...) calls per stimulus code.
        idx = np.where(stimulus == code)[0]
        stimuli_histogram[code] = dict(stimuli=code,
                                       idx=idx,
                                       train_trial=train_trial[idx],
                                       train_block=train_block[idx])
    return stimuli_histogram


import numpy as np
if __name__ == '__main__':
    # Ad-hoc smoke test: load one subject's recording from a hard-coded
    # local path and print the per-stimulus index mapping for codes 1..2.
    res = readCompleteMatFile(r'C:\Users\ori22_000\Documents\Thesis\dataset_all\RSVP_Color116msVPgcd.mat');

    # Python 2 print statement -- this module predates Python 3.
    print get_indexes_by_stimuli(range(1,3), res['stimulus'], res['train_trial'], res['train_block'])
    # # stimuli_histogram = {}
    # for i in range(1,31):
    #     stimuli_histogram[i] = dict(stimuli=i,
    #                                 idx=np.where(res['stimulus'] == i)[0],
    #                                 train_trial=res['train_trial'][np.where(res['stimulus'] == i)[0]],
    #                                 train_block=res['train_block'][np.where(res['stimulus'] == i)[0]])
        # np.where(res['stimulus'] == i)[0]
        # print np.where(res['stimulus'] == i)[0]
        # print res['train_trial'][np.where(res['stimulus'] == i)[0]]
        # print res['train_block'][np.where(res['stimulus'] == i)[0]]
        # print np.where(res['stimulus'] == i)[0]

    # print stimuli_histogram[2]['idx'][np.where(stimuli_histogram[2]['train_block'] <= 3)[0]]
コード例 #7
0
                    "RSVP_Color116msVPgcg.mat",
                    "RSVP_Color116msVPgch.mat",
                    "RSVP_Color116msVPiay.mat",
                    "RSVP_Color116msVPicn.mat"];

    data_base_dir = r'C:\Users\ORI\Documents\Thesis\dataset_all'
    # model = LDA()

    all_models = [LSTM_EEG_CNN(50.0, 20)]
    for model_type in all_models:

        all_model_results = []

        for subject in all_subjects:
            file_name = os.path.join(data_base_dir, subject)
            gcd_res = readCompleteMatFile(file_name)
            repetition_eval = EvaluateByRepetition(file_name)

            # for data_extraction_method in [create_training_and_testing(gcd_res, 0, 400, 1, True),
            #                                create_training_and_testing(gcd_res, 0, 400, 1, False),
            #                                create_training_and_testing(gcd_res, 0, 400, 8, True),
            #                                create_training_and_testing(gcd_res, 0, 400, 8, False),
            #                                create_training_and_testing(gcd_res, -200, 800, 1, True),
            #                                create_training_and_testing(gcd_res, -200, 800, 1, False),
            #                                create_training_and_testing(gcd_res, -200, 800, 8, True),
            #                                create_training_and_testing(gcd_res, -200, 800, 8, False)
            #                                ]:

            for data_extraction_method in [create_training_and_testing(gcd_res, -200, 800, 8, False)
                                           ]:
コード例 #8
0
def foo():
    """Return the fixed triple (1, 2, 3)."""
    values = (1, 2, 3)
    return values


if __name__ == "__main__":

    # Scratch driver: load one subject recording from a hard-coded local
    # path, run PredictByRepetitions3 on it, then re-read the same file.

    # [all_target1, all_non_target1] = LoadSingleSubjectPythonByMode(r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat" ,1)

    # [all_target2, all_non_target2] = LoadSingleSubjectPythonByMode(r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat" ,2)

    # [all_target3, all_non_target3] = LoadSingleSubjectPythonByMode(r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat" ,3)

    # LoadSingleSubjectPython(r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat");

    temp = readCompleteMatFile(r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat")
    PredictByRepetitions3(temp, 3, None);

    # NOTE(review): elsewhere in this file readCompleteMatFile returns a
    # dict indexed by key; unpacking four positional values here looks
    # inconsistent -- confirm which loader version is in scope.
    all_relevant_channels, channels_names, marker_positions, target = readCompleteMatFile(
        r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat")
    # other option: take the model

    # load the trial data

    # load the prediction data - the prediction data is an index with tag\probablity


    a, b = foo()
    print ('hello');
コード例 #9
0
        "RSVP_Color116msVPgch.mat", "RSVP_Color116msVPiay.mat",
        "RSVP_Color116msVPicn.mat"
    ]

    all_subjects = ["RSVP_Color116msVPicr.mat", "RSVP_Color116msVPpia.mat"]

    data_base_dir = r'C:\Users\ORI\Documents\Thesis\dataset_all'
    # model = LDA()

    all_models = [LSTM_CNN_EEG_Comb(50, 20)]
    for model_type in all_models:

        # all_model_results = [create_training_and_testing(gcd_res, -200, 800, 1, False)]
        all_model_results = []
        all_res = [
            readCompleteMatFile(os.path.join(data_base_dir, subject))
            for subject in all_subjects
        ]
        training_data, train_tags, = create_train_data_from_all(
            all_res, -200, 800, 1, False)
        model = model_type
        model.fit(training_data, train_tags)

        for subject in all_subjects:
            file_name = os.path.join(data_base_dir, subject)
            gcd_res = readCompleteMatFile(file_name)
            repetition_eval = EvaluateByRepetition(file_name)

            for data_extraction_method in [
                    create_only_testing(gcd_res, -200, 800, 1, False)
            ]:
コード例 #10
0
    negative_train_data_gcd = temp_data_for_eval[
        np.all([indexes, data_from_mat['target'] == 0], axis=0)]
    all_positive_train.append(positive_train_data_gcd)
    all_negative_train.append(negative_train_data_gcd)

    positive_train_data_gcd = np.vstack(all_positive_train)
    negative_train_data_gcd = np.vstack(all_negative_train)

    all_data = np.vstack([positive_train_data_gcd, negative_train_data_gcd])

    all_tags = np.vstack(
        [np.ones((positive_train_data_gcd.shape[0], 1)), np.zeros((negative_train_data_gcd.shape[0], 1))])
    categorical_tags = to_categorical(all_tags)

    shuffeled_samples, suffule_tags = (all_data, categorical_tags)
    return shuffeled_samples, suffule_tags



if __name__ == "__main__":
    # Scratch driver: build train/test splits for one subject from a
    # hard-coded local path.
    file_name = r'C:\Users\ORI\Documents\Thesis\dataset_all\RSVP_Color116msVPfat.mat'

    # read the raw data
    raw_data = readCompleteMatFile(file_name)
    # NOTE(review): this create_train_data takes (raw_data, factor, mask) --
    # a different signature from the 2-argument (file_name, factor) version
    # elsewhere in this file; confirm which definition is in scope here.
    train_data, train_tag = create_train_data(raw_data, 8, raw_data['train_mode'] == 1)
    test_data, test_tag = create_train_data(raw_data, 8, raw_data['train_mode'] != 1)

    # now, extract epoch

    print "done"
コード例 #11
0
def get_indexes_by_stimuli(stimulus_range, stimulus, train_trial, train_block):
    """
    Group epoch indexes (and their trial/block labels) by stimulus code.

    :param stimulus_range: iterable of stimulus codes to collect.
    :param stimulus: 1-D array of the stimulus code of every epoch.
    :param train_trial: per-epoch trial labels, aligned with ``stimulus``.
    :param train_block: per-epoch block labels, aligned with ``stimulus``.
    :return: dict mapping each code to a dict with keys 'stimuli' (the code),
             'idx' (epoch indexes), 'train_trial' and 'train_block' (labels
             at those indexes).
    """
    stimuli_histogram = {}
    for code in stimulus_range:
        # Compute the index set once instead of three identical
        # np.where(...) calls per stimulus code.
        idx = np.where(stimulus == code)[0]
        stimuli_histogram[code] = dict(
            stimuli=code,
            idx=idx,
            train_trial=train_trial[idx],
            train_block=train_block[idx])
    return stimuli_histogram


import numpy as np
if __name__ == '__main__':
    # Ad-hoc smoke test: load one subject's recording from a hard-coded
    # local path and print the per-stimulus index mapping for codes 1..2.
    res = readCompleteMatFile(
        r'C:\Users\ori22_000\Documents\Thesis\dataset_all\RSVP_Color116msVPgcd.mat'
    )

    # Python 2 print statement -- this module predates Python 3.
    print get_indexes_by_stimuli(range(1, 3), res['stimulus'],
                                 res['train_trial'], res['train_block'])
    # # stimuli_histogram = {}
    # for i in range(1,31):
    #     stimuli_histogram[i] = dict(stimuli=i,
    #                                 idx=np.where(res['stimulus'] == i)[0],
    #                                 train_trial=res['train_trial'][np.where(res['stimulus'] == i)[0]],
    #                                 train_block=res['train_block'][np.where(res['stimulus'] == i)[0]])
    # np.where(res['stimulus'] == i)[0]
    # print np.where(res['stimulus'] == i)[0]
    # print res['train_trial'][np.where(res['stimulus'] == i)[0]]
    # print res['train_block'][np.where(res['stimulus'] == i)[0]]
    # print np.where(res['stimulus'] == i)[0]
コード例 #12
0
    model.add(MaxPooling2D(pool_size=(1, number_of_in_channels)))
    model.add(Convolution2D(nb_filter=number_of_out_channels, nb_row=6, nb_col=1, border_mode='same',init='glorot_normal'))
    model.add(MaxPooling2D(pool_size=(20, 1)))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(100))
    model.add(Activation('relu'))
    # model.add(LSTM(input_dim=55, output_dim=20,return_sequences=False))
    # # model.add(Dense(275))
    # # model.add(Activation('tanh'))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd')

    file_name = r'C:\Users\ORI\Documents\Thesis\dataset_all\RSVP_Color116msVPfat.mat'



    raw_data = readCompleteMatFile(file_name)
    train_data, train_tag = create_train_data(raw_data, 8, raw_data['train_mode'] == 1)

    train_data_single_color_channel = np.expand_dims(stats.zscore(train_data, axis = 1), axis = 1)

    test_data, test_tag = create_train_data(raw_data, 8, raw_data['train_mode'] != 1)
    model.fit(train_data_single_color_channel, train_tag, nb_epoch=1, show_accuracy=True)

    test_data_single_color_channel = np.expand_dims(stats.zscore(test_data, axis=1), axis=1)
    print model.evaluate(test_data_single_color_channel, test_tag, show_accuracy=True)
    print train_data.shape

コード例 #13
0
def foo():
    """Return the fixed triple (1, 2, 3)."""
    result = (1, 2, 3)
    return result


if __name__ == "__main__":

    # Scratch driver: load one subject recording from a hard-coded local
    # path, run PredictByRepetitions3 on it, then re-read the same file.

    # [all_target1, all_non_target1] = LoadSingleSubjectPythonByMode(r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat" ,1)

    # [all_target2, all_non_target2] = LoadSingleSubjectPythonByMode(r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat" ,2)

    # [all_target3, all_non_target3] = LoadSingleSubjectPythonByMode(r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat" ,3)

    # LoadSingleSubjectPython(r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat");

    temp = readCompleteMatFile(
        r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat"
    )
    PredictByRepetitions3(temp, 3, None)

    # NOTE(review): elsewhere in this file readCompleteMatFile returns a
    # dict indexed by key; unpacking four positional values here looks
    # inconsistent -- confirm which loader version is in scope.
    all_relevant_channels, channels_names, marker_positions, target = readCompleteMatFile(
        r"C:\Users\ori22_000\Documents\Thesis\dataset\VPfat_11_01_24\RSVP_Color116msVPfat.mat"
    )
    # other option: take the model

    # load the trial data

    # load the prediction data - the prediction data is an index with tag\probablity

    a, b = foo()
    print('hello')