Code example #1
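The snippets below rely on NumPy, scikit-learn's shuffle, and a Keras-style to_categorical, alongside the project-internal helpers ExtractDataVer4, downsample_data, and readCompleteMatFile. A hedged guess at the module-level imports (the Keras import in particular is an assumption):

import numpy as np
from sklearn.utils import shuffle        # shuffle(X, y, random_state=...) matches the calls below
from keras.utils import to_categorical   # assumption: Keras' one-hot helper (older versions: keras.utils.np_utils)
# ExtractDataVer4, downsample_data and readCompleteMatFile are project helpers; their import path is not shown here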
def create_train_data(gcd_res, fist_time_stamp=0, last_time_stamp=400, down_samples_param=1,
                      take_same_number_positive_and_negative=False):
    all_positive_train = []
    all_negative_train = []

    data_for_eval = ExtractDataVer4(gcd_res['all_relevant_channels'], gcd_res['marker_positions'],
                                    gcd_res['target'], fist_time_stamp, last_time_stamp)

    temp_data_for_eval = downsample_data(data_for_eval[0], data_for_eval[0].shape[1], down_samples_param)

    # extract the calibration_data
    positive_train_data_gcd = temp_data_for_eval[
        np.all([gcd_res['train_mode'] == 1, gcd_res['target'] == 1], axis=0)]
    negative_train_data_gcd = temp_data_for_eval[
        np.all([gcd_res['train_mode'] == 1, gcd_res['target'] == 0], axis=0)]

    all_positive_train.append(positive_train_data_gcd)
    all_negative_train.append(negative_train_data_gcd)

    positive_train_data_gcd = np.vstack(all_positive_train)
    if take_same_number_positive_and_negative:
        # rng is assumed to be a module-level NumPy random generator (e.g. numpy.random.default_rng());
        # the negatives are permuted and truncated so both classes contribute the same number of epochs
        negative_train_data_gcd = rng.permutation(np.vstack(all_negative_train))[0:positive_train_data_gcd.shape[0]]
    else:
        negative_train_data_gcd = np.vstack(all_negative_train)

    all_data = np.vstack([positive_train_data_gcd, negative_train_data_gcd])

    all_tags = np.vstack(
        [np.ones((positive_train_data_gcd.shape[0], 1)), np.zeros((negative_train_data_gcd.shape[0], 1))])
    categorical_tags = to_categorical(all_tags)  # computed but unused; this variant returns the 0/1 column vector

    shuffeled_samples, suffule_tags = shuffle(all_data, all_tags, random_state=0)

    return shuffeled_samples, suffule_tags
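A minimal usage sketch for this first variant, assuming the recording was loaded with the project's readCompleteMatFile helper (as in examples #5 and #7); the path is the dataset location hard-coded in example #7:

gcd_res = readCompleteMatFile(r'C:\Users\ORI\Documents\Thesis\dataset_all\RSVP_Color116msVPgcd.mat')
# epochs from 0 to 400 ms after each marker, no downsampling, classes balanced by subsampling the negatives
train_x, train_y = create_train_data(gcd_res,
                                     fist_time_stamp=0,
                                     last_time_stamp=400,
                                     down_samples_param=1,
                                     take_same_number_positive_and_negative=True)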
Code example #2
def create_train_data_from_all(all_gcd_res,
                               fist_time_stamp=0,
                               last_time_stamp=400,
                               down_samples_param=1,
                               take_same_number_positive_and_negative=False):

    all_positive_train = []
    all_negative_train = []
    all_data = None
    all_tags = None
    for i, single_subject_data in enumerate(all_gcd_res):
        data_for_eval = ExtractDataVer4(
            single_subject_data['all_relevant_channels'],
            single_subject_data['marker_positions'],
            single_subject_data['target'], fist_time_stamp, last_time_stamp)

        temp_data_for_eval = downsample_data(data_for_eval[0],
                                             data_for_eval[0].shape[1],
                                             down_samples_param)
        current_subject_tag = single_subject_data['target'][
            single_subject_data['train_mode'] != 1]
        current_subject_data = temp_data_for_eval[
            single_subject_data['train_mode'] != 1]
        if i == 0:
            all_tags = current_subject_tag
            all_data = current_subject_data
        else:
            all_tags = np.hstack([all_tags, current_subject_tag])
            all_data = np.vstack([all_data, current_subject_data])

        # shuffeled_samples, suffule_tags = shuffle(all_data, all_tags, random_state=0)

    return all_data, all_tags
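Example #2 stacks the non-calibration (train_mode != 1) epochs of several subjects into a single array; a minimal sketch, where subject_mat_paths is a hypothetical list of .mat recordings:

all_gcd_res = [readCompleteMatFile(path) for path in subject_mat_paths]  # subject_mat_paths is hypothetical
x, y = create_train_data_from_all(all_gcd_res, fist_time_stamp=0, last_time_stamp=400, down_samples_param=1)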
Code example #3
File: common.py  Project: Ori226/thesis_clean_v3_true
def create_train_data(gcd_res,
                      fist_time_stamp=0,
                      last_time_stamp=400,
                      down_samples_param=1,
                      take_same_number_positive_and_negative=False):
    all_positive_train = []
    all_negative_train = []

    data_for_eval = ExtractDataVer4(gcd_res['all_relevant_channels'],
                                    gcd_res['marker_positions'],
                                    gcd_res['target'], fist_time_stamp,
                                    last_time_stamp)

    temp_data_for_eval = downsample_data(data_for_eval[0],
                                         data_for_eval[0].shape[1],
                                         down_samples_param)

    all_tags = gcd_res['target'][gcd_res['train_mode'] == 1]

    all_data = temp_data_for_eval[gcd_res['train_mode'] == 1]

    categorical_tags = to_categorical(all_tags)

    # shuffeled_samples, suffule_tags = shuffle(all_data, all_tags, random_state=0)

    return all_data, all_tags
Code example #4
def create_train_data_split(gcd_res, fist_time_stamp=0, last_time_stamp=400, down_samples_param=1,
                            take_same_number_positive_and_negative=False):

    all_positive_train = []
    all_negative_train = []

    data_for_eval = ExtractDataVer4(gcd_res['all_relevant_channels'], gcd_res['marker_positions'],
                                    gcd_res['target'], fist_time_stamp, last_time_stamp)

    temp_data_for_eval = downsample_data(data_for_eval[0], data_for_eval[0].shape[1], down_samples_param)

    # extract the calibration_data
    positive_train_data_gcd = temp_data_for_eval[
        np.all([gcd_res['train_mode'] == 1, gcd_res['target'] == 1], axis=0)]
    negative_train_data_gcd = temp_data_for_eval[
        np.all([gcd_res['train_mode'] == 1, gcd_res['target'] == 0], axis=0)]

    negative_splits = np.split(shuffle(negative_train_data_gcd), 29)
    all_data_splits = []
    all_tag_splits = []
    for i in range(29):
        suffuled_split_data, suffuled_split_tag = shuffle(np.vstack([positive_train_data_gcd, negative_splits[i]]),
                                                          np.vstack([np.ones((positive_train_data_gcd.shape[0], 1)),
                                                                     np.zeros((negative_splits[i].shape[0], 1))]).astype(
                                                              int))
        all_data_splits.append(suffuled_split_data)
        all_tag_splits.append(suffuled_split_tag)

    return all_data_splits, all_tag_splits
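Because np.split is called with a hard-coded 29, this variant raises a ValueError unless the number of negative calibration epochs divides evenly by 29. A short sketch of consuming the splits, each of which pairs every positive epoch with one twenty-ninth of the negatives:

data_splits, tag_splits = create_train_data_split(gcd_res)
for split_x, split_y in zip(data_splits, tag_splits):
    print(split_x.shape, split_y.shape)  # one shuffled, roughly balanced training subset per split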
Code example #5
def create_data_rep_training(file_name, fist_time_stamp, last_time_stamp):
    gcd_res = readCompleteMatFile(file_name)
    data_for_eval = ExtractDataVer4(gcd_res['all_relevant_channels'],
                                    gcd_res['marker_positions'],
                                    gcd_res['target'], fist_time_stamp,
                                    last_time_stamp)

    data_for_eval = (downsample_data(data_for_eval[0],
                                     data_for_eval[0].shape[1],
                                     1), data_for_eval[1])

    train_mode_per_block = gcd_res['train_mode'].reshape(-1, 30)[:, 0]
    all_data_per_char_as_matrix = np.zeros(
        (train_mode_per_block.shape[0], 30, data_for_eval[0].shape[1],
         data_for_eval[0].shape[2]))
    all_data_per_char = dict()
    target_per_char_as_matrix = np.zeros((train_mode_per_block.shape[0], 30),
                                         dtype=int)
    for i, stimuli_i in enumerate(range(1, 31)):
        all_data_per_char[i] = data_for_eval[0][gcd_res['stimulus'] ==
                                                stimuli_i]
        all_data_per_char_as_matrix[:, i, :, :] = data_for_eval[0][
            gcd_res['stimulus'] == stimuli_i]

    target_per_char = dict()
    for i, stimuli_i in enumerate(range(1, 31)):
        target_per_char[i] = data_for_eval[1][gcd_res['stimulus'] == stimuli_i]
        target_per_char_as_matrix[:, i] = data_for_eval[1][gcd_res['stimulus']
                                                           == stimuli_i]

    return (all_data_per_char, target_per_char, train_mode_per_block,
            all_data_per_char_as_matrix, target_per_char_as_matrix)
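A usage sketch for example #5; the path reuses the dataset location from example #7, and the unpacking mirrors the return statement above (the last two axes of the per-character matrix come straight from ExtractDataVer4):

mat_path = r'C:\Users\ORI\Documents\Thesis\dataset_all\RSVP_Color116msVPgcd.mat'
(all_data_per_char, target_per_char, train_mode_per_block,
 all_data_per_char_as_matrix, target_per_char_as_matrix) = create_data_rep_training(mat_path, -200, 800)
print(all_data_per_char_as_matrix.shape)  # (n_blocks, 30, samples_per_epoch, n_channels)
print(target_per_char_as_matrix.shape)    # (n_blocks, 30)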
Code example #6
def create_train_data(data_from_mat, down_samples_param, indexes):
    all_positive_train = []
    all_negative_train = []

    last_time_stamp = 800
    fist_time_stamp = -200

    data_for_eval = ExtractDataVer4(data_from_mat['all_relevant_channels'], data_from_mat['marker_positions'],
                                    data_from_mat['target'], fist_time_stamp, last_time_stamp)

    temp_data_for_eval = downsample_data(data_for_eval[0], data_for_eval[0].shape[1], down_samples_param)

    positive_train_data_gcd = temp_data_for_eval[
        np.all([indexes, data_from_mat['target'] == 1], axis=0)]
    negative_train_data_gcd = temp_data_for_eval[
        np.all([indexes, data_from_mat['target'] == 0], axis=0)]
    all_positive_train.append(positive_train_data_gcd)
    all_negative_train.append(negative_train_data_gcd)

    positive_train_data_gcd = np.vstack(all_positive_train)
    negative_train_data_gcd = np.vstack(all_negative_train)

    all_data = np.vstack([positive_train_data_gcd, negative_train_data_gcd])

    all_tags = np.vstack(
        [np.ones((positive_train_data_gcd.shape[0], 1)), np.zeros((negative_train_data_gcd.shape[0], 1))])
    categorical_tags = to_categorical(all_tags)

    # note: despite the names, nothing is shuffled here; the data and one-hot categorical tags are returned as-is
    shuffeled_samples, suffule_tags = (all_data, categorical_tags)
    return shuffeled_samples, suffule_tags
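Unlike the other variants, example #6 takes an explicit boolean mask (indexes) over the epochs instead of reading train_mode itself, and returns one-hot tags. A minimal sketch that selects the calibration epochs the same way the other helpers do:

calibration_mask = data_from_mat['train_mode'] == 1  # data_from_mat loaded with readCompleteMatFile, as above
x, y_one_hot = create_train_data(data_from_mat, down_samples_param=1, indexes=calibration_mask)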
Code example #7
def create_train_data(file_name, down_samples_param):
    all_positive_train = []
    all_negative_train = []

    others = ["RSVP_Color116msVPgcd.mat"]

    for other_file_name in others:
        # note: the file_name argument is ignored; the path below is hard-coded to the dataset location
        file_name = r'C:\Users\ORI\Documents\Thesis\dataset_all\{0}'.format(
            other_file_name)
        gcd_res = readCompleteMatFile(file_name)
        last_time_stamp = 800
        fist_time_stamp = -200

        data_for_eval = ExtractDataVer4(gcd_res['all_relevant_channels'],
                                        gcd_res['marker_positions'],
                                        gcd_res['target'], fist_time_stamp,
                                        last_time_stamp)

        # total_time = last_time_stamp - fist_time_stamp
        # number_of_original_samples = total_time / 5
        # new_number_of_time_stamps = number_of_original_samples / down_samples_param
        #
        #
        # # print  data_for_eval
        # temp_data_for_eval = np.zeros((data_for_eval[0].shape[0], new_number_of_time_stamps, data_for_eval[0].shape[2]))
        #
        # for new_i, i in enumerate(range(0, 200, new_number_of_time_stamps)):
        #     temp_data_for_eval[:, new_i, :] = np.mean(data_for_eval[0][:, range(i, (i + new_number_of_time_stamps)), :], axis=1)
        print(data_for_eval[0].shape)
        temp_data_for_eval = downsample_data(data_for_eval[0],
                                             data_for_eval[0].shape[1],
                                             down_samples_param)

        positive_train_data_gcd = temp_data_for_eval[np.all(
            [gcd_res['train_mode'] == 1, gcd_res['target'] == 1], axis=0)]
        negative_train_data_gcd = temp_data_for_eval[np.all(
            [gcd_res['train_mode'] == 1, gcd_res['target'] == 0], axis=0)]
        all_positive_train.append(positive_train_data_gcd)
        all_negative_train.append(negative_train_data_gcd)

    positive_train_data_gcd = np.vstack(all_positive_train)
    negative_train_data_gcd = np.vstack(all_negative_train)

    all_data = np.vstack([positive_train_data_gcd, negative_train_data_gcd])

    all_tags = np.vstack([
        np.ones((positive_train_data_gcd.shape[0], 1)),
        np.zeros((negative_train_data_gcd.shape[0], 1))
    ])
    categorical_tags = to_categorical(all_tags)

    shuffeled_samples, suffule_tags = shuffle(all_data,
                                              categorical_tags,
                                              random_state=0)
    # shuffeled_samples, suffule_tags = (all_data, categorical_tags)
    return shuffeled_samples, suffule_tags
Code example #8
def create_evaluation_data(gcd_res, fist_time_stamp=0, last_time_stamp=400, down_samples_param=1):
    #     gcd_res = readCompleteMatFile(file_name)
    data_for_eval = ExtractDataVer4(gcd_res['all_relevant_channels'], gcd_res['marker_positions'], gcd_res['target'],
                                    fist_time_stamp, last_time_stamp)
    # print  data_for_eval

    temp_data_for_eval = downsample_data(data_for_eval[0], data_for_eval[0].shape[1], down_samples_param)

    test_data_gcd, test_target_gcd = temp_data_for_eval[gcd_res['train_mode'] != 1], data_for_eval[1][
        gcd_res['train_mode'] != 1]
    return test_data_gcd, test_target_gcd
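create_evaluation_data is the counterpart of the training helpers: it keeps only the epochs recorded outside calibration mode (train_mode != 1). A minimal sketch, reusing the gcd_res dict from the earlier examples:

test_x, test_y = create_evaluation_data(gcd_res, fist_time_stamp=0, last_time_stamp=400, down_samples_param=1)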