# Example 1
def get_dataset(subject, ins):
    """Build an EMD-feature dataset of 2-second sub-instances for one subject.

    Loads the EID-M resting-state recording, locates the subject's sample
    range, skips the first two trials, and extracts up to ``ins`` 2-second
    windows per remaining trial, computing EMD features for each.

    Args:
        subject: subject id used to locate this subject's sample range.
        ins: maximum number of 2-second sub-instances to take per trial.

    Returns:
        dict with "data" (list of feature vectors) and "target"
        (matching 'subject_<id>' label strings).
    """
    ch_fs_instances = []
    ch_tags_instances = []
    samples_trial = 7000  # samples per trial
    no_ch = 14  # EEG channels; row 14 of the matrix carries the subject labels
    sr = 128  # sampling rate (Hz)
    instance_len = sr * 2  # sub-instances of 2 seconds
    EEG_data = spio.loadmat(
        'EID-M.mat',
        squeeze_me=True)['eeg_close_ubicomp_8sub'].T  # (15, 168000)
    s_index = consecutive_index(EEG_data[no_ch, :], subject)[0]  # e.g. [0, 20999]
    # Skip the first two trials (2 * samples_trial) and walk trial by trial.
    for s_instance in range(s_index[0] + 14000, s_index[1] + 1, samples_trial):
        _instance = EEG_data[:no_ch, s_instance:s_instance +
                             samples_trial]  # (14, 7000)
        # Whole 2-second windows that fit in the trial (integer, 27 here).
        # Integer division (//) prevents a 28th, partial-length window from
        # slipping through when ins exceeds the whole-window count — with
        # float division, _i < 27.34 admitted _i == 27, whose slice clips to
        # only 88 samples.
        max_instances = _instance.shape[1] // instance_len
        for _i in range(0, ins):  # sub instances
            if _i < max_instances:
                index_start, index_end = instance_len * _i, instance_len * (
                    _i + 1)
                sub_instance = _instance[:, index_start:index_end]
                ch_fs_instances.append(get_features_emd(sub_instance, sr))
                ch_tags_instances.append('subject_{0}'.format(subject))
    return {"data": ch_fs_instances, "target": ch_tags_instances}
def main():
    """Train and persist P300 classifiers from EMD energy features (7 channels).

    For each instance count in INSTANCES, builds a feature dataset over 26
    subjects x 4 sessions, runs the classifier selector, logs the winner,
    and pickles the fitted model to disk.
    """
    logging.info(" ***** EMD, CHANNELS:7, FEATURES: energy  ***** \n")
    INSTANCES = [10, 20, 40, 60]
    sr = 200  # sampling rate (Hz)
    ch = [1, 42, 46, 51, 52, 54, 55]  # the 7 selected channel indices
    skip = {15, 21, 23, 45}  # instances excluded from every run
    for ins in INSTANCES:
        logging.info(" -------- Instance: {0} --------".format(ins))
        ch_fs_instances = []
        ch_tags_instances = []
        for subject in range(1, 27):  # 26 subjects
            for session in range(1, 5):  # 4 sessions
                s_s_chs = get_subdataset(subject, session)
                # Indices one past each row whose last column flags an event.
                _index = [
                    i + 1 for i, d in enumerate(s_s_chs[:, -1]) if d == 1
                ]
                instances = get_samples(_index, s_s_chs, sr)
                for f_instance in range(0, ins):
                    if f_instance not in skip:
                        instance = preprocessing_P300(instances, f_instance,
                                                      sr, ch)
                        ch_fs_instances.append(get_features_emd(instance, sr))
                        ch_tags_instances.append('subject_{0}'.format(subject))
        dataset = {"data": ch_fs_instances, "target": ch_tags_instances}

        dataTraining = dataset['data']
        targetTraining = dataset['target']
        result = selector(dataTraining, targetTraining)

        logging.info("Best classifier {0} with accuracy {1} \n".format(
            result['classifier'], result['accuracy']))

        # Saving the model — use a context manager so the file handle is
        # closed deterministically (the original open() was never closed).
        model_name = 'P300_EMD_ch7_energy_ins%02d.sav' % ins
        with open(model_name, 'wb') as model_file:
            pickle.dump(result["model"], model_file)
# Example 3
def main():
    """Train and persist P300 classifiers from EMD energy+HHT features, no preprocessing.

    For each instance count, builds raw (unpreprocessed) 7-channel feature
    instances over 26 subjects x 4 sessions, runs the classifier selector,
    logs the winner, and pickles the fitted model.
    """
    logging.info(" ***** EMD, CHANNELS:7, FEATURES: energy + hht ***** ")
    logging.info("--------------no preprocessing-------------- \n")
    INSTANCES = [10, 20]
    sr = 200  # sampling rate (Hz)
    ch = [1, 42, 46, 51, 52, 54, 55]  # the 7 selected channel indices
    skip = {15, 21, 23, 45}  # instances excluded from every run
    for ins in INSTANCES:
        logging.info(" -------- Instance: {0} --------".format(ins))
        ch_fs_instances = []
        ch_tags_instances = []
        for subject in range(1, 27):  # 26 subjects
            for session in range(1, 5):  # 4 sessions
                s_s_chs = get_subdataset(subject, session)
                # Indices one past each row whose last column flags an event.
                _index = [i + 1 for i, d in enumerate(s_s_chs[:, -1]) if d == 1]
                instances = get_samples(_index, s_s_chs, sr)
                for f_instance in range(0, ins):
                    if f_instance not in skip:
                        # Drop first/last columns, transpose to channels-first,
                        # then keep only the 7 selected channels.
                        instance = np.array(instances[f_instance, :, 1:-1]).transpose()
                        ins7 = instance[ch, :]
                        ch_fs_instances.append(get_features_emd(ins7, sr))
                        ch_tags_instances.append('subject_{0}'.format(subject))
        dataset = {"data": ch_fs_instances, "target": ch_tags_instances}

        dataTraining = dataset['data']
        targetTraining = dataset['target']
        result = selector(dataTraining, targetTraining)

        logging.info("Best classifier {0} with accuracy {1} \n".format(result['classifier'], result['accuracy']))

        # Saving the model — use a context manager so the file handle is
        # closed deterministically (the original open() was never closed).
        model_name = 'P300_EMD_ch7_energy_hht_nopre_ins%02d.sav' % ins
        with open(model_name, 'wb') as model_file:
            pickle.dump(result["model"], model_file)
# Example 4
def main():
    """Train resting-state classifiers from EMD energy features of 2-second windows.

    Loads the EID-M recording once, then for each instance count extracts up
    to ``ins`` preprocessed 2-second sub-instances from the first two trials
    of each of the 8 subjects, selects the best classifier, and pickles it.
    """
    logging.info(" ***** Resting state, EMD, FEATURES: energy ***** \n")

    # NOTE(review): these lists are shared across the `for ins` iterations, so
    # the ins=20 dataset also contains every ins=10 instance — confirm this
    # accumulation is intended (the P300 main() resets them per iteration).
    ch_fs_instances = []
    ch_tags_instances = []

    samples_trial = 7000  # samples per trial
    no_subjects = 8
    no_ch = 14  # EEG channels; row 14 of the matrix carries the subject labels
    sr = 128  # sampling rate (Hz)
    instance_len = sr * 2  # sub-instances of 2 seconds

    # Loop-invariant: load the recording once instead of once per instance count.
    EEG_data = spio.loadmat(
        'EID-M.mat',
        squeeze_me=True)['eeg_close_ubicomp_8sub'].T  # (15, 168000)

    for ins in [10, 20]:
        logging.info(" -------- Instance: {0} --------".format(ins))

        for subject in range(1, no_subjects + 1):
            s_index = consecutive_index(EEG_data[no_ch, :],
                                        subject)[0]  # e.g. [0, 20999]

            # Covers exactly the subject's first two trials.
            for s_instance in range(s_index[0], s_index[0] + samples_trial + 1,
                                    samples_trial):
                _instance = EEG_data[:no_ch, s_instance:s_instance +
                                     samples_trial]  # (14, 7000)
                # Whole 2-second windows that fit in the trial (27); integer
                # division avoids admitting a trailing partial window.
                max_instances = _instance.shape[1] // instance_len

                for _i in range(0, ins):  # sub instances
                    if _i < max_instances:
                        index_start, index_end = instance_len * _i, instance_len * (
                            _i + 1)
                        sub_instance = _instance[:, index_start:index_end]
                        sub_ins = preprocessing_resting(sub_instance)
                        ch_fs_instances.append(get_features_emd(sub_ins, sr))
                        ch_tags_instances.append('subject_{0}'.format(subject))
        dataset = {"data": ch_fs_instances, "target": ch_tags_instances}

        dataTraining = dataset['data']
        targetTraining = dataset['target']
        result = selector(dataTraining, targetTraining)

        logging.info("Best classifier {0} with accuracy {1}".format(
            result['classifier'], result['accuracy']))

        # Saving the model — use a context manager so the file handle is
        # closed deterministically (the original open() was never closed).
        model_name = 'EMD_resting_energy_ins%02d.sav' % ins
        with open(model_name, 'wb') as model_file:
            pickle.dump(result["model"], model_file)
# Example 5
def get_dataset(subject, ins):
    """Collect EMD features for one subject's session-5 P300 instances.

    Args:
        subject: subject id to load and label.
        ins: number of candidate instances to scan (excluded ones skipped).

    Returns:
        dict with "data" (feature vectors) and "target" ('subject_<id>' labels).
    """
    sr = 200  # sampling rate (Hz)
    selected_channels = [1, 42, 46, 51, 52, 54, 55]  # 7 channels
    session = 5
    excluded = (15, 21, 23, 45)  # instances that are always skipped

    features = []
    labels = []

    session_data = get_subdataset(subject, session)
    # Indices one past each row whose last column flags an event.
    event_rows = [row + 1 for row, flag in enumerate(session_data[:, -1]) if flag == 1]
    instances = get_samples(event_rows, session_data, sr)
    for f_instance in range(ins):
        if f_instance in excluded:
            continue
        instance = preprocessing_P300(instances, f_instance, sr, selected_channels)
        features.append(get_features_emd(instance, sr))
        labels.append('subject_{0}'.format(subject))
    return {"data": features, "target": labels}
def main():
    logging.info(" ***** Resting state, EMD , FEATURES: energy ***** ")
    logging.info(" ---------- No preprocessing ---------- \n \n")

    ch_fs_instances = []
    ch_tags_instances = []

    samples_subject = 21000 # samples per subject
    samples_trial = 7000  # samples per trial
    no_subjects = 8
    no_ch = 14
    sr = 128

    for ins in [10, 20]:
        logging.info(" -------- Instance: {0} --------".format(ins))
        instance_len = sr * 2  # to create sub instances of 2 sec
        EEG_data = spio.loadmat('EID-M.mat', squeeze_me=True)['eeg_close_ubicomp_8sub'].T  # (15, 168000)
        _labels = EEG_data[14, :]  # len(168000)

        for subject in range(1, no_subjects + 1):
            s_index = consecutive_index(EEG_data[no_ch, :], subject)[0]  # [0, 20999],

            for s_instance in range(s_index[0], s_index[0] + samples_trial + 1, samples_trial):
                _instance = EEG_data[:no_ch, s_instance:s_instance + samples_trial]  # (14, 7000)
                max_instances = _instance.shape[1] / instance_len  # this is not necessary, but I added it just FYI, 27

                for _i in range(0, ins):  # sub instances
                    if _i < max_instances:
                        index_start, index_end = instance_len * _i, instance_len * (_i + 1)
                        sub_instance = _instance[:, index_start:index_end]
                        ch_fs_instances.append(get_features_emd(sub_instance, sr))
                        ch_tags_instances.append('subject_{0}'.format(subject))
        dataset = {"data": ch_fs_instances, "target": ch_tags_instances}

        dataTraining = dataset['data']
        targetTraining = dataset['target']

        print("targetTraining: ", targetTraining)