Example #1
def main():
    # map segments to subject ID
    map_df=from_map_to_df(subject_segment_map,'subjects')
    # load the delta/theta features dataframe.
    df_features=load_object(PATH_DATA_ID_2+
                                        'Datasets_delta_theta/features.files')

    # load the trained ID2 classifier and feature-transformation objects.
    id2_clf=load_object(path_id2_clf+'eeg_delta_theta_clf.file')
    poly_obj=load_object(path_id2_clf+'eeg_delta_theta_poly_obj.file')
    best_features_poly=load_object(path_id2_clf+
                                    'eeg_delta_theta_best_features_poly.file')
    
    X=prepare_for_predict(df_features,
                        poly_obj,
                        best_features_poly)
    predictions=id2_clf.predict_proba(X)[:,1]
    df_prediction=\
        map_df.join(pd.DataFrame(predictions,columns=['Predicted_Target_id2']))
    df_prediction_decision=df_prediction.groupby('subjects').mean()
    df_prediction_decision.rename(
                columns={'Predicted_Target_id2':'Decision_id2'},inplace=True)
    print(df_prediction)
    print(df_prediction_decision)
    df_prediction.to_csv(PATH_RESULTS+'Predicted_Target_id2.csv')
    df_prediction_decision.to_csv(PATH_RESULTS+'Decision_id2.csv')
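
Note: the helper from_map_to_df used throughout these examples is imported from mapSubjectSegment and is not shown here. Judging from how it is consumed (a {subject: [start, end]} index map expanded into one row per segment, then joined against a prediction frame on a plain integer index), a minimal sketch could look like this; the real implementation may differ in details:

import pandas as pd

def from_map_to_df(subject_map, col_name):
    #hypothetical reconstruction of mapSubjectSegment.from_map_to_df:
    #expand {subject: [start, end]} intervals into one row per segment
    rows = []
    for subject, (start, end) in subject_map.items():
        rows.extend({col_name: subject} for _ in range(start, end))
    return pd.DataFrame(rows)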
Example #2
def main():
    #load the trained fastai learner
    learn = load_learner(PATH_DATA_ID_44)
    #load dictionary of subject:index_interval
    subject_segment_map = load_object(PATH_DATA + 'mapper_subject.file')
    #load dataframe with the .png file name for each segment
    subject_segment_map_img = load_object(PATH_DATA_ID_44 +
                                          'image_name_map.file')
    #create dataframe from index interval and name of subject
    map_df = from_map_to_df(subject_segment_map, 'subjects')
    #the resulting dataframe has columns 'subjects' (the subject id)
    #and 'name' (the .png file name)
    df_images_id = map_df.join(subject_segment_map_img)

    predictions_hold = []
    for each_png in df_images_id.name:
        x = open_image(path_to_images + each_png)
        p = learn.predict(x)
        tensor = p[2]
        single_pred = np.round(tensor.cpu().detach().numpy()[0], 5)
        predictions_hold.append(single_pred)

    df_prediction=\
        df_images_id.join(pd.DataFrame(predictions_hold,
                                        columns=['Predicted_Target_id44']))
    df_prediction_decision = df_prediction.groupby('subjects').mean()
    df_prediction_decision.rename(
        columns={'Predicted_Target_id44': 'Decision_id44'}, inplace=True)
    print(df_prediction.head())
    print(df_prediction_decision.head())

    df_prediction.to_csv(PATH_RESULTS + 'Predicted_Target_id44.csv')
    df_prediction_decision.to_csv(PATH_RESULTS + 'Decision_id44.csv')


#if __name__ == "__main__":
#    main()
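
Note: with the fastai v1 API used here, learn.predict(x) returns a (category, class_index_tensor, probabilities_tensor) triple, which is why the loop keeps p[2]. The loop body above is equivalent to this unpacked form:

    pred_class, pred_idx, probs = learn.predict(x)
    single_pred = np.round(probs.cpu().detach().numpy()[0], 5)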
Example #3
import os
import configparser
from OFHandlers import load_object
import pandas as pd
from mapSubjectSegment import from_map_to_df

#read configuration file
config = configparser.ConfigParser()
root_path= os.getcwd().replace('scripts','')
script_location = os.getcwd().replace('scripts','configuration')
config.read_file(open(script_location+'/config.cfg'))

PATH_DATA_ID_2   = root_path + config.get('PATH_STORE','PATH_DATA_ID_2')
path_id2_clf     = PATH_DATA_ID_2+'ID2_classifier/'
PATH_DATA        = root_path + config.get('PATH_STORE','PATH_DATA')
PATH_RESULTS     = root_path + config.get('PATH_STORE','PATH_RESULTS')
subject_segment_map=load_object(PATH_DATA+'mapper_subject.file')

def prepare_for_predict(X,
                        poly_obj,
                        best_features_poly):
    """pipeline of transformations to make a prediction."""
    X_poly=poly_obj.transform(X)
    cols_poly=poly_obj.get_feature_names(X.columns)

    df_X_poly=pd.DataFrame(X_poly,columns=cols_poly,index=X.index)
    X=df_X_poly[best_features_poly]
    return X

def main():
    # map segments to subject ID
    map_df=from_map_to_df(subject_segment_map,'subjects')
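
Note: poly_obj behaves like a fitted sklearn PolynomialFeatures transformer (the transform and get_feature_names calls in prepare_for_predict match that pre-1.0 sklearn API). A self-contained illustration with stand-in objects; the real poly_obj and best_features_poly are the persisted ones loaded from disk above:

import pandas as pd
from sklearn.preprocessing import PolynomialFeatures

#stand-ins for illustration only
X_demo = pd.DataFrame({'a': [1.0, 2.0], 'b': [3.0, 4.0]})
poly_demo = PolynomialFeatures(degree=2).fit(X_demo)
best_demo = ['a', 'a b']  #e.g. a subset selected during training

X_ready = prepare_for_predict(X_demo, poly_demo, best_demo)
print(list(X_ready.columns))  # ['a', 'a b']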
Example #4
import os
import configparser

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

from OFHandlers import load_object

#these parameters cannot be changed;
#the CNN of id44 would give different results otherwise.
mpl.rcParams['figure.dpi'] = 72
mpl.rcParams['figure.edgecolor'] = (1, 1, 1, 0)
mpl.rcParams['figure.facecolor'] = (1, 1, 1, 0)
mpl.rcParams['figure.figsize'] = [6.0, 4.0]
mpl.rcParams['figure.subplot.bottom'] = 0.125

#read configuration file
config = configparser.ConfigParser()
root_path = os.getcwd().replace('scripts', '')
script_location = os.getcwd().replace('scripts', 'configuration')
config.read_file(open(script_location + '/config.cfg'))

PATH_DATA_ID_44 = config.get('PATH_STORE', 'PATH_DATA_ID_44')
df_fast = load_object(root_path + PATH_DATA_ID_44 +
                      'Datasets_alpha_beta/features.files')

path_to_images = root_path + PATH_DATA_ID_44 + 'Datasets_image_alpha_beta/'
# 110 = 10*11. We will build an image representing 110 rows, one per
# channel, and two columns, alpha and beta.
dim_tuple_images = (110, 2)


def generate_images(features, dim_tuple, path_to_save):
    for i in range(0, len(features)):
        sample = np.array(features.iloc[i]).reshape(dim_tuple)
        #save image
        fig = plt.figure(figsize=(7, 7))
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(sample, aspect='auto', origin='lower')
        plt.axis('off')
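
Note: the snippet is truncated here. Given the "#save image" comment and the otherwise unused path_to_save argument, the loop presumably finishes by writing the figure to disk and closing it. A hypothetical continuation (the original file-naming scheme is not shown in this excerpt):

        #hypothetical continuation: persist and release the figure
        fig.savefig(path_to_save + str(i) + '.png')
        plt.close(fig)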
Example #5
def main():
    subjects_id=pd.read_csv(CSV_SUBJECTS_IDS)
    list_ids=subjects_id.id.values.tolist()
    i=0
    mapper_subject={}
    for each_subject in list_ids:
        path_0=PATH_SIGNALS_CSV+'/'+each_subject+'_'+FILES_USE[0]
        path_1=PATH_SIGNALS_CSV+'/'+each_subject+'_'+FILES_USE[1]
        path_2=PATH_SIGNALS_CSV+'/'+each_subject+'_'+FILES_USE[2]

        #Obtain an mne object from the csv files.
        #This object combines the information from the 3 csv files
        #downloaded from AWS.
        raw=csv_to_mne(path_subject_channels=path_0,
                        path_subject_signal=path_1,
                        path_subject_events=path_2,
                        s_freq=S_FREQ,
                        r_s_freq=R_S_FREQ,
                        montage=MONTAGE,
                        scale=SCALE,
                        event_id=EVENT_ID)
        #make two copies of the object: one keeps the total
        #channels; the other is for the clustered signal.
        copy_raw_1=raw.copy()
        copy_raw_2=raw.copy()

        #transforms the mne object into an n-segment numpy tensor,
        #where n is the number of segments. Since the signal has 5
        #events marked, we get 5 segments.
        #a third-order Butterworth filter is applied too.
        tensor_with_segments=create_tensor(input_signal=copy_raw_1,
                                            event_mark=EVENT_ID,
                                            low_cut_hz=LOW_CUT_HZ,
                                            high_cut_hz=HIGH_CUT_HZ,
                                            interval_ms=INTERVAL_SEC*1000,
                                            channels_clusters=None,
                                            n_cluster=None)

        #transforms the mne object into a numpy tensor.
        #a third-order Butterworth filter is applied too.
        #this signal is clustered into N_CLUSTERS channel clusters.
        tensor_with_segments_clustered=create_tensor(input_signal=copy_raw_2,
                        event_mark=EVENT_ID,
                        low_cut_hz=LOW_CUT_HZ,
                        high_cut_hz=HIGH_CUT_HZ,
                        interval_ms=INTERVAL_SEC*1000,
                        channels_clusters=load_object(PATH_SAVE_CLUSTERED_DF),
                        n_cluster=N_CLUSTERS)

        #concatenate the tensors for each subject;
        #this creates the final datasets for ID2 and ID44.
        try:
            if i == 0:
                concat_signal=tensor_with_segments
                concat_signal_clustered=tensor_with_segments_clustered
                mapper_subject[each_subject]=[0,len(tensor_with_segments.y)]
            else:
                print(i,each_subject)
                concat_signal.X=np.vstack([concat_signal.X,
                                        tensor_with_segments.X])
                concat_signal_clustered.X=np.vstack([concat_signal_clustered.X,
                                        tensor_with_segments_clustered.X])
                #save subject's segment positions
                start=len(concat_signal.y)
                concat_signal.y=np.concatenate(
                    (concat_signal.y,tensor_with_segments.y), axis=0)
                #also extend the clustered labels so both tensors stay in sync
                concat_signal_clustered.y=np.concatenate(
                    (concat_signal_clustered.y,
                     tensor_with_segments_clustered.y), axis=0)
                end=len(concat_signal.y)
                mapper_subject[each_subject]=[start,end]
                print('concat_signal.X.shape',concat_signal.X.shape)
                print('concat_signal.y.shape',concat_signal.y.shape)
        except Exception as e:
            print('error occurred, see:')
            print(e)
        i=i+1
    save_object(PATH_DATA_ID_2+'data_set_tensor.file',
                                                concat_signal_clustered)
    save_object(PATH_DATA_ID_44+'data_set_tensor.file',
                                                concat_signal)
    save_object(PATH_DATA+'mapper_subject.file',mapper_subject)

#if __name__ == '__main__':
#    main()
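
Note: save_object and load_object come from the OFHandlers module, which is not shown in these excerpts. They are used as plain object-persistence helpers with a (path, object) argument order, so a pickle-based sketch is plausible; the real module may differ:

import pickle

def save_object(path, obj):
    #serialize obj to the given path (assumed pickle-based)
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

def load_object(path):
    #load an object previously written by save_object
    with open(path, 'rb') as f:
        return pickle.load(f)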
Example #6
import os
import sys
import pandas as pd
import numpy as np
from OFHandlers import save_object, load_object
import configparser

#read configuration file
config = configparser.ConfigParser()
root_path = os.getcwd().replace('scripts', '')
script_location = os.getcwd().replace('scripts', 'configuration')
config.read_file(open(script_location + '/config.cfg'))
PATH_DATA_ID_2 = root_path + config.get('PATH_STORE', 'PATH_DATA_ID_2')
PATH_DATA_ID_44 = root_path + config.get('PATH_STORE', 'PATH_DATA_ID_44')
R_S_FREQ = int(config.get('ML_VARS', 'R_S_FREQ'))
tensor_segments_id2 = load_object(PATH_DATA_ID_2 + 'data_set_tensor.file')
tensor_segments_id44 = load_object(PATH_DATA_ID_44 + 'data_set_tensor.file')


def get_index_band(rate, lower, upper):
    lower_index = int(lower * rate)
    upper_index = int(upper * rate)
    return [lower_index, upper_index]
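
# Example (assuming rate is the number of spectrum points per Hz, i.e.
# signal length divided by sampling frequency): with 500 points at
# fs = 100 Hz, rate = 500 / 100 = 5, so the 4-8 Hz theta band maps to
# spectrum indices [20, 40]:
#     get_index_band(5, 4, 8)  ->  [20, 40]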


def get_power_spectrum(X, fs, slow_band):
    total_sample_number = X.shape[0]
    channel = X.shape[1]
    print('channel', channel)
    points_per_signal = X.shape[2]
    sample_holder = []
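
Note: this excerpt also ends mid-function. Based on the slow_band argument and the get_index_band helper above, a plausible continuation computes per-channel FFT power and averages it over the requested band; a sketch under those assumptions, not the original code:

    #hypothetical continuation: mean band power per sample and channel
    rate = points_per_signal / fs
    low_i, high_i = get_index_band(rate, slow_band[0], slow_band[1])
    for s in range(total_sample_number):
        row = []
        for ch in range(channel):
            spectrum = np.abs(np.fft.rfft(X[s, ch, :])) ** 2
            row.append(spectrum[low_i:high_i].mean())
        sample_holder.append(row)
    return pd.DataFrame(sample_holder)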