def train_dataset_feature_inter(train_dir='train1', subject_list=['subject_1'], feature_type='TD4',
        dataset='data1', fold_pre='250_100', z_score=False, channel_pos_list=['O'], 
        action_num=11, chan_num=4):
    
    my_clfs = ["LDA"]

    start_time = time.time()

    if feature_type == 'TD4':
        feat_num = 4
    elif feature_type == 'TD5':
        feat_num = 5

    chan_len = feat_num * chan_num

    norm = ''
    channel_pos_list = channel_pos_list[1:]

    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub, feature_type, action_num)

        if z_score:
            trains = data_normalize(trains)
            norm = '_norm'
        trains_inter = trains[:, 0:chan_len]
        tests_inter = trains[:,chan_len:]

        training_lda_TD4_inter(
            my_clfs, trains_inter, tests_inter, classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' + sub + norm,
            pos_list=channel_pos_list, num=1, chan_len=chan_len, action_num=action_num)
        print "Total times: ", time.time() - start_time, 's'
Example #2
def train_dataset_feature(train_dir='train1',
                          subject_list=['subject_1'],
                          type='TD4',
                          dataset='data1',
                          fold_pre='250_100',
                          z_score=False):
    # my_clfs = ["LDA", "SVC_linear", "SVC_rbf", "Logistic", "QDA", "GaussianNB"]
    # my_clfs = ["LDA", "QDA", "GaussianNB", "SVC_linear", "SVC_rbf", "Logistic"]

    my_clfs = ["LDA"]

    start_time = time.time()
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub)
        if z_score:
            trains = data_normalize(trains)
            sub = 'norm_' + sub
        # print trains.mean(axis=0), trains.std(axis=0)
        # sys.exit(0)
        classifier.training_lda_TD4(my_clfs,
                                    trains,
                                    classes,
                                    log_fold=fold_pre + '/' + type + '_' +
                                    dataset + '_' + sub,
                                    num=1)


#
# classifier.training_lda_TD4_cross(my_clfs, trains1, classes1, trains2, classes2, log_fold = 'TD4_data4_'+subject+'_1to2', num=1)
# classifier.training_lda_TD4_cross(my_clfs, trains2, classes2, trains1, classes1, log_fold = 'TD4_data4_'+subject+'_2to1', num=1)
    print "Total times: ", time.time() - start_time, 's'
Example #3
def train_dataset_feature_intra(
        train_dir='train1', subject_list=['subject_1'], feature_type='TD4', dataset='data1',
        fold_pre='250_100', z_score=False, channel_pos_list=['O'], action_num=7, chan_num=4):
    
    print 'train_dataset_feature_intra......'
    
    my_clfs = ["LDA"]
    start_time = time.time()
    
    if feature_type == 'TD4':
        feat_num = 4
    elif feature_type == 'TD5':
        feat_num = 5
    chan_len = feat_num * chan_num

    # flag for zero-mean (z-score) normalization
    norm = ''
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub, feature_type, action_num)
        
        # whether to apply zero-mean (z-score) normalization
        if z_score:
            trains = data_normalize(trains)
            norm = '_norm'

        classifier_lda.training_lda_TD4_intra(
            my_clfs, trains, classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' + sub + norm,
            pos_list=channel_pos_list, num=1, chan_len=chan_len, action_num=action_num,
            feature_type=feature_type, chan_num=chan_num)
    
    print "Total times: ", time.time() - start_time, 's'
Example #4
def feature_action_sensitivity(feature_type='TD4'):
    ''' For each feature, analyze its covariance with and without electrode shift '''
    results = []
    
    subjects = ['subject_' + str(i + 1) for i in range(5)]
    # print subjects
    # sys.exit(0)

    channel_pos_list = ['S0',                                             # center position
                        'U1', 'U2', 'D1', 'D2', 'L1', 'L2', 'R1', 'R2']  # up, down, left, right
    pos_num = len(channel_pos_list)
    
    actions = [i+1 for i in range(7)]
    action_num = len(actions)                        # 7 action classes

    if feature_type == 'TD4':
        feature_list = ['MAV', 'ZC', 'SSC', 'WL']
    elif feature_type == 'TD5':
        feature_list = ['MAV', 'ZC', 'SSC', 'WL','RMS']
    feat_num = len(feature_list)                    # 4 feature dimensions

    groups = [i+1 for i in range(4)]
    group_num = len(groups)                         # 4 channels
    group_span = group_num*feat_num
    # print group_span
    action_span = feat_num*group_num                # 16
    # print groups, channel_num, channel_span, feat_num
    
    train_dir = 'train4_250_100'


    results.append(['subject', 'action', 'feature', 'group', 'means_shift', 'std_shift'] )
    plsca = PLSCanonical(n_components=2)
    # pos = 1
    k=0
    for pos_idx, pos_name in enumerate(channel_pos_list[1:]):
        pos = pos_idx+1
        for subject in subjects:
            # shift_simulation = np.ones((action_num,action_span,2))
            trains, classes = data_load.load_feature_dataset(train_dir, subject, feature_type)
            # m = trains.shape[0]
            # print trains.shape, classes.shape, m
            # print group_span, group_span*2
            # sys.exit(0)
            # m = trains.shape[0]*2/3
            m = trains.shape[0]/2
            X_train = trains[:m, group_span*pos: group_span*(pos+1)]
            Y_train = trains[:m, :group_span]
            X_test = trains[m:, group_span*pos: group_span*(pos+1)]
            Y_test = trains[m:, :group_span]

            plsca.fit(X_train, Y_train)
            X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
            X_test_r, Y_test_r = plsca.transform(X_test, Y_test)

            filename=subject+'_'+pos_name
            # plot_plsc_figure(X_train_r,Y_train_r,X_test_r, Y_test_r, filename)
            plot_plsc_figure_two(X_train_r,Y_train_r,X_test_r, Y_test_r, filename)
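# --- Hedged sketch (not part of the original example) ----------------------
# The example above projects shifted-position features (X) and center-position
# features (Y) into a shared space with PLSCanonical; the same fit/transform
# pattern on synthetic data, for reference:
import numpy as np
from sklearn.cross_decomposition import PLSCanonical

rng = np.random.RandomState(0)
X = rng.randn(200, 16)                  # stand-in for one shifted-position block
Y = rng.randn(200, 16)                  # stand-in for the center (S0) block
plsca_demo = PLSCanonical(n_components=2)
plsca_demo.fit(X[:100], Y[:100])        # fit on the first half, as the example does
X_r, Y_r = plsca_demo.transform(X[100:], Y[100:])
print(X_r.shape)                        # (100, 2); Y_r has the same shape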
Example #5
def train_dataset_feature_inter(
        train_dir='train1', subject_list=['subject_1'], type='TD4',
        dataset='data1', fold_pre='250_100', z_score=False, channel_pos_list=['O']):
    my_clfs = ["LDA"]

    start_time = time.time()
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub)

        # temporarily keep only the first 6 of the 15 actions
        # print trains.shape, classes.shape, trains.shape[0]/15*6
        len_temp = trains.shape[0] / 15 * 6
        trains = trains[0:len_temp, :]
        classes = classes[0:len_temp]
        # print trains.shape, classes.shape
        # sys.exit(0)

        if z_score:
            trains = data_normalize(trains)
            sub = 'norm_' + sub
        if dataset == 'data1':
            chan_span = 16          # span
            chan_num = 2
        elif dataset == 'dataset4':
            chan_span = 0           # span
            chan_num = 4
        if type == 'TD4':
            feat_num = 4
        chan_len = feat_num * chan_span
        tests_inter = np.array([])
        for idx, channel_pos in enumerate(channel_pos_list):
            start = idx*feat_num
            if idx == 0:
                trains_inter = np.concatenate( 
                    (trains[:, start: start + feat_num], trains[:, start+chan_len:start+chan_len+feat_num]),
                    axis=1)
            elif idx!=0:
                test_temp = np.concatenate( 
                    (trains[:, start: start + feat_num], trains[:, start+chan_len:start+chan_len+feat_num]),
                    axis=1)
                if tests_inter.shape[0] != 0:
                    tests_inter = np.concatenate( (tests_inter, test_temp), axis=1)
                else:
                    tests_inter = test_temp
            # print trains_inter.shape, tests_inter.shape
            # if idx == 5:
            #     sys.exit(0)
        channel_pos_list = channel_pos_list[1:]
        # print channel_pos_list
        # print trains_inter.shape, tests_inter.shape
        # sys.exit(0)

        classifier.training_lda_TD4_inter(
            my_clfs, trains_inter, tests_inter, classes,
            log_fold=fold_pre + '/' + type + '_' + dataset + '_' + sub + '_updated',
            pos_list=channel_pos_list, num=1, chan_len=chan_num*feat_num)
        print "Total times: ", time.time() - start_time, 's'
Example #6
def train_dataset_feature_inter(train_dir='train4_250_100',
                                subject_list=['subject_1'],
                                feature_type='TD4',
                                dataset='data4',
                                fold_pre='250_100',
                                z_score=False,
                                channel_pos_list=['S0'],
                                action_num=7,
                                chan_num=4):
    my_clfs = ["LDA"]

    start_time = time.time()

    channel_pos_list_shift = channel_pos_list[1:]

    if feature_type == 'TD4':
        feat_num = 4  # feature dimension (TD4: 4)
    elif feature_type == 'TD5':
        feat_num = 5

    chan_len = feat_num * chan_num  # 16
    norm = ''

    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(
            train_dir, sub, feature_type, action_num)

        if z_score:
            trains = data_normalize(trains)
            norm = '_norm'

        trains_S0 = trains[:, 0:chan_len]
        trains_shift = trains[:, chan_len:]

        num = 1

        # generate the CCA mapping matrices
        # classifier_lda_cca.generate_transform_equations(
        #     trains_S0, trains_shift, pos_list=channel_pos_list_shift, chan_len=chan_len, subject=sub)

        # center-position (S0) training strategy
        classifier_lda_cca.training_lda_TD4_inter(
            my_clfs,
            trains_S0,
            trains_shift,
            classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' +
            sub + norm,
            pos_list=channel_pos_list_shift,
            chan_len=chan_len,
            chan_num=chan_num,
            feature_type=feature_type,
            action_num=action_num,
            num=num,
            subject=sub)

        print "Total times: ", time.time() - start_time, 's'
Example #7
def feature_action_sensitivity(feature_type='TD4'):
    ''' For each feature, analyze its covariance with and without electrode shift '''
    results = []
    
    subjects = ['subject_' + str(i + 1) for i in range(1)]

    channel_pos_list = ['S0',                                             # center position
                        'U1', 'U2', 'D1', 'D2', 'L1', 'L2', 'R1', 'R2']  # up, down, left, right
    pos_num = len(channel_pos_list)
    
    actions = [i+1 for i in range(7)]
    action_num = len(actions)                        # 7 action classes

    if feature_type == 'TD4':
        feature_list = ['MAV', 'ZC', 'SSC', 'WL']
    elif feature_type == 'TD5':
        feature_list = ['MAV', 'ZC', 'SSC', 'WL','RMS']
    feat_num = len(feature_list)                    # 4 feature dimensions

    groups = [i+1 for i in range(4)]
    group_num = len(groups)                         # 4 channels
    group_span = group_num*feat_num
    # print group_span
    action_span = feat_num*group_num                # 16
    # print groups, channel_num, channel_span, feat_num
    
    train_dir = 'train4_250_100'


    results.append(['subject', 'action', 'feature', 'group', 'means_shift', 'std_shift'] )
    plsca = PLSCanonical(n_components=2)
    # pos = 1
    k=0
    for pos_idx, pos_name in enumerate(channel_pos_list[1:]):
        pos = pos_idx+1
        for subject in subjects:
            # shift_simulation = np.ones((action_num,action_span,2))
            trains, classes = data_load.load_feature_dataset(train_dir, subject, feature_type)
            # m = trains.shape[0]
            # print trains.shape, classes.shape, m
            # print group_span, group_span*2
            # sys.exit(0)
            # m = trains.shape[0]*2/3
            m = trains.shape[0]/2
            X_train = trains[:m, group_span*pos: group_span*(pos+1)]
            Y_train = trains[:m, :group_span]
            X_test = trains[m:, group_span*pos: group_span*(pos+1)]
            Y_test = trains[m:, :group_span]

            plsca.fit(X_train, Y_train)
            X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
            X_test_r, Y_test_r = plsca.transform(X_test, Y_test)

            filename=subject+'_'+pos_name
            # plot_plsc_figure(X_train_r,Y_train_r,X_test_r, Y_test_r, filename)
            plot_plsc_figure_two(X_train_r,Y_train_r,X_test_r, Y_test_r, filename)
Example #8
def feature_action_sensitivity(feature_type='TD4'):
    ''' For each feature, analyze how it differs with and without electrode shift '''
    results = []
    
    subjects = ['subject_' + str(i + 1) for i in range(5)]

    channel_pos_list = ['S0',                                             # center position
                        'U1', 'U2', 'D1', 'D2', 'L1', 'L2', 'R1', 'R2']  # up, down, left, right
    pos_num = len(channel_pos_list)
    
    actions = [i+1 for i in range(7)]
    action_num = len(actions)                        # 7 action classes

    if feature_type == 'TD4':
        feature_list = ['MAV', 'ZC', 'SSC', 'WL']
    elif feature_type == 'TD5':
        feature_list = ['MAV', 'ZC', 'SSC', 'WL','RMS']
    feat_num = len(feature_list)                    # 4 feature dimensions

    groups = [i+1 for i in range(4)]
    group_num = len(groups)                         # 4 channels

    action_span = feat_num*group_num                # 16
    # print groups, channel_num, channel_span, feat_num
    
    train_dir = 'train4_250_100'


    results.append(['subject', 'action', 'feature', 'group','means_shift', 'std_shift'] )

    for subject in subjects:
        shift_simulation = np.ones((action_num,action_span,2))
        trains, classes = data_load.load_feature_dataset(train_dir, subject, feature_type)
        gaussion_distribute = np.ones( (len(actions), len(groups), len(feature_list), 2))
        for action in actions:
            trains_action = trains[classes == action]
            means = np.mean(trains_action, axis=0)
            stds = np.std(trains_action, axis=0)
            
            for group in groups:
                for feat_idx, feat_name in enumerate(feature_list):
                    idx_S0 = (group-1)*feat_num+feat_idx
                    idx = np.array([(i+1)*action_span+feat_idx+(group-1)*feat_num 
                            for i in range(pos_num-1)])
                    
                    means_shift = (np.mean(means[idx]) - np.mean(means[idx_S0]))/np.mean(means[idx_S0])
                    std_shift = (np.std(stds[idx]) - np.std(stds[idx_S0]))
                    shift_simulation[action-1, (group-1)*feat_num+feat_idx, :] = np.array([means_shift, std_shift]) 
                    # means_shift = abs(means[idx] - means[idx_S0])/means[idx_S0] \
                    #             + abs(stds[idx]-stds[idx_S0])/stds[idx_S0]
                    results.append([subject, str(action), feat_name, str(group), str(means_shift), str(std_shift)])
                    gaussion_distribute[action-1, group-1, feat_idx,:] = [means_shift, std_shift]
                    # print subject, action, feat_name, group, means_shift[:]
        log_result(gaussion_distribute, root_path + '/result_gaussian/sensitivity/'+subject+'_simulation_1', 2)       
    log_result(results, root_path + '/result_gaussian/sensitivity/feature_action_sensitivity_5', 2)
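# --- Hedged sketch (not part of the original example) ----------------------
# The idx_S0 / idx arithmetic above assumes a position-major column layout:
# column = pos * action_span + (group - 1) * feat_num + feat_idx,
# i.e. S0 occupies columns 0..15 and each shifted position the next 16.
feat_num, group_num, pos_num = 4, 4, 9
action_span = feat_num * group_num              # 16 columns per position
group, feat_idx = 2, 3                          # e.g. WL of channel 2
idx_S0 = (group - 1) * feat_num + feat_idx
idx = [(i + 1) * action_span + idx_S0 for i in range(pos_num - 1)]
print(idx_S0)                                   # 7: column of this feature in the S0 block
print(idx)                                      # [23, 39, 55, 71, 87, 103, 119, 135]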
Example #9
def train_dataset_feature_inter(
    train_dir="train1",
    subject_list=["subject_1"],
    feature_type="TD4",
    dataset="data1",
    fold_pre="250_100",
    z_score=False,
    channel_pos_list=["O"],
    action_num=11,
    chan_num=4,
):

    my_clfs = ["LDA"]

    start_time = time.time()

    if feature_type == "TD4":
        feat_num = 4
    elif feature_type == "TD5":
        feat_num = 5

    chan_len = feat_num * chan_num

    norm = ""
    channel_pos_list = channel_pos_list[1:]

    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub, feature_type, action_num)

        if z_score:
            trains = data_normalize(trains)
            norm = "_norm"
        trains_inter = trains[:, 0:chan_len]
        tests_inter = trains[:, chan_len:]

        training_lda_TD4_inter(
            my_clfs,
            trains_inter,
            tests_inter,
            classes,
            log_fold=fold_pre + "/" + feature_type + "_" + dataset + "_" + sub + norm,
            pos_list=channel_pos_list,
            num=1,
            chan_len=chan_len,
            action_num=action_num,
        )
        print "Total times: ", time.time() - start_time, "s"
Example #10
def train_dataset_feature_intra(train_dir='train1',
                                subject_list=['subject_1'],
                                feature_type='TD4',
                                dataset='data1',
                                fold_pre='250_100',
                                z_score=False,
                                channel_pos_list=['O'],
                                action_num=7,
                                chan_num=4):

    print 'train_dataset_feature_intra......'

    my_clfs = ["LDA"]
    start_time = time.time()

    if feature_type == 'TD4':
        feat_num = 4
    elif feature_type == 'TD5':
        feat_num = 5
    chan_len = feat_num * chan_num

    # flag for zero-mean (z-score) normalization
    norm = ''
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(
            train_dir, sub, feature_type, action_num)

        # whether to apply zero-mean (z-score) normalization
        if z_score:
            trains = data_normalize(trains)
            norm = '_norm'

        classifier_lda_cca.training_lda_TD4_intra(
            my_clfs,
            trains,
            classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' +
            sub + norm,
            pos_list=channel_pos_list,
            num=1,
            chan_len=chan_len,
            action_num=action_num,
            feature_type=feature_type,
            chan_num=chan_num)

    print "Total times: ", time.time() - start_time, 's'
Example #11
def train_dataset_feature_intra(
        train_dir='train1', subject_list=['subject_1'], type='TD4',
        dataset='data1', fold_pre='250_100', z_score=False, channel_pos_list=['O']):
    # my_clfs = ["LDA", "SVC_linear", "SVC_rbf", "Logistic", "QDA", "GaussianNB"]
    # my_clfs = ["LDA", "QDA", "GaussianNB", "SVC_linear", "SVC_rbf",
    # "Logistic"]

    my_clfs = ["LDA"]

    start_time = time.time()
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub)
        if z_score:
            trains = data_normalize(trains)
            sub = 'norm_' + sub
        if dataset == 'data1':
            chan_span = 17          # span
        elif dataset == 'dataset4':
            chan_span = 0           # span
        if type == 'TD4':
            feat_num = 4
        chan_len = feat_num * chan_span

        for idx, channel_pos in enumerate(channel_pos_list):
            start = idx*feat_num
            # print start, start+feat_num, start+chan_len, start+feat_num+chan_len
            trains_intra = np.concatenate( 
                (trains[:, start: start + feat_num], trains[:, start+chan_len:start+chan_len+feat_num]),
                axis=1)
            # print trains_intra.shape
            # if idx == 1 :
            #     sys.exit(0)
            # print 'Trains and classes: ', trains_intra.shape, classes.shape, idx * chan_len, idx * chan_len + chan_len
            classifier.training_lda_TD4_intra(
                my_clfs, trains, classes,
                log_fold=fold_pre + '/' + type + '_' + dataset + '_' + sub + '_updated',
                log_file_pos=channel_pos, num=1)

#
    # classifier.training_lda_TD4_cross(my_clfs, trains1, classes1, trains2, classes2, log_fold = 'TD4_data4_'+subject+'_1to2', num=1)
    # classifier.training_lda_TD4_cross(my_clfs, trains2, classes2, trains1, classes1, log_fold = 'TD4_data4_'+subject+'_2to1', num=1)
    print "Total times: ", time.time() - start_time, 's'
Example #12
def train_dataset_feature(train_dir='train1', subject_list=['subject_1'], type='TD4', dataset='data1', fold_pre='250_100', z_score=False):
    # my_clfs = ["LDA", "SVC_linear", "SVC_rbf", "Logistic", "QDA", "GaussianNB"]
    # my_clfs = ["LDA", "QDA", "GaussianNB", "SVC_linear", "SVC_rbf", "Logistic"]

    my_clfs = ["LDA"]

    start_time = time.time()
    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub)
        if z_score:
            trains = data_normalize(trains)
            sub = 'norm_' + sub
        # print trains.mean(axis=0), trains.std(axis=0)
        # sys.exit(0)
        classifier.training_lda_TD4(
            my_clfs, trains, classes, log_fold=fold_pre + '/' + type + '_' + dataset + '_' + sub, num=1)
#
    # classifier.training_lda_TD4_cross(my_clfs, trains1, classes1, trains2, classes2, log_fold = 'TD4_data4_'+subject+'_1to2', num=1)
    # classifier.training_lda_TD4_cross(my_clfs, trains2, classes2, trains1, classes1, log_fold = 'TD4_data4_'+subject+'_2to1', num=1)
    print "Total times: ", time.time() - start_time, 's'
Example #13
def train_dataset_feature_inter(
        train_dir='train4_250_100', subject_list=['subject_1'], feature_type='TD4', dataset='data4',
        fold_pre='250_100', z_score=False, channel_pos_list=['S0'], action_num=7, group_num=4):
    my_clfs = ["LDA"]

    start_time = time.time()
    # channel_pos_list_shift = channel_pos_list[1:]
    channel_pos_list_shift = channel_pos_list
    action_num = 7
    group_num = 4
    if feature_type == 'TD4':
        feat_num = 4                    # feature dimension (TD4: 4)
    elif feature_type == 'TD5':
        feat_num = 5

    chan_num = 4                        # number of channels: 4
    chan_len = feat_num * chan_num             # 16

    for sub in subject_list:
        trains, classes = data_load.load_feature_dataset(train_dir, sub, feature_type)

        tests_inter = np.array([])
        trains_inter = trains[:, 0:chan_len]
        trains_simu, classes_simu = guassion_simu(trains_inter, classes, sub, action_num, chan_num, feat_num)
        # trains_simu, classes_simu = trains_inter, classes

        tests_inter = trains
        
        if z_score:
            trains_simu = data_normalize(trains_simu)
            tests_inter = data_normalize(tests_inter)
            sub = 'norm_' + sub
            
        num = 3

        classifier_lda.training_lda_TD4_inter(
            my_clfs, trains_simu, classes_simu, tests_inter, classes,
            log_fold=fold_pre + '/' + feature_type + '_' + dataset + '_' + sub + '_simu1',
            pos_list=channel_pos_list_shift, chan_len=chan_len, group_num=group_num,
            feature_type=feature_type, action_num=action_num, num=num)
        print "Total times: ", time.time() - start_time, 's'
Example #14
def feature_action_sensitivity(feature_type='TD4'):
    ''' For each feature, analyze how it differs with and without electrode shift '''
    results = []

    subjects = ['subject_' + str(i + 1) for i in range(5)]

    channel_pos_list = [
        'S0',  # center position
        'U1',
        'U2',
        'D1',
        'D2',
        'L1',
        'L2',
        'R1',
        'R2'
    ]  # up, down, left, right
    pos_num = len(channel_pos_list)

    actions = [i + 1 for i in range(7)]
    action_num = len(actions)  # 7 action classes

    if feature_type == 'TD4':
        feature_list = ['MAV', 'ZC', 'SSC', 'WL']
    elif feature_type == 'TD5':
        feature_list = ['MAV', 'ZC', 'SSC', 'WL', 'RMS']
    feat_num = len(feature_list)  # 4 feature dimensions

    groups = [i + 1 for i in range(4)]
    group_num = len(groups)  # 4 channels

    action_span = feat_num * group_num  # 16
    # print groups, channel_num, channel_span, feat_num

    train_dir = 'train4_250_100'

    results.append(
        ['subject', 'action', 'feature', 'group', 'means_shift', 'std_shift'])

    for subject in subjects:
        shift_simulation = np.ones((action_num, action_span, 2))
        trains, classes = data_load.load_feature_dataset(
            train_dir, subject, feature_type)
        gaussion_distribute = np.ones(
            (len(actions), len(groups), len(feature_list), 2))
        for action in actions:
            trains_action = trains[classes == action]
            means = np.mean(trains_action, axis=0)
            stds = np.std(trains_action, axis=0)

            for group in groups:
                for feat_idx, feat_name in enumerate(feature_list):
                    idx_S0 = (group - 1) * feat_num + feat_idx
                    idx = np.array([(i + 1) * action_span + feat_idx +
                                    (group - 1) * feat_num
                                    for i in range(pos_num - 1)])

                    # means_shift = abs(means[idx] - means[idx_S0])/means[idx_S0]
                    # means_shift = abs(means[idx] - means[idx_S0])/means[idx_S0] \
                    #             + abs(stds[idx]-stds[idx_S0])/stds[idx_S0]
                    # results.append([subject, str(action), feat_name, str(group)] + map(str, means_shift))

                    means_shift = np.mean(means[idx]) - np.mean(means[idx_S0])
                    std_shift = np.std(stds[idx]) - np.std(stds[idx_S0])
                    shift_simulation[action - 1, (group - 1) * feat_num +
                                     feat_idx, :] = np.array(
                                         [means_shift, std_shift])
                    # means_shift = abs(means[idx] - means[idx_S0])/means[idx_S0] \
                    #             + abs(stds[idx]-stds[idx_S0])/stds[idx_S0]
                    results.append([
                        subject,
                        str(action), feat_name,
                        str(group),
                        str(means_shift),
                        str(std_shift)
                    ])
                    gaussion_distribute[action - 1, group - 1, feat_idx, :] = [
                        means_shift, std_shift
                    ]
                    # print subject, action, feat_name, group, means_shift[:]
        log_result(
            gaussion_distribute,
            root_path + '/result/sensitivity/' + subject + '_simulation_1', 2)
    log_result(results,
               root_path + '/result/sensitivity/feature_action_sensitivity_5',
               2)
def feature_action_sensitivity(feature_type="TD4"):
    """ 对每个特征,分析其在不移位和移位情况下的差异性 """
    results = []

    subjects = ["subject_" + str(i + 1) for i in range(5)]

    channel_pos_list = ["S0", "U1", "U2", "D1", "D2", "L1", "L2", "R1", "R2"]  # 中心位置  # 上 下 左 右
    pos_num = len(channel_pos_list)

    actions = [i + 1 for i in range(7)]
    action_num = len(actions)  # 7 action classes

    if feature_type == "TD4":
        feature_list = ["MAV", "ZC", "SSC", "WL"]
    elif feature_type == "TD5":
        feature_list = ["MAV", "ZC", "SSC", "WL", "RMS"]
    feat_num = len(feature_list)  # 4 feature dimensions

    groups = [i + 1 for i in range(4)]
    group_num = len(groups)  # 4 channels

    action_span = feat_num * group_num  # 16
    # print groups, channel_num, channel_span, feat_num

    train_dir = "train4_250_100"

    results.append(["subject", "action", "feature", "group", "means_shift", "std_shift"])

    for subject in subjects:
        shift_simulation = np.ones((action_num, action_span, 2))
        trains, classes = data_load.load_feature_dataset(train_dir, subject, feature_type)
        gaussion_distribute = np.ones((len(actions), len(groups), len(feature_list), 2))
        for action in actions:
            trains_action = trains[classes == action]
            means = np.mean(trains_action, axis=0)
            stds = np.std(trains_action, axis=0)

            for group in groups:
                for feat_idx, feat_name in enumerate(feature_list):
                    idx_S0 = (group - 1) * feat_num + feat_idx
                    idx = np.array(
                        [(i + 1) * action_span + feat_idx + (group - 1) * feat_num for i in range(pos_num - 1)]
                    )

                    # means_shift = abs(means[idx] - means[idx_S0])/means[idx_S0]
                    # means_shift = abs(means[idx] - means[idx_S0])/means[idx_S0] \
                    #             + abs(stds[idx]-stds[idx_S0])/stds[idx_S0]
                    # results.append([subject, str(action), feat_name, str(group)] + map(str, means_shift))

                    means_shift = np.mean(means[idx]) - np.mean(means[idx_S0])
                    std_shift = np.std(stds[idx]) - np.std(stds[idx_S0])
                    shift_simulation[action - 1, (group - 1) * feat_num + feat_idx, :] = np.array(
                        [means_shift, std_shift]
                    )
                    # means_shift = abs(means[idx] - means[idx_S0])/means[idx_S0] \
                    #             + abs(stds[idx]-stds[idx_S0])/stds[idx_S0]
                    results.append([subject, str(action), feat_name, str(group), str(means_shift), str(std_shift)])
                    gaussion_distribute[action - 1, group - 1, feat_idx, :] = [means_shift, std_shift]
                    # print subject, action, feat_name, group, means_shift[:]
        log_result(gaussion_distribute, root_path + "/result/sensitivity/" + subject + "_simulation_1", 2)
    log_result(results, root_path + "/result/sensitivity/feature_action_sensitivity_5", 2)
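# --- Hypothetical driver (not part of the original example) ----------------
# Assumes data_load, log_result and root_path are available in this module.
if __name__ == "__main__":
    feature_action_sensitivity(feature_type="TD4")   # analyze the 4-feature TD set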