Example #1
def torch4FROLS_pipeline(data_type, configs, n_trial):
    """基于 Kalman 滤波器的各个算法的 pipeline

    Args:
        data_type: 数据类型
        configs (dict): 配置字典
        n_trial: 试验次数
    """

    config = configs[data_type]
    WGCI100 = []
    for trial in range(n_trial):
        term_selector = Selector(
            f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat"
        )
        # get data
        normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        kf = torch4FROLS(normalized_signals,
                         Kalman_H=Kalman_H,
                         n_epoch=config['n_epoch'])
        y_coef = kf.estimate_coef()
        # full-model error variance
        whole_y_error = np.var(kf.y_error, 0)

        # sub-model case
        terms_mat = sio.loadmat(
            f"{config['data_root']}{data_type}{config['term_path']}_WGCI{trial+1}.mat"
        )
        sub1 = []
        for ch in range(5):
            data_set = {
                'normalized_signals': terms_mat['normalized_signals'],
                'Hv': terms_mat['Hv'],
                'Kalman_H': terms_mat['Kalman_H'][0, ch],
                'terms_chosen': terms_mat['terms_chosen'][0, ch]
            }
            term_selector = Selector(data_set)
            # get data
            normalized_signals = term_selector.make_selection()[0]
            # build the Kalman filter
            kf = torch4FROLS(normalized_signals,
                             Kalman_H=Kalman_H,
                             n_epoch=config['n_epoch'])
            # estimate the coefficients
            y_coef = kf.estimate_coef()
            # sub-model error variance
            sub_y_error = np.var(kf.y_error, 0)
            sub1.append(np.log(sub_y_error / whole_y_error))
        WGCI100.append(np.asarray(sub1).T)

    mean_WGCI = np.mean(WGCI100, 0)
    var_WGCI = np.var(WGCI100, 0)
    print(f"mean_WGCI = {mean_WGCI}, var_WGCI = {var_WGCI}")
    fname1 = f"{config['data_root']}{data_type}_kalman4FROLS100_{config['est_fname']}WGCI100.txt"
    save_3Darray(
        fname1,
        np.array(
            [mean_WGCI * (mean_WGCI > 0.01), var_WGCI * (var_WGCI > 0.01)]))
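The WGCI here is the channel-wise log ratio log(var(sub-model residual) / var(full-model residual)), averaged over trials and thresholded at 0.01 before saving. The pipeline only reads four keys from its per-type config; a minimal call sketch with hypothetical paths and values (not taken from the original project):

configs = {
    'linear': {
        'data_root': './data/',       # hypothetical data directory
        'term_path': '_terms_trial',  # hypothetical per-trial file-name stem
        'est_fname': 'est_',          # hypothetical output-file prefix
        'n_epoch': 100,               # epochs for torch4FROLS
    }
}
torch4FROLS_pipeline('linear', configs, n_trial=100)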
Example #2
def torch4FROLS_pipeline(data_type, configs, n_trial):
    """基于 Kalman 滤波器的各个算法的 pipeline

    Args:
        data_type: 数据类型
        configs (dict): 配置字典
        n_trial: 试验次数
    """

    config = configs[data_type]
    fname = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}"
    term_selector = Selector(
        f"{config['data_root']}{data_type}{config['term_path']}")
    terms_set = term_selector.make_terms()
    # get data
    normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
    y_coef = 0
    for trial in range(n_trial):
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        kf = torch4FROLS(normalized_signals,
                         Kalman_H=Kalman_H,
                         n_epoch=config['n_epoch'])
        y_coef += kf.estimate_coef()
    est_model = make_func4K4FROLS(y_coef / n_trial,
                                  candidate_terms,
                                  Kalman_S_No,
                                  fname=fname)
    # print the result
    print(est_model)
Example #3
def kalman4ARX_pipeline(data_type, configs, n_trial):
    """基于 Kalman 滤波器的各个算法的 pipeline

    types = ['linear', 'longlag_linear']

    Args:
        data_type: 数据类型
        configs (dict): 配置字典
        n_trial: 试验次数
    """

    config = configs[data_type]
    term_selector = Selector(
        f"{config['data_root']}{data_type}{config['term_path']}")
    terms_set = term_selector.make_terms()
    # get data
    normalized_signals = term_selector.make_selection()[0]
    fname = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}"
    A_coef0 = 0
    for trial in range(n_trial):
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        # build the Kalman filter
        kf = Kalman4ARX(normalized_signals, config['max_lag'], uc=config['uc'])
        # estimate the coefficients
        y_coef, A_coef = kf.estimate_coef(config['threshold'])
        A_coef0 += A_coef
    # build the model expression and save it
    est_model = make_linear_func(A_coef0 / n_trial, fname=fname)
    # print the result
    print(est_model)
Example #4
def frokf(noise_var,
          ndim,
          dtype,
          terms,
          length,
          root='../data/',
          trials=100,
          uc=0.01,
          ntest=50):
    assert dtype in ['linear', 'nonlinear'], 'type not supported!'
    ax = []
    for trial in range(1, trials + 1):
        #     for trial in [trials]:
        terms_path = root + f'{dtype}_terms{ndim}D_{noise_var:2.2f}trial{trial}.mat'
        term = Selector(terms_path)
        _ = term.make_terms()
        normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term.make_selection()
        # Kalman_S_No = np.sort(Kalman_S_No)
        y_coef = 0
        # average the FROKF estimate over multiple runs
        for _ in trange(ntest):
            kf = Kalman4FROLS(normalized_signals, Kalman_H=Kalman_H, uc=uc)
            y_coef += kf.estimate_coef()
        y_coef /= ntest
        terms_set = corr_term(y_coef, candidate_terms, Kalman_S_No)
        flatten_coef, t = [], 0
        for i in range(ndim):
            tmp = []
            for k in terms[t:t + length[i]]:
                tmp.append(terms_set[i][k] if k in terms_set[i] else np.nan)
            flatten_coef.extend(tmp)
            t += length[i]
        ax.append(flatten_coef)
    return np.stack(ax)
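frokf returns one row per trial, holding the FROKF coefficient (averaged over ntest runs) for each requested term, or NaN when that term was not selected. A hypothetical call sketch; the term labels below are placeholders and must match the keys produced by corr_term for the data set at hand:

# assumes '../data/linear_terms2D_1.00trial<k>.mat' files exist for k = 1..trials
coef = frokf(noise_var=1.0,
             ndim=2,
             dtype='linear',
             terms=['y1(t-1)', 'y2(t-1)', 'y1(t-2)'],  # placeholder term labels
             length=[2, 1],  # 2 requested terms for channel 1, 1 for channel 2
             trials=100,
             ntest=50)
print(coef.shape)  # (trials, sum(length)) -> (100, 3)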
Example #5
def test_torch4FROLS(self):
    terms_path = 'test_data/linear_terms.mat'
    term = Selector(terms_path)
    normalized_signals, Kalman_H, _, _ = term.make_selection()
    kf = torch4FROLS(normalized_signals, Kalman_H, n_epoch=100)
    y_coef = kf.estimate_coef()
    self.assertTrue(isinstance(kf, torch4FROLS))
    self.assertTrue(isinstance(y_coef, np.ndarray))
Example #6
def kalman_pipeline(configs):
    """基于 Kalman 滤波器的各个算法的 pipeline

    Args:
        configs (dict): 配置字典
    """

    for data_type in configs.keys():
        config = configs[data_type]
        term_selector = Selector(
            f"{config['data_root']}{data_type}{config['term_path']}")
        terms_set = term_selector.make_terms()
        # get data
        normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
        # build the Kalman filter
        for algo_type in config['algorithm']:
            print(f"{data_type} {algo_type}")
            fname = f"{config['data_root']}{data_type}_{algo_type}_{config['est_fname']}"
            if algo_type == 'Kalman4ARX':
                kf = Kalman4ARX(normalized_signals,
                                config['max_lag'],
                                uc=config['uc'])
                # estimate the coefficients
                y_coef, A_coef = kf.estimate_coef(config['threshold'])
                # build the model expression and save it
                est_model = make_linear_func(A_coef, fname=fname)
                # save the averaged result
            elif algo_type == 'Kalman4FROLS':
                kf = Kalman4FROLS(normalized_signals,
                                  Kalman_H=Kalman_H,
                                  uc=config['uc'])
                y_coef = kf.estimate_coef()
                est_model = make_func4K4FROLS(y_coef,
                                              candidate_terms,
                                              Kalman_S_No,
                                              fname=fname)
                # save the averaged result
            elif algo_type == 'torch4FROLS':
                kf = torch4FROLS(normalized_signals,
                                 Kalman_H=Kalman_H,
                                 n_epoch=config['n_epoch'])
                y_coef = kf.estimate_coef()
                est_model = make_func4K4FROLS(y_coef,
                                              candidate_terms,
                                              Kalman_S_No,
                                              fname=fname)
                # save the averaged result
            else:
                print('!Not Defined!')
                continue  # est_model is not defined for unknown algorithms
            print(f"\n{data_type}_{algo_type} est model saved!\n")

            # print the result
            print(est_model)
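kalman_pipeline loops over config['algorithm'] and dispatches each entry to the matching estimator, so one config entry can run several estimators over the same selected terms. A sketch of one hypothetical entry (all paths and numbers are placeholders):

configs = {
    'linear': {
        'data_root': './data/',        # hypothetical
        'term_path': '_terms.mat',     # hypothetical
        'est_fname': 'est_model.txt',  # hypothetical
        'algorithm': ['Kalman4ARX', 'Kalman4FROLS', 'torch4FROLS'],
        'max_lag': 5,        # used by Kalman4ARX
        'uc': 0.01,          # used by Kalman4ARX and Kalman4FROLS
        'threshold': 0.1,    # used by Kalman4ARX.estimate_coef
        'n_epoch': 100,      # used by torch4FROLS
    }
}
kalman_pipeline(configs)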
Example #7
def torch4FROLS_pipeline(data_type, configs, n_trial, id_correct, n_correct):
    """基于 Kalman 滤波器的各个算法的 pipeline

    Args:
        data_type: 数据类型
        configs (dict): 配置字典
        n_trial: 试验次数
    """

    config = configs[data_type]
    y_coef100 = np.zeros((100, 5, 5))
    y_coef9 = np.zeros((100, 9))
    for trial in range(n_trial):
        fname = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}{trial+1}.txt"
        term_selector = Selector(
            f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat"
        )
        terms_set = term_selector.make_terms()
        # get data
        normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term_selector.make_selection()
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        kf = torch4FROLS(normalized_signals,
                         Kalman_H=Kalman_H,
                         n_epoch=config['n_epoch'])
        y_coef = kf.estimate_coef()
        print(kf.y_error, kf.y_error.shape)
        y_coef100[trial] = y_coef
        est_model = make_func4K4FROLS(y_coef,
                                      candidate_terms,
                                      Kalman_S_No,
                                      fname=fname)
        coef9 = []
        Kalman_S_No_order = np.sort(Kalman_S_No)
        for row in range(5):
            for t in range(n_correct[row]):
                idx = np.argwhere(
                    Kalman_S_No_order[row, :] == id_correct[data_type][row][t])
                value = y_coef[row, idx]
                coef9.append(value[0, 0])
        y_coef9[trial] = np.array(coef9)

        # print the result
        # print(est_model)
    fname1 = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}log.txt"
    save_3Darray(fname1, y_coef100)
    mean_y = np.mean(y_coef9, 0)
    var_y = np.var(y_coef9, 0)
    print(mean_y, var_y, sep='\n')
    fname1 = f"{config['data_root']}{data_type}_torch4FROLS100_{config['est_fname']}log100.txt"
    save_2Darray(fname1, np.array([mean_y, var_y]).T)
Example #8
def kalman4ARX_pipeline(data_type, configs, n_trial):
    """基于 Kalman 滤波器的各个算法的 pipeline

    types = ['linear', 'longlag_linear']

    Args:
        data_type: 数据类型
        configs (dict): 配置字典
        n_trial: 试验次数
    """

    config = configs[data_type]
    if data_type == 'linear':
        y_coef100 = np.zeros((100, 5, 25))
    else:
        y_coef100 = np.zeros((100, 5, 50))
    for trial in range(n_trial):
        term_selector = Selector(
            f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat"
        )
        terms_set = term_selector.make_terms()
        # get data
        normalized_signals = term_selector.make_selection()[0]
        fname = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}{trial+1}.txt"
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        # build the Kalman filter
        kf = Kalman4ARX(normalized_signals, config['max_lag'], uc=config['uc'])
        # estimate the coefficients
        y_coef, A_coef = kf.estimate_coef(config['threshold'])
        y_coef100[trial] = y_coef
        # build the model expression and save it
        est_model = make_linear_func(A_coef, fname=fname)
        # print the result
        # print(est_model)
    fname1 = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}log.txt"
    save_3Darray(fname1, y_coef100)
    mean_y = np.mean(y_coef100, 0)
    var_y = np.var(y_coef100, 0)
    print(mean_y, var_y, sep='\n')
    fname1 = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}log100.txt"
    save_3Darray(fname1, np.array([mean_y, var_y]))
Example #9
print(est_model)

# !Kalman4FROLS test
# !Selector test
# !linear model
terms_path = './data/nor_linear_terms.mat'
term = Selector(terms_path)
terms_repr = term.make_terms()

# *save the candidate term set
fname = './data/linear_candidate_terms.txt'
np.savetxt(fname, terms_repr, fmt='%s')

# *selection
print(term)
normalized_signals, Kalman_H, candidate_terms, Kalman_S_No = term.make_selection()

# *build the Kalman filter
kf = Kalman4FROLS(normalized_signals, Kalman_H=Kalman_H, uc=0.01)
y_coef = kf.estimate_coef()
print(y_coef)

# *generate the estimated model
est_model = make_func4K4FROLS(y_coef,
                              candidate_terms,
                              Kalman_S_No,
                              fname='./data/K4FROLS_est_model.txt')
print(est_model)

# !nonlinear model
terms_path = './data/nor_nonlinear_terms.mat'
Example #10
def kalman4ARX_pipeline(data_type, configs, n_trial):
    """基于 Kalman 滤波器的各个算法的 pipeline

    types = ['linear', 'longlag_linear']

    Args:
        data_type: 数据类型
        configs (dict): 配置字典
        n_trial: 试验次数
    """

    config = configs[data_type]
    WGCI100 = []
    for trial in range(n_trial):
        # full-model (overall) case
        term_selector = Selector(
            f"{config['data_root']}{data_type}{config['term_path']}{trial+1}.mat"
        )
        # get data
        normalized_signals = term_selector.make_selection()[0]
        print(f'data_type: {data_type}, trial: ### {trial+1}')
        # build the Kalman filter
        kf = Kalman4ARX(normalized_signals, config['max_lag'], uc=config['uc'])
        # estimate the coefficients
        y_coef, A_coef = kf.estimate_coef(config['threshold'])
        # full-model error variance
        whole_y_error = np.var(kf.y_error, 0)

        # sub-model case
        terms_mat = sio.loadmat(
            f"{config['data_root']}{data_type}{config['term_path']}_WGCI{trial+1}.mat"
        )
        sub1 = []
        for ch in range(5):
            data_set = {
                'normalized_signals': terms_mat['normalized_signals'],
                'Hv': terms_mat['Hv'],
                'Kalman_H': terms_mat['Kalman_H'][0, ch],
                'terms_chosen': terms_mat['terms_chosen'][0, ch]
            }
            term_selector = Selector(data_set)
            # get data
            normalized_signals = term_selector.make_selection()[0]
            # build the Kalman filter
            kf = Kalman4ARX(normalized_signals,
                            config['max_lag'],
                            uc=config['uc'])
            # estimate the coefficients
            y_coef, A_coef = kf.estimate_coef(config['threshold'])
            # sub-model error variance
            sub_y_error = np.var(kf.y_error, 0)
            sub1.append(np.log(sub_y_error / whole_y_error))
        WGCI100.append(np.asarray(sub1).T)

    mean_WGCI = np.mean(WGCI100, 0)
    var_WGCI = np.var(WGCI100, 0)
    print(f"mean_WGCI = {mean_WGCI}, var_WGCI = {var_WGCI}")
    fname1 = f"{config['data_root']}{data_type}_kalman4ARX100_{config['est_fname']}WGCI100.txt"
    save_3Darray(
        fname1,
        np.array(
            [mean_WGCI * (mean_WGCI > 0.01), var_WGCI * (var_WGCI > 0.01)]))