Example #1
from functools import partial
from multiprocessing import Pool

# iterDict and statistic_iterate_all_worker are project helpers
# (iterDict comes from CrashDiag.Preproc.Database, see Example #5)


def statistic_iterate_all(db, liste, batchsize=50, visual=False):
    """
    Iterates over all files containing the listed subcomponents, evaluates
    and visualizes the stationarity tests defined above. The per-file results
    (a DataFrame with the percentage of stationary signals per subcomponent,
    1 if stationary) are produced batch-wise by statistic_iterate_all_worker;
    the function itself returns nothing.
    :param db: Database you want to iterate
    :type db: Database
    :param liste: list of all subcomponents
    :type liste: list
    :param batchsize: number of files dispatched to the worker pool per batch
    """
    files = []

    for i, (name, dct) in enumerate(iterDict(db, liste, querytype='or')):
        files.append([name, dct])
        print(i)
        # flush a full batch to the worker pool (the guard avoids a spurious
        # one-file batch at i == 0)
        if i % batchsize == 0 and i > 0:
            p = Pool(processes=1)
            worker = partial(statistic_iterate_all_worker, liste)
            p.map(worker, files)
            p.close()
            files = []
    # process the files left over from the last, partial batch
    p = Pool(processes=1)
    worker = partial(statistic_iterate_all_worker, liste)
    p.map(worker, files)
    p.close()

    return
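
A minimal usage sketch, assuming the CrashDiag setup shown in Example #5 (the subcomponent names are placeholders, not taken from this listing):

# hypothetical driver code
from CrashDiag.Preproc.Database import Database

db = Database()
subcomponents = ['ATT_Roll', 'ATT_DesRoll', 'ATT_Yaw', 'ATT_DesYaw']
# dispatches the matching files to statistic_iterate_all_worker in batches of 50
statistic_iterate_all(db, subcomponents, batchsize=50)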
Example #2
import os
from multiprocessing import Pool

# cd, iterDict, make_combinations and granger_df_producer_worker are project
# helpers (iterDict comes from CrashDiag.Preproc.Database, see Example #5)


def granger_df_producer(db,
                        liste,
                        stationary_path=cd.paths.stationary_results,
                        causality_path=cd.paths.causality_results,
                        batchsize=100):
    """
    Runs the Granger test over all files: builds one work item per file
    (including a save_path under causality_path for the worker's output) and
    dispatches the items to granger_df_producer_worker in batches; the
    function itself returns nothing.
    :param db: Database you want to iterate
    :type db: Database
    :param liste: list of subcomponents
    :type liste: list
    :param batchsize: number of files dispatched to the worker pool per batch
    """

    stationary_path = os.path.realpath(stationary_path)
    causality_path = os.path.realpath(causality_path)

    combinations = make_combinations(liste)
    worker_input = []

    for i, (file_name,
            dct) in enumerate(iterDict(db, liste, querytype='or',
                                       linenr=True)):
        try:
            file_path = os.path.join(stationary_path, file_name)
            save_path = os.path.join(causality_path, file_name)
            worker_input.append([
                file_name, file_path, save_path, stationary_path, dct,
                combinations, liste, causality_path
            ])
        except Exception:
            print('could not append file to chunk list')
            continue
        print(i)

        # flush a full batch of work items to the pool
        if i % batchsize == 0 and i >= 1:
            p = Pool(processes=12)
            p.map(granger_df_producer_worker, worker_input)
            p.close()
            worker_input = []

    # process the work items left over from the last, partial batch
    p = Pool(processes=12)
    p.map(granger_df_producer_worker, worker_input)
    p.close()
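
granger_df_producer_worker itself is not part of this listing; a stub that unpacks the eight-element work items assembled above would start like this (purely illustrative, the real worker is project code):

def granger_df_producer_worker(item):
    # work-item layout exactly as assembled in granger_df_producer
    (file_name, file_path, save_path, stationary_path, dct,
     combinations, liste, causality_path) = item
    # ... load the pickled stationarity results from file_path, run the
    # Granger test per combination, and write the result to save_path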
Example #3
File: Granger.py  Project: bHilz/ADoDC
#most_subcompos = pickle.load(open('most_subcomponents.p', 'rb')).index[:99]
#most_subcompos = list(most_subcompos)

start = time.time()
n = 0
cd.data.store(n, 'test')
import itertools

# unordered pairs of distinct subcomponents; equivalent to nested loops
# over most_subcompos that skip duplicates and reversed pairs
combinations = list(itertools.combinations(most_subcompos, 2))
output = []

for file_name, dct in iterDict(db, most_subcompos, querytype='or',
                               linenr=True):
    n = n + 1
    print(n)
    # skip files for which no stationarity results were pickled
    stationary_file_path = os.path.join(stationary_path, file_name)
    if not os.path.exists(stationary_file_path):
        continue
    with open(stationary_file_path, 'rb') as f:
        stationary_file = pickle.load(f)

    for X, Y in combinations:
        if X in dct and Y in dct:
            # X counts as stationary only if both tests flagged it
            # (1 = stationary, cf. the docstring in Example #1)
            ergebnisX = stationary_file[X][0] == 1 and stationary_file[X][1] == 1
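
The listing breaks off at this point. Given the symmetric structure (and the full pair loop in Example #4), a mirrored check for Y presumably follows before the Granger test runs; a hedged sketch of that continuation (ergebnisY and the gating are assumptions, not shown source):

            # assumed continuation, mirroring the check for X above
            ergebnisY = stationary_file[Y][0] == 1 and stationary_file[Y][1] == 1
            if ergebnisX and ergebnisY:
                ...  # run the Granger test only if both signals are stationary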
Example #4
#most_subcompos = ['ATT_DesRoll', 'ATT_Roll']
most_subcompos = pickle.load(open('most_subcomponents.p', 'rb')).index[:99]
most_subcompos = list(most_subcompos)

n = 0

import itertools

# unordered pairs of distinct subcomponents; equivalent to nested loops
# over most_subcompos that skip duplicates and reversed pairs
combinations = list(itertools.combinations(most_subcompos, 2))
output = []
start = time.time()

for file_name, dct in iterDict(db, most_subcompos, querytype='or'):
    n = n + 1
    #print(n)
    for X, Y in combinations:
        if X in dct and Y in dct:
            # index each signal by its component's log line numbers
            # (e.g. 'ATT_Roll' -> 'ATT_line_number'), then align the two
            dataX = pd.DataFrame(data=dct[X],
                                 index=dct[X.split('_')[0] + '_line_number'],
                                 columns=[X])
            dataY = pd.DataFrame(data=dct[Y],
                                 index=dct[Y.split('_')[0] + '_line_number'],
                                 columns=[Y])
            data = pd.concat([dataX, dataY], axis=1)
            data = Database.fillna(data)
            try:
                output.append(Granger.test(data, 0.05, True, True))
            except Exception:
                # the listing breaks off here; skipping failed pairs is an
                # assumed handling, not part of the shown source
                continue
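
The project's Granger.test wrapper is not shown in this listing; for orientation, a plain statsmodels equivalent on a two-column frame like data above could look roughly like this (maxlag=4 and the 0.05 threshold are illustrative, not taken from the source):

from statsmodels.tsa.stattools import grangercausalitytests

def granger_causes(data, x_col, y_col, maxlag=4, alpha=0.05):
    # tests whether the second column (y_col) Granger-causes the first (x_col);
    # statsmodels returns one result set per lag, ssr_ftest[1] is the p-value
    res = grangercausalitytests(data[[x_col, y_col]].dropna().values, maxlag=maxlag)
    p_values = [res[lag][0]['ssr_ftest'][1] for lag in res]
    return min(p_values) < alpha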
Example #5
from CrashDiag.Preproc.Database import Database, iterDict
from CrashDiag.Preproc.SegDatabase import SegDatabase
from CrashDiag.Preproc.Segmentation import plot_segments
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

db = Database()

subcomponents = ['ATT_Yaw', 'ATT_DesYaw']

for file_name, file in iterDict(db, subcomponents, querytype='and', timestamp=True):
    # yaw error = actual yaw minus desired yaw
    ATT_YawDiff = np.column_stack((file['ATT_Yaw'], -file['ATT_DesYaw'])).sum(axis=1)
    ATT_YawDiff = pd.DataFrame(data=ATT_YawDiff, index=file['ATT_Yaw'].index)
    ATT_YawDiff.columns = ['ATT_YawDiff']
    # wrap the angle difference into (-180, 180] degrees
    ATT_YawDiff = ATT_YawDiff % 360
    ATT_YawDiff[ATT_YawDiff > 180] = ATT_YawDiff[ATT_YawDiff > 180] - 360
    plt.plot(ATT_YawDiff)
    plt.show()

    #seg = SegDatabase.segment_and_pickle_one_log('', file_name, file, ['ATT_YawDiff'], return_obj=True)
    #plot_segments(seg['ATT_YawDiff'])
    #plt.show()
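
The two-step wrap in the loop above is plain modular angle arithmetic; a standalone check of the mapping (values chosen for illustration):

import numpy as np

# e.g. yaw = 10, desired yaw = 350 -> raw difference -340, wrapped to +20
diff = np.array([-340.0, 190.0, 180.0, -170.0])
wrapped = diff % 360
wrapped[wrapped > 180] -= 360
print(wrapped)  # [  20. -170.  180. -170.]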